/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn32.h"

#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"

#define DCN_BASE__INST0_SEG2                       0x000034C0

#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
#define CTX dmub
#define REGS dmub->regs_dcn32
#define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)

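/*
 * The DCN32 register tables below are generated by expanding the
 * DMUB_DCN32_REGS()/DMUB_DCN32_FIELDS() lists three times: once with
 * DMUB_SR producing register offsets, then twice with DMUB_SF producing
 * field masks and field shifts respectively.
 */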
const struct dmub_srv_dcn32_regs dmub_srv_dcn32_regs = {
#define DMUB_SR(reg) REG_OFFSET_EXP(reg),
		{ DMUB_DCN32_REGS() },
#undef DMUB_SR

#define DMUB_SF(reg, field) FD_MASK(reg, field),
		{ DMUB_DCN32_FIELDS() },
#undef DMUB_SF

#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
		{ DMUB_DCN32_FIELDS() },
#undef DMUB_SF
};

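/*
 * Return the FB base/offset pair used to rebase driver framebuffer
 * addresses for DMCUB. Values provided by the driver take precedence;
 * otherwise they are read from the DCN VM FB location registers, which
 * hold the address in 16 MB units (hence the shift by 24).
 */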
static void dmub_dcn32_get_fb_base_offset(struct dmub_srv *dmub,
		uint64_t *fb_base,
		uint64_t *fb_offset)
{
	uint32_t tmp;

	if (dmub->fb_base || dmub->fb_offset) {
		*fb_base = dmub->fb_base;
		*fb_offset = dmub->fb_offset;
		return;
	}

	REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
	*fb_base = (uint64_t)tmp << 24;

	REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp);
	*fb_offset = (uint64_t)tmp << 24;
}

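/*
 * Translate a driver framebuffer address into the address space DMCUB
 * expects for cache window offsets by rebasing it from fb_base onto
 * fb_offset.
 */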
static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in,
		uint64_t fb_base,
		uint64_t fb_offset,
		union dmub_addr *addr_out)
{
	addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset;
}

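/*
 * Put DMCUB into reset. If the firmware is still running, first ask it
 * to stop via the STOP_FW GPINT command and poll (bounded by a timeout)
 * for the acknowledgement and the stop response, then assert soft reset
 * and clear the mailbox pointers.
 */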
void dmub_dcn32_reset(struct dmub_srv *dmub)
{
	union dmub_gpint_data_register cmd;
	const uint32_t timeout = 30;
	uint32_t in_reset, scratch, i;

	REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);

	if (in_reset == 0) {
		cmd.bits.status = 1;
		cmd.bits.command_code = DMUB_GPINT__STOP_FW;
		cmd.bits.param = 0;

		dmub->hw_funcs.set_gpint(dmub, cmd);

		/**
		 * Timeout covers both the ACK and the wait
		 * for remaining work to finish.
		 *
		 * This is mostly bound by the PHY disable sequence.
		 * Each register check will be greater than 1us, so
		 * don't bother using udelay.
		 */

		for (i = 0; i < timeout; ++i) {
			if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
				break;
		}

		for (i = 0; i < timeout; ++i) {
			scratch = dmub->hw_funcs.get_gpint_response(dmub);
			if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
				break;
		}

		/* Clear the GPINT command manually so we don't reset again. */
		cmd.all = 0;
		dmub->hw_funcs.set_gpint(dmub, cmd);

		/* Force reset in case we timed out, DMCUB is likely hung. */
	}

	REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
	REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
	REG_WRITE(DMCUB_INBOX1_RPTR, 0);
	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
	REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
	REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
	REG_WRITE(DMCUB_SCRATCH0, 0);
}

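/*
 * Release DMCUB from reset: clear the GPINT data register, release the
 * MMHUBBUB reset, publish the (masked) PSP version in SCRATCH15, then
 * enable DMCUB with the traceport and deassert soft reset.
 */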
void dmub_dcn32_reset_release(struct dmub_srv *dmub)
{
	REG_WRITE(DMCUB_GPINT_DATAIN1, 0);
	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
	REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
	REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
	REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 0);
}

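/*
 * Backdoor (driver-driven) firmware load: program cache windows CW0 and
 * CW1 with framebuffer-translated offsets while DMCUB_SEC_RESET is held
 * across the window programming.
 */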
void dmub_dcn32_backdoor_load(struct dmub_srv *dmub,
		const struct dmub_window *cw0,
		const struct dmub_window *cw1)
{
	union dmub_addr offset;
	uint64_t fb_base, fb_offset;

	dmub_dcn32_get_fb_base_offset(dmub, &fb_base, &fb_offset);

	REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);

	dmub_dcn32_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);

	REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
	REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
			DMCUB_REGION3_CW0_ENABLE, 1);

	dmub_dcn32_translate_addr(&cw1->offset, fb_base, fb_offset, &offset);

	REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
	REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
			DMCUB_REGION3_CW1_ENABLE, 1);

	REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
			0x20);
}

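/*
 * Same as dmub_dcn32_backdoor_load(), but for ZFB (zero frame buffer)
 * configurations: the window offsets are already in the address space
 * DMCUB expects, so no FB translation is applied.
 */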
void dmub_dcn32_backdoor_load_zfb_mode(struct dmub_srv *dmub,
		      const struct dmub_window *cw0,
		      const struct dmub_window *cw1)
{
	union dmub_addr offset;

	REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);

	offset = cw0->offset;

	REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
	REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
			DMCUB_REGION3_CW0_ENABLE, 1);

	offset = cw1->offset;

	REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
	REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
			DMCUB_REGION3_CW1_ENABLE, 1);

	REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
			0x20);
}

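/*
 * Program the remaining cache windows (CW3-CW6) and REGION5. Note that
 * cw2 is not programmed on DCN32 and that REGION5 reuses cw5's offset.
 */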
void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
		const struct dmub_window *cw2,
		const struct dmub_window *cw3,
		const struct dmub_window *cw4,
		const struct dmub_window *cw5,
		const struct dmub_window *cw6)
{
	union dmub_addr offset;

	offset = cw3->offset;

	REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base);
	REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top,
			DMCUB_REGION3_CW3_ENABLE, 1);

	offset = cw4->offset;

	REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base);
	REG_SET_2(DMCUB_REGION3_CW4_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW4_TOP_ADDRESS, cw4->region.top,
			DMCUB_REGION3_CW4_ENABLE, 1);

	offset = cw5->offset;

	REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base);
	REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top,
			DMCUB_REGION3_CW5_ENABLE, 1);

	REG_WRITE(DMCUB_REGION5_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION5_OFFSET_HIGH, offset.u.high_part);
	REG_SET_2(DMCUB_REGION5_TOP_ADDRESS, 0,
			DMCUB_REGION5_TOP_ADDRESS,
			cw5->region.top - cw5->region.base - 1,
			DMCUB_REGION5_ENABLE, 1);

	offset = cw6->offset;

	REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base);
	REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
			DMCUB_REGION3_CW6_ENABLE, 1);
}

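/*
 * Inbox1 is the driver-to-DMCUB command mailbox; program its base
 * address and size (top - base).
 */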
void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
		const struct dmub_region *inbox1)
{
	REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, inbox1->base);
	REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}

uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_INBOX1_RPTR);
}

void dmub_dcn32_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset)
{
	REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset);
}

void dmub_dcn32_setup_out_mailbox(struct dmub_srv *dmub,
		const struct dmub_region *outbox1)
{
	REG_WRITE(DMCUB_OUTBOX1_BASE_ADDRESS, outbox1->base);
	REG_WRITE(DMCUB_OUTBOX1_SIZE, outbox1->top - outbox1->base);
}

uint32_t dmub_dcn32_get_outbox1_wptr(struct dmub_srv *dmub)
{
	/**
	 * The outbox1 wptr register is accessed without locks (dal & dc);
	 * this function must only be called from dmub_srv_stat_get_notification().
	 */
	return REG_READ(DMCUB_OUTBOX1_WPTR);
}

void dmub_dcn32_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
{
	/**
	 * The outbox1 rptr register is accessed without locks (dal & dc);
	 * this function must only be called from dmub_srv_stat_get_notification().
	 */
	REG_WRITE(DMCUB_OUTBOX1_RPTR, rptr_offset);
}

bool dmub_dcn32_is_hw_init(struct dmub_srv *dmub)
{
	union dmub_fw_boot_status status;
	uint32_t is_hw_init;

	status.all = REG_READ(DMCUB_SCRATCH0);
	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_hw_init);

	return is_hw_init != 0 && status.bits.dal_fw;
}

bool dmub_dcn32_is_supported(struct dmub_srv *dmub)
{
	uint32_t supported = 0;

	REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported);

	return supported;
}

void dmub_dcn32_set_gpint(struct dmub_srv *dmub,
		union dmub_gpint_data_register reg)
{
	REG_WRITE(DMCUB_GPINT_DATAIN1, reg.all);
}

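/*
 * A GPINT command is considered acknowledged once the firmware clears
 * the status bit in the command register, so compare the readback
 * against the command with its status bit cleared.
 */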
bool dmub_dcn32_is_gpint_acked(struct dmub_srv *dmub,
		union dmub_gpint_data_register reg)
{
	union dmub_gpint_data_register test;

	reg.bits.status = 0;
	test.all = REG_READ(DMCUB_GPINT_DATAIN1);

	return test.all == reg.all;
}

uint32_t dmub_dcn32_get_gpint_response(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_SCRATCH7);
}

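/*
 * Read the GPINT DATAOUT word, then clear it and pulse the IH interrupt
 * ack, with the GPINT interrupt enable masked around the sequence.
 */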
uint32_t dmub_dcn32_get_gpint_dataout(struct dmub_srv *dmub)
{
	uint32_t dataout = REG_READ(DMCUB_GPINT_DATAOUT);

	REG_UPDATE(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN, 0);

	REG_WRITE(DMCUB_GPINT_DATAOUT, 0);
	REG_UPDATE(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK, 1);
	REG_UPDATE(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK, 0);

	REG_UPDATE(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN, 1);

	return dataout;
}

union dmub_fw_boot_status dmub_dcn32_get_fw_boot_status(struct dmub_srv *dmub)
{
	union dmub_fw_boot_status status;

	status.all = REG_READ(DMCUB_SCRATCH0);
	return status;
}

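/*
 * Firmware boot options are passed through SCRATCH14.
 * dmub_dcn32_skip_dmub_panel_power_sequence() below read-modify-writes
 * the same word to toggle the skip_phy_init_panel_sequence bit.
 */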
void dmub_dcn32_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
	union dmub_fw_boot_options boot_options = {0};

	boot_options.bits.z10_disable = params->disable_z10;

	REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}

void dmub_dcn32_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip)
{
	union dmub_fw_boot_options boot_options;

	boot_options.all = REG_READ(DMCUB_SCRATCH14);
	boot_options.bits.skip_phy_init_panel_sequence = skip;
	REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}

void dmub_dcn32_setup_outbox0(struct dmub_srv *dmub,
		const struct dmub_region *outbox0)
{
	REG_WRITE(DMCUB_OUTBOX0_BASE_ADDRESS, outbox0->base);

	REG_WRITE(DMCUB_OUTBOX0_SIZE, outbox0->top - outbox0->base);
}

uint32_t dmub_dcn32_get_outbox0_wptr(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_OUTBOX0_WPTR);
}

void dmub_dcn32_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
{
	REG_WRITE(DMCUB_OUTBOX0_RPTR, rptr_offset);
}

uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_TIMER_CURRENT);
}

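/*
 * Snapshot the DMCUB state used for debugging: scratch registers, fault
 * addresses, mailbox pointers/sizes, and the key enable/reset bits.
 */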
void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
{
	uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
	uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;

	if (!dmub || !diag_data)
		return;

	memset(diag_data, 0, sizeof(*diag_data));

	diag_data->dmcub_version = dmub->fw_version;

	diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
	diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
	diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
	diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
	diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
	diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
	diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
	diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
	diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
	diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
	diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
	diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
	diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
	diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
	diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
	diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);

	diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
	diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
	diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);

	diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
	diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
	diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);

	diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
	diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
	diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);

	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
	diag_data->is_dmcub_enabled = is_dmub_enabled;

	REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
	diag_data->is_dmcub_soft_reset = is_soft_reset;

	REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
	diag_data->is_dmcub_secure_reset = is_sec_reset;

	REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
	diag_data->is_traceport_en = is_traceport_enabled;

	REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
	diag_data->is_cw0_enabled = is_cw0_enabled;

	REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
	diag_data->is_cw6_enabled = is_cw6_enabled;
}

void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
	/* DMCUB_REGION3_TMR_AXI_SPACE values:
	 * 0b011 (0x3) - FB physical address
	 * 0b100 (0x4) - GPU virtual address
	 *
	 * Default value is 0x3 (FB Physical address for TMR). When programming
	 * DMUB to be in system memory, change to 0x4. The system memory allocated
	 * is accessible by both GPU and CPU, so we use GPU virtual address.
	 */
	REG_WRITE(DMCUB_REGION3_TMR_AXI_SPACE, 0x4);
}

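/*
 * Inbox0 commands are single-register messages: the command word is
 * written directly to the WPTR register, and the acknowledgement is
 * exchanged through SCRATCH17 via the helpers below.
 */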
void dmub_dcn32_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data)
{
	REG_WRITE(DMCUB_INBOX0_WPTR, data.inbox0_cmd_common.all);
}

void dmub_dcn32_clear_inbox0_ack_register(struct dmub_srv *dmub)
{
	REG_WRITE(DMCUB_SCRATCH17, 0);
}

uint32_t dmub_dcn32_read_inbox0_ack_register(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_SCRATCH17);
}