/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dccg.h"
#include "clk_mgr_internal.h"

#include "dcn30_clk_mgr_smu_msg.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"

#include "atomfirmware.h"


#include "sienna_cichlid_ip_offset.h"
#include "dcn/dcn_3_0_0_offset.h"
#include "dcn/dcn_3_0_0_sh_mask.h"

#include "nbio/nbio_7_4_offset.h"

#include "dcn/dpcs_3_0_0_offset.h"
#include "dcn/dpcs_3_0_0_sh_mask.h"

#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
/* we don't have a clk folder yet */
#include "dcn30/dcn30_clk_mgr.h"

#undef FN
#define FN(reg_name, field_name) \
	clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
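/* FN() resolves a field name to its (shift, mask) pair, the form consumed by
 * the REG_* helpers in reg_helper.h */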

#define REG(reg) \
	(clk_mgr->regs->reg)

#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg

#define BASE(seg) BASE_INNER(seg)
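/* the extra BASE_INNER() level forces the seg argument to be macro-expanded
 * before it is pasted onto DCN_BASE__INST0_SEG */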

#define SR(reg_name)\
		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
					mm ## reg_name
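/* e.g. SR(CLK0_CLK_PLL_REQ) expands to
 *   .CLK0_CLK_PLL_REQ = BASE(mmCLK0_CLK_PLL_REQ_BASE_IDX) + mmCLK0_CLK_PLL_REQ,
 * assuming both mm tokens come from the dcn_3_0_0 offset headers above */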

#undef CLK_SRI
#define CLK_SRI(reg_name, block, inst)\
	.reg_name = mm ## block ## _ ## reg_name

static const struct clk_mgr_registers clk_mgr_regs = {
	CLK_REG_LIST_DCN3()
};

static const struct clk_mgr_shift clk_mgr_shift = {
	CLK_COMMON_MASK_SH_LIST_DCN20_BASE(__SHIFT)
};

static const struct clk_mgr_mask clk_mgr_mask = {
	CLK_COMMON_MASK_SH_LIST_DCN20_BASE(_MASK)
};


/* Query SMU for all clock states for a particular clock */
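/* Response format, as relied on below: querying index 0xFF returns the number
 * of DPM levels in the low byte plus a fine-grained-DPM flag in bit 31;
 * querying a specific index returns that level's frequency in MHz in the low
 * 16 bits. */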
static void dcn3_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, unsigned int *entry_0, unsigned int *num_levels)
{
	unsigned int i;
	char *entry_i = (char *)entry_0;
	uint32_t ret = dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);

	if (ret & (1 << 31))
		/* fine-grained, only min and max */
		*num_levels = 2;
	else
		/* discrete, a number of fixed states */
		/* will set num_levels to 0 on failure */
		*num_levels = ret & 0xFF;

	/* if the initial message failed, num_levels will be 0 */
	for (i = 0; i < *num_levels; i++) {
		*((unsigned int *)entry_i) = (dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
		entry_i += sizeof(clk_mgr->base.bw_params->clk_table.entries[0]);
	}
}

static noinline void dcn3_build_wm_range_table(struct clk_mgr_internal *clk_mgr)
{
	/* defaults */
	double pstate_latency_us = clk_mgr->base.ctx->dc->dml.soc.dram_clock_change_latency_us;
	double sr_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_exit_time_us;
	double sr_enter_plus_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
	uint16_t min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz;

	/* Set A - Normal - default values */
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].valid = true;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = 0;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;

	/* Set B - Performance - higher minimum clocks */
//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].valid = true;
//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = TUNED VALUE;
//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = TUNED VALUE;
//	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;

	/* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = clk_mgr->base.ctx->dc->dml.soc.dummy_pstate_latency_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = 0;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;

	/* Set D - MALL - SR enter and exit times adjusted for MALL */
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = pstate_latency_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = 2;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = 4;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = 0;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
}

void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	unsigned int num_levels;

	memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
	clk_mgr_base->clks.p_state_change_support = true;
	clk_mgr_base->clks.prev_p_state_change_support = true;
	clk_mgr->smu_present = false;

	if (!clk_mgr_base->bw_params)
		return;

	if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
		clk_mgr->smu_present = true;

	if (!clk_mgr->smu_present)
		return;

	/* TODO: decide whether and how to handle failure of these version checks */
	dcn30_smu_check_driver_if_version(clk_mgr);
	dcn30_smu_check_msg_header_version(clk_mgr);

	/* DCFCLK */
	dcn3_init_single_clock(clk_mgr, PPCLK_DCEFCLK,
			&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
			&num_levels);

	/* DTBCLK */
	dcn3_init_single_clock(clk_mgr, PPCLK_DTBCLK,
			&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
			&num_levels);

	/* SOCCLK */
	dcn3_init_single_clock(clk_mgr, PPCLK_SOCCLK,
			&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
			&num_levels);
	// DPREFCLK ???

	/* DISPCLK */
	dcn3_init_single_clock(clk_mgr, PPCLK_DISPCLK,
			&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
			&num_levels);

	/* DPPCLK */
	dcn3_init_single_clock(clk_mgr, PPCLK_PIXCLK,
			&clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
			&num_levels);

	/* PHYCLK */
	dcn3_init_single_clock(clk_mgr, PPCLK_PHYCLK,
			&clk_mgr_base->bw_params->clk_table.entries[0].phyclk_mhz,
			&num_levels);

	/* Get UCLK, update bounding box */
	clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);

	/* WM range table */
	DC_FP_START();
	dcn3_build_wm_range_table(clk_mgr);
	DC_FP_END();
}

static int dcn30_get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
	struct fixed31_32 pll_req;
	/* get FbMult value */
	uint32_t pll_req_reg = REG_READ(CLK0_CLK_PLL_REQ);

	/* set up a fixed-point number
	 * this works because the int part is on the right edge of the register
	 * and the frac part is on the left edge
	 */
	pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int);
	pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac;

	/* multiply by the REFCLK frequency to get the VCO frequency in kHz */
	pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
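	/* e.g. an FbMult of 36.5 with the 100 MHz dfs_ref_freq_khz set up in
	 * dcn3_clk_mgr_construct yields 3,650,000 kHz, the same value used as
	 * the VCO fallback default there */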

	return dc_fixpt_floor(pll_req);
}

static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
			struct dc_state *context,
			bool safe_to_lower)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc *dc = clk_mgr_base->ctx->dc;
	int display_count;
	bool update_dppclk = false;
	bool update_dispclk = false;
	bool enter_display_off = false;
	bool dpp_clock_lowered = false;
	bool update_pstate_unsupported_clk = false;
	struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
	bool force_reset = false;
	bool update_uclk = false;
	bool p_state_change_support;
	int total_plane_count;

	if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
		return;

	if (clk_mgr_base->clks.dispclk_khz == 0 ||
			(dc->debug.force_clock_mode & 0x1)) {
		/* This path is hit on resume or boot up; if the forced_clock cfg
		 * option is used, we bypass programming DISPCLK and DPPCLK, but
		 * they still need to be set for S3.
		 */
		force_reset = true;

		dcn2_read_clocks_from_hw_dentist(clk_mgr_base);

		/* force_clock_mode 0x1: force-reset the clock even if it is
		 * unchanged, as long as it is in the Passive level.
		 */
	}
	display_count = clk_mgr_helper_get_active_display_cnt(dc, context);

	if (display_count == 0)
		enter_display_off = true;

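	/* notify SMU of the display count only in the "safe" direction: a drop
	 * to zero is deferred to the safe_to_lower pass, a rise is sent on the
	 * immediate pass */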
	if (enter_display_off == safe_to_lower)
		dcn30_smu_set_num_of_displays(clk_mgr, display_count);

	if (dc->debug.force_min_dcfclk_mhz > 0)
		new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
				new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCEFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));
	}

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
	}

	if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
		/* We don't actually care about SOCCLK, don't notify SMU of hard min */
		clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;

	clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
	total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
	p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
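	/* with no active planes there is nothing that can underflow, so P-State
	 * switching is always allowed */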

	// invalidate the current P-State forced min in certain dc_mode_softmax situations
	if (dc->clk_mgr->dc_mode_softmax_enabled && safe_to_lower && !p_state_change_support) {
		if ((new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) !=
				(clk_mgr_base->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000))
			update_pstate_unsupported_clk = true;
	}
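	/* i.e. resend the UCLK hard min below whenever the requested DRAM clock
	 * crosses the dc_mode softmax threshold in either direction */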

	if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support) ||
			update_pstate_unsupported_clk) {
		clk_mgr_base->clks.p_state_change_support = p_state_change_support;

		/* to disable P-State switching, set UCLK min = max */
		if (!clk_mgr_base->clks.p_state_change_support) {
			if (dc->clk_mgr->dc_mode_softmax_enabled &&
				new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
				dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
					dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
			else
				dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
					clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
		}
	}

	/* Always update saved value, even if new value not set due to P-State switching unsupported */
	if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
		clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
		update_uclk = true;
	}

	/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
	if (clk_mgr_base->clks.p_state_change_support &&
			(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));

	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
		if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
			dpp_clock_lowered = true;

		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PIXCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
		update_dppclk = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dispclk_khz));
		update_dispclk = true;
	}

	if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
		if (dpp_clock_lowered) {
			/* if clock is being lowered, increase DTO before lowering refclk */
			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
			dcn20_update_clocks_update_dentist(clk_mgr, context);
		} else {
			/* if clock is being raised, increase refclk before lowering DTO */
			if (update_dppclk || update_dispclk)
				dcn20_update_clocks_update_dentist(clk_mgr, context);
			/* There is a check inside dcn20_update_clocks_update_dpp_dto which
			 * ensures that we do not lower the DTO when it is not safe to lower.
			 * We do not need to compare the current and new dppclk before
			 * calling this function.
			 */
			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
		}
	}

	if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		/* update DMCU for wait_loop count */
		dmcu->funcs->set_psr_wait_loop(dmcu,
				clk_mgr_base->clks.dispclk_khz / 1000 / 7);
}


static void dcn3_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
	unsigned int i;
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	WatermarksExternal_t *table = (WatermarksExternal_t *) clk_mgr->wm_range_table;

	if (!clk_mgr->smu_present)
		return;

	if (!table)
		// TODO: log the allocation failure
		return;

	memset(table, 0, sizeof(*table));

	/* collect valid ranges, place in pmfw table */
	for (i = 0; i < WM_SET_COUNT; i++)
		if (clk_mgr->base.bw_params->wm_table.nv_entries[i].valid) {
			table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MinClock = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.min_dcfclk;
			table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MaxClock = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.max_dcfclk;
			table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MinUclk = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.min_uclk;
			table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MaxUclk = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.max_uclk;
			table->Watermarks.WatermarkRow[WM_DCEFCLK][i].WmSetting = i;
			table->Watermarks.WatermarkRow[WM_DCEFCLK][i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
		}

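	/* hand the table to PMFW: program the 64-bit GART address of the range
	 * table in two halves, then trigger the DRAM-to-SMU transfer */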
	dcn30_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
	dcn30_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
	dcn30_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}

/* Set min memclk to minimum, either constrained by the current mode or DPM0 */
static void dcn3_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current_mode)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	if (!clk_mgr->smu_present)
		return;

	if (current_mode) {
		if (clk_mgr_base->clks.p_state_change_support)
			dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
					khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
		else
			dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
					clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
	} else {
		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
				clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
	}
}

/* Set max memclk to highest DPM value */
static void dcn3_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	if (!clk_mgr->smu_present)
		return;

	dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
			clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
}

static void dcn3_set_max_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	if (!clk_mgr->smu_present)
		return;

	dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
}

static void dcn3_set_min_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	if (!clk_mgr->smu_present)
		return;

	dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
}

/* Get current memclk states, update bounding box */
static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	unsigned int num_levels;

	if (!clk_mgr->smu_present)
		return;

	/* Refresh memclk states */
	dcn3_init_single_clock(clk_mgr, PPCLK_UCLK,
			&clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
			&num_levels);
	clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;

	clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);

	/* Refresh bounding box */
	clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
			clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
}

static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	return clk_mgr->smu_present;
}

static bool dcn3_are_clock_states_equal(struct dc_clocks *a,
					struct dc_clocks *b)
{
	if (a->dispclk_khz != b->dispclk_khz)
		return false;
	else if (a->dppclk_khz != b->dppclk_khz)
		return false;
	else if (a->dcfclk_khz != b->dcfclk_khz)
		return false;
	else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
		return false;
	else if (a->dramclk_khz != b->dramclk_khz)
		return false;
	else if (a->p_state_change_support != b->p_state_change_support)
		return false;

	return true;
}

static void dcn3_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	if (!clk_mgr->smu_present)
		return;

	dcn30_smu_set_pme_workaround(clk_mgr);
}

/* Notify clk_mgr of a change in link rate, update phyclk frequency if necessary */
static void dcn30_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc_link *link)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	unsigned int i, max_phyclk_req = clk_mgr_base->bw_params->clk_table.entries[0].phyclk_mhz * 1000;

	if (!clk_mgr->smu_present)
		return;

	clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
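	/* link_rate is the raw DPCD link-rate code, so scaling it by
	 * LINK_RATE_REF_FREQ_IN_KHZ (27 MHz per code step) gives the required
	 * PHY clock in kHz, e.g. HBR2's code 0x14 maps to 540000 kHz */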

	for (i = 0; i < MAX_PIPES * 2; i++) {
		if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
			max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
	}

	if (max_phyclk_req != clk_mgr_base->clks.phyclk_khz) {
		clk_mgr_base->clks.phyclk_khz = max_phyclk_req;
		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PHYCLK, khz_to_mhz_ceil(clk_mgr_base->clks.phyclk_khz));
	}
}

static struct clk_mgr_funcs dcn3_funcs = {
		.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
		.update_clocks = dcn3_update_clocks,
		.init_clocks = dcn3_init_clocks,
		.notify_wm_ranges = dcn3_notify_wm_ranges,
		.set_hard_min_memclk = dcn3_set_hard_min_memclk,
		.set_hard_max_memclk = dcn3_set_hard_max_memclk,
		.set_max_memclk = dcn3_set_max_memclk,
		.set_min_memclk = dcn3_set_min_memclk,
		.get_memclk_states_from_smu = dcn3_get_memclk_states_from_smu,
		.are_clock_states_equal = dcn3_are_clock_states_equal,
		.enable_pme_wa = dcn3_enable_pme_wa,
		.notify_link_rate_change = dcn30_notify_link_rate_change,
		.is_smu_present = dcn3_is_smu_present
};

static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
{
	dcn2_init_clocks(clk_mgr);

	/* TODO: implement the remaining FPGA clock functions */
}

struct clk_mgr_funcs dcn3_fpga_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = dcn2_update_clocks_fpga,
	.init_clocks = dcn3_init_clocks_fpga,
};

/* TODO: update clk register offsets for dcn30 */
void dcn3_clk_mgr_construct(
		struct dc_context *ctx,
		struct clk_mgr_internal *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg)
{
	clk_mgr->base.ctx = ctx;
	clk_mgr->base.funcs = &dcn3_funcs;
	clk_mgr->regs = &clk_mgr_regs;
	clk_mgr->clk_mgr_shift = &clk_mgr_shift;
	clk_mgr->clk_mgr_mask = &clk_mgr_mask;

	clk_mgr->dccg = dccg;
	clk_mgr->dfs_bypass_disp_clk = 0;

	clk_mgr->dprefclk_ss_percentage = 0;
	clk_mgr->dprefclk_ss_divider = 1000;
	clk_mgr->ss_on_dprefclk = false;
	clk_mgr->dfs_ref_freq_khz = 100000;

	clk_mgr->base.dprefclk_khz = 730000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved

	if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
		clk_mgr->base.funcs = &dcn3_fpga_funcs;
		clk_mgr->base.dentist_vco_freq_khz = 3650000;

	} else {
		struct clk_state_registers_and_bypass s = { 0 };

		/* integer part is now VCO frequency in kHz */
		clk_mgr->base.dentist_vco_freq_khz = dcn30_get_vco_frequency_from_reg(clk_mgr);

		/* in case we don't get a value from the register, use default */
		if (clk_mgr->base.dentist_vco_freq_khz == 0)
			clk_mgr->base.dentist_vco_freq_khz = 3650000;

		/* Convert dprefclk units from MHz to KHz */
		/* Value already divided by 10, some resolution lost */

		/* TODO: uncomment assert once dcn3_dump_clk_registers is implemented */
		//ASSERT(s.dprefclk != 0);
		if (s.dprefclk != 0)
			clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;
	}

	clk_mgr->dfs_bypass_enabled = false;

	clk_mgr->smu_present = false;

	dce_clock_read_ss_info(clk_mgr);

	clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);

	/* need physical address of table to give to PMFW */
	clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
			DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
			&clk_mgr->wm_range_table_addr);
}

void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
{
	kfree(clk_mgr->base.bw_params);

	if (clk_mgr->wm_range_table)
		dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
				clk_mgr->wm_range_table);
}