/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/string.h>
#include <linux/acpi.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"

unsigned long long dm_get_timestamp(struct dc_context *ctx)
{
	/* TODO: return actual timestamp */
	return 0;
}

bool dm_write_persistent_data(struct dc_context *ctx,
		const struct dc_sink *sink,
		const char *module_name,
		const char *key_name,
		void *params,
		unsigned int size,
		struct persistent_data_flag *flag)
{
	/* TODO: implement */
	return false;
}

bool dm_read_persistent_data(struct dc_context *ctx,
				const struct dc_sink *sink,
				const char *module_name,
				const char *key_name,
				void *params,
				unsigned int size,
				struct persistent_data_flag *flag)
{
	/* TODO: implement */
	return false;
}

/**** power component interfaces ****/

bool dm_pp_pre_dce_clock_change(
		struct dc_context *ctx,
		struct dm_pp_gpu_clock_range *requested_state,
		struct dm_pp_gpu_clock_range *actual_state)
{
	/* TODO: implement */
	return false;
}

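/*
 * Translate the DC display configuration into the amdgpu_pm display
 * configuration and notify powerplay so it can adjust clocks and power
 * state for the active displays.
 */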
bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;

	if (adev->pm.dpm_enabled) {

		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

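		/* DC provides clocks in kHz; powerplay expects 10 kHz units. */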
		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz / 10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz / 10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz / 10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz / 10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		adev->pm.pm_display_cfg.vrefresh =
				pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

		/* TODO: complete implementation of
		 * amd_powerplay_display_configuration_change().
		 * Follow the example of:
		 * PHM_StoreDALConfigurationData - powerplay/hwmgr/hardwaremanager.c
		 * PP_IRI_DisplayConfigurationChange - powerplay/eventmgr/iri.c
		 */
		amd_powerplay_display_configuration_change(
				adev->powerplay.pp_handle,
				&adev->pm.pm_display_cfg);

		/* TODO: replace by a separate call to 'apply display cfg'? */
		amdgpu_pm_compute_clocks(adev);
	}

	return true;
}

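/*
 * Report the system (memory and engine) clock ranges: query DPM when it is
 * enabled, otherwise fall back to conservative default values.
 */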
bool dc_service_get_system_clocks_range(
		const struct dc_context *ctx,
		struct dm_pp_gpu_clock_range *sys_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;

	/* Default values, in case PPLib is not compiled-in. */
	sys_clks->mclk.max_khz = 800000;
	sys_clks->mclk.min_khz = 800000;

	sys_clks->sclk.max_khz = 600000;
	sys_clks->sclk.min_khz = 300000;

	if (adev->pm.dpm_enabled) {
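		/* Second argument: true = lowest DPM level, false = highest. */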
		sys_clks->mclk.max_khz = amdgpu_dpm_get_mclk(adev, false);
		sys_clks->mclk.min_khz = amdgpu_dpm_get_mclk(adev, true);

		sys_clks->sclk.max_khz = amdgpu_dpm_get_sclk(adev, false);
		sys_clks->sclk.min_khz = amdgpu_dpm_get_sclk(adev, true);
	}

	return true;
}

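/*
 * Built-in clock tables used as a fallback when the powerplay query for
 * clock levels fails (e.g. when PPLib is not available).
 */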
static void get_default_clock_levels(
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *clks)
{
	static const uint32_t disp_clks_in_khz[6] = {
			300000, 400000, 496560, 626090, 685720, 757900 };
	static const uint32_t sclks_in_khz[6] = {
			300000, 360000, 423530, 514290, 626090, 720000 };
	static const uint32_t mclks_in_khz[2] = { 333000, 800000 };

	switch (clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, disp_clks_in_khz,
				sizeof(disp_clks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, sclks_in_khz,
				sizeof(sclks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		clks->num_levels = 2;
		memmove(clks->clocks_in_khz, mclks_in_khz,
				sizeof(mclks_in_khz));
		break;
	default:
		clks->num_levels = 0;
		break;
	}
}

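/* Map a DM clock type onto the corresponding powerplay clock type. */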
static enum amd_pp_clock_type dc_to_pp_clock_type(
		enum dm_pp_clock_type dm_pp_clk_type)
{
	enum amd_pp_clock_type amd_pp_clk_type = 0;

	switch (dm_pp_clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		amd_pp_clk_type = amd_pp_disp_clock;
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		amd_pp_clk_type = amd_pp_sys_clock;
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		amd_pp_clk_type = amd_pp_mem_clock;
		break;
	default:
		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
				dm_pp_clk_type);
		break;
	}

	return amd_pp_clk_type;
}

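/*
 * Copy powerplay clock levels into the DC structure, clamping the number of
 * levels to DM_PP_MAX_CLOCK_LEVELS and converting 10 kHz units to kHz.
 */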
static void pp_to_dc_clock_levels(
		const struct amd_pp_clocks *pp_clks,
		struct dm_pp_clock_levels *dc_clks,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->count,
				DM_PP_MAX_CLOCK_LEVELS);

		dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else {
		dc_clks->num_levels = pp_clks->count;
	}

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < dc_clks->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
		/* translate 10 kHz to kHz */
		dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
	}
}

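/*
 * Query powerplay for the clock levels of the requested type, falling back
 * to the built-in defaults on failure, then trim boosted levels that exceed
 * the display mode validation clocks.
 */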
bool dm_pp_get_clock_levels_by_type(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (amd_powerplay_get_clock_by_type(pp_handle,
			dc_to_pp_clock_type(clk_type), &pp_clks)) {
		/* Error in pplib. Provide default values. */
		get_default_clock_levels(clk_type, dc_clks);
		return true;
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	if (amd_powerplay_get_display_mode_validation_clocks(pp_handle,
			&validation_clks)) {
		/* Error in pplib. Provide default values. */
		DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
		validation_clks.engine_max_clock = 72000;
		validation_clks.memory_max_clock = 80000;
		validation_clks.level = 0;
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB:    level           : %d\n",
			validation_clks.level);

	/* Translate 10 kHz to kHz. */
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

	/* Determine the highest non-boosted level from the validation clocks. */
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
				/* This clock is higher than the validation
				 * clock. That means the previous one is the
				 * highest non-boosted one.
				 */
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	}

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_latency(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_latency *clk_level_info)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_get_clock_levels_by_type_with_voltage(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_apply_clock_for_voltage_request(
	const struct dc_context *ctx,
	struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_get_static_clocks(
	const struct dc_context *ctx,
	struct dm_pp_static_clock_info *static_clk_info)
{
	/* TODO: to be implemented */
	return false;
}

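/* TODO: fill in the pp_smu_funcs_rv function pointers used by DC. */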
void dm_pp_get_funcs_rv(
		struct dc_context *ctx,
		struct pp_smu_funcs_rv *funcs)
{
	/* TODO: to be implemented */
}

/**** end of power component interfaces ****/