1 /*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 #include <linux/delay.h>
26 #include "dm_services.h"
27 #include "dcn20/dcn20_hubbub.h"
28 #include "dcn21_hubbub.h"
29 #include "reg_helper.h"
30 
/*
 * Register-access helper macros used by the REG_* helpers in reg_helper.h.
 * They rely on a local variable named "hubbub1" (struct dcn20_hubbub *)
 * being in scope at every call site.
 *
 * Note: a second, byte-identical set of REG/CTX/FN definitions used to
 * follow here; the duplicates have been removed.
 */
#define REG(reg)\
	hubbub1->regs->reg
#define DC_LOGGER \
	hubbub1->base.ctx->logger
#define CTX \
	hubbub1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	hubbub1->shifts->field_name, hubbub1->masks->field_name
51 
/*
 * Convert a watermark expressed in nanoseconds into refclk cycles and
 * clamp the result to the register field's maximum programmable value.
 */
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t cycles = (wm_ns * refclk_mhz) / 1000;

	return (cycles > clamp_value) ? clamp_value : cycles;
}
66 
/*
 * Bring up the DC host-VM (DCHVM) block: request HOSTVM init, wait for the
 * rIOMMU to report active, then reflect power status, start rIOMMU
 * prefetching and enable dynamic clock gating.
 *
 * If the rIOMMU never becomes active within the poll budget, the function
 * silently skips the remaining programming (best-effort init).
 */
void dcn21_dchvm_init(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t riommu_active;
	int i;

	//Init DCHVM block
	REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);

	//Poll until RIOMMU_ACTIVE = 1 (up to 100 iterations * 5us = 500us)
	for (i = 0; i < 100; i++) {
		REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);

		if (riommu_active)
			break;
		else
			udelay(5);
	}

	if (riommu_active) {
		//Reflect the power status of DCHUBBUB
		REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);

		//Start rIOMMU prefetching
		REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);

		// Enable dynamic clock gating
		REG_UPDATE_4(DCHVM_CLK_CTRL,
						HVM_DISPCLK_R_GATE_DIS, 0,
						HVM_DISPCLK_G_GATE_DIS, 0,
						HVM_DCFCLK_R_GATE_DIS, 0,
						HVM_DCFCLK_G_GATE_DIS, 0);

		//Poll until HOSTVM_PREFETCH_DONE = 1 (5us interval, 100 tries)
		REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
	}
}
104 
/*
 * Program the DCHUB system-aperture and GART physical-address configuration,
 * set up VMID 0 when a GART page table range is provided, then initialize
 * the DCHVM block.
 *
 * Returns the number of VMIDs supported by this hubbub instance.
 */
int hubbub21_init_dchub(struct hubbub *hubbub,
		struct dcn_hubbub_phys_addr_config *pa_config)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_vmid_page_table_config phys_config;

	/* Aperture registers take the upper bits of the address (>> 24). */
	REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
			FB_BASE, pa_config->system_aperture.fb_base >> 24);
	REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
			FB_TOP, pa_config->system_aperture.fb_top >> 24);
	REG_SET(DCN_VM_FB_OFFSET, 0,
			FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
	REG_SET(DCN_VM_AGP_BOT, 0,
			AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
	REG_SET(DCN_VM_AGP_TOP, 0,
			AGP_TOP, pa_config->system_aperture.agp_top >> 24);
	REG_SET(DCN_VM_AGP_BASE, 0,
			AGP_BASE, pa_config->system_aperture.agp_base >> 24);

	/* A zero-length range (start == end) means no GART page table to map. */
	if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
		/* Page table start/end are programmed as 4KB page frame numbers. */
		phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
		phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
		phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
		phys_config.depth = 0;
		phys_config.block_size = 0;
		// Init VMID 0 based on PA config
		dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
	}

	dcn21_dchvm_init(hubbub);

	return hubbub1->num_vmid;
}
138 
139 bool hubbub21_program_urgent_watermarks(
140 		struct hubbub *hubbub,
141 		struct dcn_watermark_set *watermarks,
142 		unsigned int refclk_mhz,
143 		bool safe_to_lower)
144 {
145 	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
146 	uint32_t prog_wm_value;
147 	bool wm_pending = false;
148 
149 	/* Repeat for water mark set A, B, C and D. */
150 	/* clock state A */
151 	if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
152 		hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
153 		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
154 				refclk_mhz, 0x1fffff);
155 		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
156 				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value,
157 				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value);
158 
159 		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
160 			"HW register value = 0x%x\n",
161 			watermarks->a.urgent_ns, prog_wm_value);
162 	} else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
163 		wm_pending = true;
164 
165 	/* determine the transfer time for a quantity of data for a particular requestor.*/
166 	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
167 			> hubbub1->watermarks.a.frac_urg_bw_flip) {
168 		hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
169 
170 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
171 				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
172 	} else if (watermarks->a.frac_urg_bw_flip
173 			< hubbub1->watermarks.a.frac_urg_bw_flip)
174 		wm_pending = true;
175 
176 	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
177 			> hubbub1->watermarks.a.frac_urg_bw_nom) {
178 		hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
179 
180 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
181 				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
182 	} else if (watermarks->a.frac_urg_bw_nom
183 			< hubbub1->watermarks.a.frac_urg_bw_nom)
184 		wm_pending = true;
185 
186 	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
187 		hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
188 		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
189 				refclk_mhz, 0x1fffff);
190 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
191 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
192 	} else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns)
193 		wm_pending = true;
194 
195 	/* clock state B */
196 	if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
197 		hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
198 		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
199 				refclk_mhz, 0x1fffff);
200 		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
201 				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value,
202 				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, prog_wm_value);
203 
204 		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
205 			"HW register value = 0x%x\n",
206 			watermarks->b.urgent_ns, prog_wm_value);
207 	} else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
208 		wm_pending = true;
209 
210 	/* determine the transfer time for a quantity of data for a particular requestor.*/
211 	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
212 			> hubbub1->watermarks.a.frac_urg_bw_flip) {
213 		hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
214 
215 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
216 				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
217 	} else if (watermarks->a.frac_urg_bw_flip
218 			< hubbub1->watermarks.a.frac_urg_bw_flip)
219 		wm_pending = true;
220 
221 	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
222 			> hubbub1->watermarks.a.frac_urg_bw_nom) {
223 		hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
224 
225 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
226 				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
227 	} else if (watermarks->a.frac_urg_bw_nom
228 			< hubbub1->watermarks.a.frac_urg_bw_nom)
229 		wm_pending = true;
230 
231 	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
232 		hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
233 		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
234 				refclk_mhz, 0x1fffff);
235 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
236 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
237 	} else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns)
238 		wm_pending = true;
239 
240 	/* clock state C */
241 	if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
242 		hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
243 		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
244 				refclk_mhz, 0x1fffff);
245 		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
246 				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value,
247 				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, prog_wm_value);
248 
249 		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
250 			"HW register value = 0x%x\n",
251 			watermarks->c.urgent_ns, prog_wm_value);
252 	} else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
253 		wm_pending = true;
254 
255 	/* determine the transfer time for a quantity of data for a particular requestor.*/
256 	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
257 			> hubbub1->watermarks.a.frac_urg_bw_flip) {
258 		hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
259 
260 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
261 				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
262 	} else if (watermarks->a.frac_urg_bw_flip
263 			< hubbub1->watermarks.a.frac_urg_bw_flip)
264 		wm_pending = true;
265 
266 	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
267 			> hubbub1->watermarks.a.frac_urg_bw_nom) {
268 		hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
269 
270 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
271 				DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
272 	} else if (watermarks->a.frac_urg_bw_nom
273 			< hubbub1->watermarks.a.frac_urg_bw_nom)
274 		wm_pending = true;
275 
276 	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
277 		hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
278 		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
279 				refclk_mhz, 0x1fffff);
280 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
281 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
282 	} else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns)
283 		wm_pending = true;
284 
285 	/* clock state D */
286 	if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
287 		hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
288 		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
289 				refclk_mhz, 0x1fffff);
290 		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
291 				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value,
292 				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, prog_wm_value);
293 
294 		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
295 			"HW register value = 0x%x\n",
296 			watermarks->d.urgent_ns, prog_wm_value);
297 	} else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
298 		wm_pending = true;
299 
300 	/* determine the transfer time for a quantity of data for a particular requestor.*/
301 	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
302 			> hubbub1->watermarks.a.frac_urg_bw_flip) {
303 		hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
304 
305 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
306 				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
307 	} else if (watermarks->a.frac_urg_bw_flip
308 			< hubbub1->watermarks.a.frac_urg_bw_flip)
309 		wm_pending = true;
310 
311 	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
312 			> hubbub1->watermarks.a.frac_urg_bw_nom) {
313 		hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
314 
315 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
316 				DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
317 	} else if (watermarks->a.frac_urg_bw_nom
318 			< hubbub1->watermarks.a.frac_urg_bw_nom)
319 		wm_pending = true;
320 
321 	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
322 		hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
323 		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
324 				refclk_mhz, 0x1fffff);
325 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
326 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
327 	} else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns)
328 		wm_pending = true;
329 
330 	return wm_pending;
331 }
332 
333 bool hubbub21_program_stutter_watermarks(
334 		struct hubbub *hubbub,
335 		struct dcn_watermark_set *watermarks,
336 		unsigned int refclk_mhz,
337 		bool safe_to_lower)
338 {
339 	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
340 	uint32_t prog_wm_value;
341 	bool wm_pending = false;
342 
343 	/* clock state A */
344 	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
345 			> hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
346 		hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
347 				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
348 		prog_wm_value = convert_and_clamp(
349 				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
350 				refclk_mhz, 0x1fffff);
351 		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
352 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value,
353 				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
354 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
355 			"HW register value = 0x%x\n",
356 			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
357 	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
358 			< hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
359 		wm_pending = true;
360 
361 	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
362 			> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
363 		hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
364 				watermarks->a.cstate_pstate.cstate_exit_ns;
365 		prog_wm_value = convert_and_clamp(
366 				watermarks->a.cstate_pstate.cstate_exit_ns,
367 				refclk_mhz, 0x1fffff);
368 		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
369 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value,
370 				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
371 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
372 			"HW register value = 0x%x\n",
373 			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
374 	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
375 			< hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
376 		wm_pending = true;
377 
378 	/* clock state B */
379 	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
380 			> hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
381 		hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
382 				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
383 		prog_wm_value = convert_and_clamp(
384 				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
385 				refclk_mhz, 0x1fffff);
386 		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
387 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value,
388 				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
389 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
390 			"HW register value = 0x%x\n",
391 			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
392 	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
393 			< hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
394 		wm_pending = true;
395 
396 	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
397 			> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
398 		hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
399 				watermarks->b.cstate_pstate.cstate_exit_ns;
400 		prog_wm_value = convert_and_clamp(
401 				watermarks->b.cstate_pstate.cstate_exit_ns,
402 				refclk_mhz, 0x1fffff);
403 		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
404 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value,
405 				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
406 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
407 			"HW register value = 0x%x\n",
408 			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
409 	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
410 			< hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
411 		wm_pending = true;
412 
413 	/* clock state C */
414 	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
415 			> hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
416 		hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
417 				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
418 		prog_wm_value = convert_and_clamp(
419 				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
420 				refclk_mhz, 0x1fffff);
421 		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
422 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value,
423 				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
424 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
425 			"HW register value = 0x%x\n",
426 			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
427 	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
428 			< hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
429 		wm_pending = true;
430 
431 	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
432 			> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
433 		hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
434 				watermarks->c.cstate_pstate.cstate_exit_ns;
435 		prog_wm_value = convert_and_clamp(
436 				watermarks->c.cstate_pstate.cstate_exit_ns,
437 				refclk_mhz, 0x1fffff);
438 		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
439 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value,
440 				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
441 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
442 			"HW register value = 0x%x\n",
443 			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
444 	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
445 			< hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
446 		wm_pending = true;
447 
448 	/* clock state D */
449 	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
450 			> hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
451 		hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
452 				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
453 		prog_wm_value = convert_and_clamp(
454 				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
455 				refclk_mhz, 0x1fffff);
456 		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
457 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value,
458 				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
459 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
460 			"HW register value = 0x%x\n",
461 			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
462 	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
463 			< hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
464 		wm_pending = true;
465 
466 	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
467 			> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
468 		hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
469 				watermarks->d.cstate_pstate.cstate_exit_ns;
470 		prog_wm_value = convert_and_clamp(
471 				watermarks->d.cstate_pstate.cstate_exit_ns,
472 				refclk_mhz, 0x1fffff);
473 		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
474 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value,
475 				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
476 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
477 			"HW register value = 0x%x\n",
478 			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
479 	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
480 			< hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
481 		wm_pending = true;
482 
483 	return wm_pending;
484 }
485 
486 bool hubbub21_program_pstate_watermarks(
487 		struct hubbub *hubbub,
488 		struct dcn_watermark_set *watermarks,
489 		unsigned int refclk_mhz,
490 		bool safe_to_lower)
491 {
492 	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
493 	uint32_t prog_wm_value;
494 
495 	bool wm_pending = false;
496 
497 	/* clock state A */
498 	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
499 			> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
500 		hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
501 				watermarks->a.cstate_pstate.pstate_change_ns;
502 		prog_wm_value = convert_and_clamp(
503 				watermarks->a.cstate_pstate.pstate_change_ns,
504 				refclk_mhz, 0x1fffff);
505 		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
506 				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value,
507 				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
508 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
509 			"HW register value = 0x%x\n\n",
510 			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
511 	} else if (watermarks->a.cstate_pstate.pstate_change_ns
512 			< hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
513 		wm_pending = true;
514 
515 	/* clock state B */
516 	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
517 			> hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
518 		hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
519 				watermarks->b.cstate_pstate.pstate_change_ns;
520 		prog_wm_value = convert_and_clamp(
521 				watermarks->b.cstate_pstate.pstate_change_ns,
522 				refclk_mhz, 0x1fffff);
523 		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
524 				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value,
525 				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
526 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
527 			"HW register value = 0x%x\n\n",
528 			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
529 	} else if (watermarks->b.cstate_pstate.pstate_change_ns
530 			< hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
531 		wm_pending = false;
532 
533 	/* clock state C */
534 	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
535 			> hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
536 		hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
537 				watermarks->c.cstate_pstate.pstate_change_ns;
538 		prog_wm_value = convert_and_clamp(
539 				watermarks->c.cstate_pstate.pstate_change_ns,
540 				refclk_mhz, 0x1fffff);
541 		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
542 				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value,
543 				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
544 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
545 			"HW register value = 0x%x\n\n",
546 			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
547 	} else if (watermarks->c.cstate_pstate.pstate_change_ns
548 			< hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
549 		wm_pending = true;
550 
551 	/* clock state D */
552 	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
553 			> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
554 		hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
555 				watermarks->d.cstate_pstate.pstate_change_ns;
556 		prog_wm_value = convert_and_clamp(
557 				watermarks->d.cstate_pstate.pstate_change_ns,
558 				refclk_mhz, 0x1fffff);
559 		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
560 				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value,
561 				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
562 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
563 			"HW register value = 0x%x\n\n",
564 			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
565 	} else if (watermarks->d.cstate_pstate.pstate_change_ns
566 			< hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
567 		wm_pending = true;
568 
569 	return wm_pending;
570 }
571 
/*
 * Program all watermark groups (urgent, stutter, p-state) and the DCHub
 * arbiter rate-limiting controls, then apply the self-refresh-control
 * debug policy.
 *
 * Returns true if any watermark lowering is still pending (deferred
 * because safe_to_lower was false).
 */
bool hubbub21_program_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	bool wm_pending = false;

	if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
	 * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit request limit after figure out right number. request limit for Renoir isn't decided yet, set maximum value (0x1FF)
	 * to turn off it for now.
	 */
	/* Saturation level: 60us worth of DLG RefClk cycles. */
	REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);
	REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL,
			DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);

	/* Allow self-refresh unless stutter is disabled via debug option. */
	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

	return wm_pending;
}
615 
/*
 * Read back the currently-programmed watermarks for all four sets (A-D)
 * into *wm for debug/state reporting.
 *
 * NOTE(review): "dram_clk_chanage" is a pre-existing typo in the
 * dcn_hubbub_wm_set field name declared elsewhere; it cannot be fixed here.
 */
void hubbub21_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	/* watermark set A */
	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
			 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_chanage);

	/* watermark set B */
	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_chanage);

	/* watermark set C */
	s = &wm->sets[2];
	s->wm_set = 2;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_chanage);

	/* watermark set D */
	s = &wm->sets[3];
	s->wm_set = 3;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
}
680 
/*
 * Workaround for hardware erratum DEDCN21-147: read the urgency watermark A
 * register and write the same value back. The write itself (not a value
 * change) is what retriggers the hardware; do not optimize this away.
 */
void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}
689 
/*
 * DCN 2.1 hubbub vtable: mixes DCN 2.1-specific implementations with
 * inherited DCN 2.0 (hubbub2_*) and DCN 1.0 (hubbub1_*) callbacks.
 */
static const struct hubbub_funcs hubbub21_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub21_init_dchub,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub2_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
	.wm_read_state = hubbub21_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub21_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
};
703 
/*
 * Construct a DCN 2.1 hubbub instance: wire up the vtable and the
 * register/shift/mask tables supplied by the resource code.
 */
void hubbub21_construct(struct dcn20_hubbub *hubbub,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask)
{
	hubbub->base.ctx = ctx;

	hubbub->base.funcs = &hubbub21_funcs;

	hubbub->regs = hubbub_regs;
	hubbub->shifts = hubbub_shift;
	hubbub->masks = hubbub_mask;

	/* Watermark set index used by the p-state debug test. */
	hubbub->debug_test_index_pstate = 0xB;
	hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
	/* NOTE(review): comment above says DCN2.0 in this DCN2.1 file —
	 * presumably the detile buffer size is inherited; confirm. */
}
721