1 /*
2  * Copyright 2021 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 
27 #include "dcn30/dcn30_hubbub.h"
28 #include "dcn32_hubbub.h"
29 #include "dm_services.h"
30 #include "reg_helper.h"
31 
32 
33 #define CTX \
34 	hubbub2->base.ctx
35 #define DC_LOGGER \
36 	hubbub2->base.ctx->logger
37 #define REG(reg)\
38 	hubbub2->regs->reg
39 
40 #undef FN
41 #define FN(reg_name, field_name) \
42 	hubbub2->shifts->field_name, hubbub2->masks->field_name
43 
44 /**
45  * DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer size for
46  *                            DCN32
47  */
48 #define DCN32_CRB_SEGMENT_SIZE_KB 64
49 
/*
 * dcn32_init_crb() - Snapshot current CRB allocation and set up the
 * compressed-buffer reserved space.
 *
 * Reads back the currently programmed DET0..DET3 and COMPBUF segment
 * counts from hardware into the hubbub state so later DET/compbuf
 * reprogramming can reason about total CRB usage, then programs the
 * COMPBUF reserved space derived from the pixel chunk size and the
 * DET depth debug control.
 */
static void dcn32_init_crb(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* Cache the DET sizes (in CRB segments) currently active in HW. */
	REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT,
		&hubbub2->det0_size);

	REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT,
		&hubbub2->det1_size);

	REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT,
		&hubbub2->det2_size);

	REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT,
		&hubbub2->det3_size);

	REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT,
		&hubbub2->compbuf_size_segments);

	/* Reserved space is expressed in 64-byte and ZS units derived from
	 * the pixel chunk size (divisors 32 and 128 respectively).
	 */
	REG_SET_2(COMPBUF_RESERVED_SPACE, 0,
			COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32,
			COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128);
	/* 0x47F: DET depth value — magic constant from HW programming guide. */
	REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}
74 
/*
 * hubbub32_set_request_limit() - Program the SDP interface request rate
 * limit to 3/4 of the total words across all memory channels.
 *
 * The computed limit is clamped to 0xFFF and only written when non-zero,
 * so a degenerate configuration (zero channels/words) leaves the register
 * untouched.
 */
void hubbub32_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* 75% of total channel capacity. */
	uint32_t request_limit = 3 * memory_channel_count * words_per_channel / 4;

	/* NOTE(review): mask/clamp below treat the field as 12 bits (0xFFF);
	 * the original comment claimed 24 bits — confirm actual field width.
	 */
	ASSERT((request_limit & (~0xFFF)) == 0);
	ASSERT(request_limit > 0); /* a zero limit indicates bad channel config */

	if (request_limit > 0xFFF)
		request_limit = 0xFFF;

	if (request_limit > 0)
		REG_UPDATE(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, request_limit);
}
90 
91 
/*
 * dcn32_program_det_size() - Program the DET buffer size for one HUBP.
 *
 * @hubp_inst: HUBP instance (0..3); other values are silently ignored.
 * @det_buffer_size_in_kbyte: requested DET size in KB, rounded up to
 *                            whole 64KB CRB segments.
 *
 * Updates the cached det*_size after writing the register, then warns if
 * the sum of all DET sizes plus the compressed buffer exceeds the total
 * CRB capacity (can transiently happen during ODM reconfiguration).
 */
void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* Round up to whole CRB segments. */
	unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB;

	switch (hubp_inst) {
	case 0:
		REG_UPDATE(DCHUBBUB_DET0_CTRL,
					DET0_SIZE, det_size_segments);
		hubbub2->det0_size = det_size_segments;
		break;
	case 1:
		REG_UPDATE(DCHUBBUB_DET1_CTRL,
					DET1_SIZE, det_size_segments);
		hubbub2->det1_size = det_size_segments;
		break;
	case 2:
		REG_UPDATE(DCHUBBUB_DET2_CTRL,
					DET2_SIZE, det_size_segments);
		hubbub2->det2_size = det_size_segments;
		break;
	case 3:
		REG_UPDATE(DCHUBBUB_DET3_CTRL,
					DET3_SIZE, det_size_segments);
		hubbub2->det3_size = det_size_segments;
		break;
	default:
		break;
	}
	/* Sanity check: the total CRB budget must cover all DETs + compbuf. */
	if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
			+ hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) {
		/* This may happen during seamless transition from ODM 2:1 to ODM4:1 */
		DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) >  CRB segments (%d)\n",
						hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size,
						hubbub2->compbuf_size_segments, hubbub2->crb_size_segs);
	}
}
130 
/*
 * dcn32_program_compbuf_size() - Program the compressed buffer size.
 *
 * @compbuf_size_kb: requested size in KB, rounded up to 64KB CRB segments.
 * @safe_to_increase: when false, only a same-or-smaller size is applied
 *                    now; growth is deferred to a later safe point.
 *
 * When growing the compbuf, first waits for any in-flight DET shrink to
 * complete (DET*_SIZE_CURRENT reaching the cached target) so DET and
 * compbuf never transiently overlap in the CRB.
 */
static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	unsigned int compbuf_size_segments = (compbuf_size_kb + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB;

	if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) {
		if (compbuf_size_segments > hubbub2->compbuf_size_segments) {
			/* Growing: wait (1us poll, 100 retries) until each DET has
			 * shrunk to its programmed size before taking the space.
			 */
			REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100);
		}
		/* Should never be hit, if it is we have an erroneous hw config*/
		ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
				+ hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs);
		REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments);
		hubbub2->compbuf_size_segments = compbuf_size_segments;
		/* compbuf_size_segments is reused here as scratch for the
		 * CONFIG_ERROR readback; assert the read succeeds and no
		 * configuration error is flagged.
		 */
		ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments);
	}
}
151 
/*
 * convert_and_clamp() - Convert a watermark from nanoseconds to refclk
 * cycles and clamp it to the register field's maximum value.
 *
 * @wm_ns:       watermark in nanoseconds
 * @refclk_mhz:  reference clock in MHz
 * @clamp_value: maximum representable value for the destination field
 *
 * Fix: the intermediate product wm_ns * refclk_mhz was computed in
 * 32 bits and could silently wrap for large watermarks (e.g. 10ms at
 * 1000 MHz refclk), producing a bogus small value instead of the
 * clamped maximum. Use a 64-bit intermediate so overflow cannot occur.
 * (In-kernel builds on 32-bit targets would use div_u64() for the
 * 64-bit division; the divisor is a constant 1000 here.)
 */
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint64_t ret_val = (uint64_t)wm_ns * refclk_mhz;

	ret_val /= 1000;

	if (ret_val > clamp_value)
		ret_val = clamp_value;

	/* ret_val <= clamp_value <= UINT32_MAX, so the narrowing is safe. */
	return (uint32_t)ret_val;
}
167 
/*
 * hubbub32_program_urgent_watermarks() - Program urgent, fractional
 * urgent bandwidth, and urgent latency watermarks for clock states A-D.
 *
 * For each watermark: if @safe_to_lower, or the new value raises the
 * cached value, convert to refclk cycles, clamp to the field width, and
 * write the register (updating the cache). Otherwise, if the new value
 * is lower than what is programmed, leave HW untouched and report the
 * change as pending.
 *
 * Return: true if at least one watermark still needs lowering at a
 * later, safe point; false if everything requested is programmed.
 */
bool hubbub32_program_urgent_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Repeat for water mark set A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
		hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub2->watermarks.a.frac_urg_bw_flip) {
		hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		/* frac_urg_bw values are written raw (no ns->cycles conversion). */
		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
	} else if (watermarks->a.frac_urg_bw_flip
			< hubbub2->watermarks.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub2->watermarks.a.frac_urg_bw_nom) {
		hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
	} else if (watermarks->a.frac_urg_bw_nom
			< hubbub2->watermarks.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
		hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
	} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
		hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->b.frac_urg_bw_flip
			> hubbub2->watermarks.b.frac_urg_bw_flip) {
		hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip);
	} else if (watermarks->b.frac_urg_bw_flip
			< hubbub2->watermarks.b.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.frac_urg_bw_nom
			> hubbub2->watermarks.b.frac_urg_bw_nom) {
		hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom);
	} else if (watermarks->b.frac_urg_bw_nom
			< hubbub2->watermarks.b.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
		hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
	} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
		hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->c.frac_urg_bw_flip
			> hubbub2->watermarks.c.frac_urg_bw_flip) {
		hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip);
	} else if (watermarks->c.frac_urg_bw_flip
			< hubbub2->watermarks.c.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.frac_urg_bw_nom
			> hubbub2->watermarks.c.frac_urg_bw_nom) {
		hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom);
	} else if (watermarks->c.frac_urg_bw_nom
			< hubbub2->watermarks.c.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
		hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
	} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
		hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->d.frac_urg_bw_flip
			> hubbub2->watermarks.d.frac_urg_bw_flip) {
		hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip);
	} else if (watermarks->d.frac_urg_bw_flip
			< hubbub2->watermarks.d.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.frac_urg_bw_nom
			> hubbub2->watermarks.d.frac_urg_bw_nom) {
		hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom);
	} else if (watermarks->d.frac_urg_bw_nom
			< hubbub2->watermarks.d.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
		hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
	} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
		wm_pending = true;

	return wm_pending;
}
357 
/*
 * hubbub32_program_stutter_watermarks() - Program self-refresh (stutter)
 * enter+exit and exit watermarks for clock states A-D.
 *
 * Same raise-now / lower-when-safe policy as the urgent watermarks:
 * values are written only if @safe_to_lower or if they raise the cached
 * value; a requested decrease that is not yet safe is reported via the
 * return value.
 *
 * Return: true if a watermark decrease is still pending, false otherwise.
 */
bool hubbub32_program_stutter_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		/* SR watermark fields are 16 bits wide (clamp 0xffff). */
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns =
				watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	return wm_pending;
}
502 
503 
/*
 * hubbub32_program_pstate_watermarks() - Program UCLK and FCLK p-state
 * change watermarks for clock states A-D.
 *
 * Same raise-now / lower-when-safe policy as the other watermark
 * programming functions: each value is written only if @safe_to_lower
 * or if it raises the cached value; otherwise a requested decrease is
 * reported as pending.
 *
 * Return: true if a watermark decrease is still pending, false otherwise.
 */
bool hubbub32_program_pstate_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	bool wm_pending = false;

	/* Section for UCLK_PSTATE_CHANGE_WATERMARKS */
	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		/* p-state watermark fields are 16 bits wide (clamp 0xffff). */
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.a.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.b.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.c.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.d.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* Section for FCLK_PSTATE_CHANGE_WATERMARKS */
	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns =
				watermarks->a.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns =
				watermarks->b.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns =
				watermarks->c.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns =
				watermarks->d.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	return wm_pending;
}
655 
656 
/*
 * Program the USR (user-retraining) watermarks for clock states A-D.
 *
 * For each state the hardware register is rewritten only when lowering is
 * permitted (safe_to_lower) or the requested watermark is higher than the
 * cached one.  A lower request while !safe_to_lower is deferred and
 * reported to the caller via the return value.
 *
 * Returns true when at least one watermark is left pending.
 */
bool hubbub32_program_usr_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t wm_reg_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.usr_retraining_ns
			> hubbub2->watermarks.a.usr_retraining_ns) {
		wm_reg_value = convert_and_clamp(
				watermarks->a.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		hubbub2->watermarks.a.usr_retraining_ns = watermarks->a.usr_retraining_ns;
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, wm_reg_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.usr_retraining_ns, wm_reg_value);
	} else if (watermarks->a.usr_retraining_ns
			< hubbub2->watermarks.a.usr_retraining_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.usr_retraining_ns
			> hubbub2->watermarks.b.usr_retraining_ns) {
		wm_reg_value = convert_and_clamp(
				watermarks->b.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		hubbub2->watermarks.b.usr_retraining_ns = watermarks->b.usr_retraining_ns;
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, wm_reg_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.usr_retraining_ns, wm_reg_value);
	} else if (watermarks->b.usr_retraining_ns
			< hubbub2->watermarks.b.usr_retraining_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.usr_retraining_ns
			> hubbub2->watermarks.c.usr_retraining_ns) {
		wm_reg_value = convert_and_clamp(
				watermarks->c.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		hubbub2->watermarks.c.usr_retraining_ns = watermarks->c.usr_retraining_ns;
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, wm_reg_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.usr_retraining_ns, wm_reg_value);
	} else if (watermarks->c.usr_retraining_ns
			< hubbub2->watermarks.c.usr_retraining_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.usr_retraining_ns
			> hubbub2->watermarks.d.usr_retraining_ns) {
		wm_reg_value = convert_and_clamp(
				watermarks->d.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		hubbub2->watermarks.d.usr_retraining_ns = watermarks->d.usr_retraining_ns;
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, wm_reg_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.usr_retraining_ns, wm_reg_value);
	} else if (watermarks->d.usr_retraining_ns
			< hubbub2->watermarks.d.usr_retraining_ns)
		wm_pending = true;

	return wm_pending;
}
736 
/*
 * Force the arbiter's "allow USR retraining" signal to a fixed state.
 *
 * FORCE_ENABLE = 1 activates the override; FORCE_VALUE is the state forced
 * while the override is active.  Both fields are driven from @allow, so
 * passing false clears the override as well as the forced value.
 */
void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* Both fields land in one read-modify-write of the CNTL register. */
	REG_UPDATE_2(DCHUBBUB_ARB_USR_RETRAINING_CNTL,
			DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE, allow,
			DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE, allow);
}
750 
/*
 * Program every DCN32 watermark group (urgent, stutter, p-state and USR)
 * and then apply the stutter / USR-retraining debug overrides.
 *
 * Returns true when any group deferred a watermark because lowering was
 * not yet safe; the caller is expected to re-run with safe_to_lower set.
 */
static bool hubbub32_program_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	bool wm_pending = false;

	/* Each helper runs unconditionally; accumulate their pending flags. */
	wm_pending |= hubbub32_program_urgent_watermarks(hubbub, watermarks,
			refclk_mhz, safe_to_lower);
	wm_pending |= hubbub32_program_stutter_watermarks(hubbub, watermarks,
			refclk_mhz, safe_to_lower);
	wm_pending |= hubbub32_program_pstate_watermarks(hubbub, watermarks,
			refclk_mhz, safe_to_lower);
	wm_pending |= hubbub32_program_usr_watermarks(hubbub, watermarks,
			refclk_mhz, safe_to_lower);

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the
	 * DCHub request stream to the fabric.  If the memory controller is
	 * fully utilized and the DCHub requestors are well ahead of their
	 * amortized schedule, then it is safe to prevent the next winner from
	 * being committed and sent to the fabric.  Memory-controller
	 * utilization is approximated by requiring the number of outstanding
	 * requests to exceed ARB_MIN_REQ_OUTSTANDING; "well ahead of
	 * schedule" compares the next winner's slack against ARB_SAT_LEVEL in
	 * DLG RefClk cycles.
	 *
	 * TODO: revisit once the right request limit is known; for now it is
	 * left disabled (maximum value 0x1FF) and the programming below stays
	 * commented out.
	 */
	/*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

	hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);

	return wm_pending;
}
795 
796 /* Copy values from WM set A to all other sets */
/*
 * Seed watermark sets B, C and D from the currently programmed set A by
 * copying each A register value into its B/C/D counterparts.  Covers the
 * data-urgency, fractional-urgent-bandwidth (flip and nominal),
 * trip-to-memory, SR enter/exit, USR retraining and UCLK/FCLK p-state
 * change watermark registers.
 */
static void hubbub32_init_watermarks(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t reg;

	/* Data urgency watermarks */
	reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, reg);

	/* Fractional urgent bandwidth, flip case */
	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, reg);

	/* Fractional urgent bandwidth, nominal case */
	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, reg);

	/* Round-trip-to-memory time in refclk cycles */
	reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, reg);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, reg);

	/* Self-refresh enter watermarks */
	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, reg);

	/* Self-refresh exit watermarks */
	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, reg);

	/* USR retraining watermarks */
	reg = REG_READ(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, reg);

	/* UCLK p-state change watermarks */
	reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, reg);

	/* FCLK p-state change watermarks */
	reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, reg);
}
847 
/*
 * Read the currently programmed watermarks for sets A-D back from hardware
 * into @wm.  For each set this captures data urgency, SR enter/exit, UCLK
 * p-state change, USR retraining and FCLK p-state change.  @wm is zeroed
 * first, so fields this function does not fill remain 0.
 */
static void hubbub32_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	/* Watermark set A */
	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A,
			 DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A,
			 DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A,
			 DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, &s->fclk_pstate_change);

	/* Watermark set B */
	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B,
			 DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, &s->fclk_pstate_change);

	/* Watermark set C */
	s = &wm->sets[2];
	s->wm_set = 2;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C,
			 DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, &s->fclk_pstate_change);

	/* Watermark set D */
	s = &wm->sets[3];
	s->wm_set = 3;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D,
			 DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, &s->fclk_pstate_change);
}
936 
hubbub32_force_wm_propagate_to_pipes(struct hubbub * hubbub)937 void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub)
938 {
939 	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
940 	uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
941 	uint32_t prog_wm_value = convert_and_clamp(hubbub2->watermarks.a.urgent_ns,
942 			refclk_mhz, 0x3fff);
943 
944 	REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
945 			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
946 }
947 
/*
 * One-time DCN32 HUBBUB hardware init: clock-gating debug override, SDP
 * port control, and the outstanding-request limits toward the fabric.
 */
void hubbub32_init(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* When the debug flag is set, keep the HUBBUB DISPCLK/DCFCLK gates
	 * disabled (GATE_DIS = 1, i.e. clocks ungated). */
	if (hubbub->ctx->dc->debug.disable_clock_gate) {
		/*done in hwseq*/
		/*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/

		REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
			DISPCLK_R_DCHUBBUB_GATE_DIS, 1,
			DCFCLK_R_DCHUBBUB_GATE_DIS, 1);
	}
	/*
	ignore the "df_pre_cstate_req" from the SDP port control.
	only the DCN will determine when to connect the SDP port
	*/
	REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
			SDPIF_PORT_CONTROL, 1);
	/*Set SDP's max outstanding request to 512
	must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/
	REG_UPDATE(DCHUBBUB_SDPIF_CFG1,
			SDPIF_MAX_NUM_OUTSTANDING, 1);
	/*must set the registers back to 256 in zero frame buffer mode*/
	REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 512,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 512);
}
976 
/* DCN32 HUBBUB function table: DCN32-specific entry points mixed with
 * reused DCN1/DCN2/DCN3 implementations where behavior is unchanged. */
static const struct hubbub_funcs hubbub32_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub3_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub3_get_dcc_compression_cap,
	.wm_read_state = hubbub32_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub32_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
	.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
	.force_pstate_change_control = hubbub3_force_pstate_change_control,
	.init_watermarks = hubbub32_init_watermarks,
	.program_det_size = dcn32_program_det_size,
	.program_compbuf_size = dcn32_program_compbuf_size,
	.init_crb = dcn32_init_crb,
	.hubbub_read_state = hubbub2_read_state,
	.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
	.set_request_limit = hubbub32_set_request_limit
};
1000 
hubbub32_construct(struct dcn20_hubbub * hubbub2,struct dc_context * ctx,const struct dcn_hubbub_registers * hubbub_regs,const struct dcn_hubbub_shift * hubbub_shift,const struct dcn_hubbub_mask * hubbub_mask,int det_size_kb,int pixel_chunk_size_kb,int config_return_buffer_size_kb)1001 void hubbub32_construct(struct dcn20_hubbub *hubbub2,
1002 	struct dc_context *ctx,
1003 	const struct dcn_hubbub_registers *hubbub_regs,
1004 	const struct dcn_hubbub_shift *hubbub_shift,
1005 	const struct dcn_hubbub_mask *hubbub_mask,
1006 	int det_size_kb,
1007 	int pixel_chunk_size_kb,
1008 	int config_return_buffer_size_kb)
1009 {
1010 	hubbub2->base.ctx = ctx;
1011 	hubbub2->base.funcs = &hubbub32_funcs;
1012 	hubbub2->regs = hubbub_regs;
1013 	hubbub2->shifts = hubbub_shift;
1014 	hubbub2->masks = hubbub_mask;
1015 
1016 	hubbub2->debug_test_index_pstate = 0xB;
1017 	hubbub2->detile_buf_size = det_size_kb * 1024;
1018 	hubbub2->pixel_chunk_size = pixel_chunk_size_kb * 1024;
1019 	hubbub2->crb_size_segs = config_return_buffer_size_kb / DCN32_CRB_SEGMENT_SIZE_KB;
1020 }
1021