/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */


#include "dcn30/dcn30_hubbub.h"
#include "dcn32_hubbub.h"
#include "dm_services.h"
#include "reg_helper.h"


#define CTX \
	hubbub2->base.ctx
#define DC_LOGGER \
	hubbub2->base.ctx->logger
#define REG(reg)\
	hubbub2->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hubbub2->shifts->field_name, hubbub2->masks->field_name

/**
 * DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer segment size
 * for DCN32, in kbytes
 */
#define DCN32_CRB_SEGMENT_SIZE_KB 64

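/*
 * Read back the DET and compressed buffer allocations currently programmed in
 * hardware (in CRB segments) so the driver-side bookkeeping starts in sync
 * with hardware, then program the compressed buffer reserved space from the
 * pixel chunk size and set the DET depth debug field.
 */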
static void dcn32_init_crb(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT,
		&hubbub2->det0_size);

	REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT,
		&hubbub2->det1_size);

	REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT,
		&hubbub2->det2_size);

	REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT,
		&hubbub2->det3_size);

	REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT,
		&hubbub2->compbuf_size_segments);

	REG_SET_2(COMPBUF_RESERVED_SPACE, 0,
			COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32,
			COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128);
	REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}

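/*
 * Program the DET allocation for one HUBP instance. The requested size in
 * kbytes is rounded up to whole CRB segments; for example, a 192 KB request
 * maps to (192 + 64 - 1) / 64 = 3 segments, while a 100 KB request still
 * occupies 2 segments. If the new total of DET plus compressed buffer exceeds
 * the CRB, only a warning is logged, since this can happen transiently during
 * an ODM reconfiguration.
 */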
void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB;

	switch (hubp_inst) {
	case 0:
		REG_UPDATE(DCHUBBUB_DET0_CTRL,
					DET0_SIZE, det_size_segments);
		hubbub2->det0_size = det_size_segments;
		break;
	case 1:
		REG_UPDATE(DCHUBBUB_DET1_CTRL,
					DET1_SIZE, det_size_segments);
		hubbub2->det1_size = det_size_segments;
		break;
	case 2:
		REG_UPDATE(DCHUBBUB_DET2_CTRL,
					DET2_SIZE, det_size_segments);
		hubbub2->det2_size = det_size_segments;
		break;
	case 3:
		REG_UPDATE(DCHUBBUB_DET3_CTRL,
					DET3_SIZE, det_size_segments);
		hubbub2->det3_size = det_size_segments;
		break;
	default:
		break;
	}
	if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
			+ hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) {
		/* This may happen during a seamless transition from ODM 2:1 to ODM 4:1 */
		DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) > CRB segments (%d)\n",
						hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size,
						hubbub2->compbuf_size_segments, hubbub2->crb_size_segs);
	}
}

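/*
 * Program the compressed buffer (compbuf) allocation in CRB segments.
 * Shrinking is always applied; growing only happens when safe_to_increase is
 * set, and in that case the function first waits until the DET carve-outs
 * have reached their programmed sizes before claiming the extra segments.
 */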
static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	unsigned int compbuf_size_segments = (compbuf_size_kb + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB;

	if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) {
		if (compbuf_size_segments > hubbub2->compbuf_size_segments) {
			REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100);
		}
		/* Should never be hit; if it is, we have an erroneous HW config */
		ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
				+ hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs);
		REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments);
		hubbub2->compbuf_size_segments = compbuf_size_segments;
		ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments);
	}
}

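/*
 * Convert a watermark expressed in nanoseconds into DCHUB reference clock
 * cycles (wm_ns * refclk_mhz / 1000) and clamp the result to clamp_value,
 * which callers set to the maximum the destination register field can hold.
 */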
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t ret_val = wm_ns * refclk_mhz / 1000;

	if (ret_val > clamp_value)
		ret_val = clamp_value;

	return ret_val;
}

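/*
 * Program the urgent watermark group (data urgency, fractional urgent
 * bandwidth for flip and nominal, and refcyc-per-trip-to-memory urgent
 * latency) for clock states A through D. A watermark is only lowered when
 * safe_to_lower is set; the function returns true if a lower value is still
 * pending because lowering was deferred.
 */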
bool hubbub32_program_urgent_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Repeat for watermark set A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
		hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns)
		wm_pending = true;

	/* Determine the transfer time for a quantity of data for a particular requestor. */
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub2->watermarks.a.frac_urg_bw_flip) {
		hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
	} else if (watermarks->a.frac_urg_bw_flip
			< hubbub2->watermarks.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub2->watermarks.a.frac_urg_bw_nom) {
		hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
	} else if (watermarks->a.frac_urg_bw_nom
			< hubbub2->watermarks.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
		hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
	} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
		hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns)
		wm_pending = true;

	/* Determine the transfer time for a quantity of data for a particular requestor. */
	if (safe_to_lower || watermarks->b.frac_urg_bw_flip
			> hubbub2->watermarks.b.frac_urg_bw_flip) {
		hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip);
	} else if (watermarks->b.frac_urg_bw_flip
			< hubbub2->watermarks.b.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.frac_urg_bw_nom
			> hubbub2->watermarks.b.frac_urg_bw_nom) {
		hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom);
	} else if (watermarks->b.frac_urg_bw_nom
			< hubbub2->watermarks.b.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
		hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
	} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
		hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns)
		wm_pending = true;

	/* Determine the transfer time for a quantity of data for a particular requestor. */
	if (safe_to_lower || watermarks->c.frac_urg_bw_flip
			> hubbub2->watermarks.c.frac_urg_bw_flip) {
		hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip);
	} else if (watermarks->c.frac_urg_bw_flip
			< hubbub2->watermarks.c.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.frac_urg_bw_nom
			> hubbub2->watermarks.c.frac_urg_bw_nom) {
		hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom);
	} else if (watermarks->c.frac_urg_bw_nom
			< hubbub2->watermarks.c.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
		hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
	} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
		hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns)
		wm_pending = true;

	/* Determine the transfer time for a quantity of data for a particular requestor. */
	if (safe_to_lower || watermarks->d.frac_urg_bw_flip
			> hubbub2->watermarks.d.frac_urg_bw_flip) {
		hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip);
	} else if (watermarks->d.frac_urg_bw_flip
			< hubbub2->watermarks.d.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.frac_urg_bw_nom
			> hubbub2->watermarks.d.frac_urg_bw_nom) {
		hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom);
	} else if (watermarks->d.frac_urg_bw_nom
			< hubbub2->watermarks.d.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
		hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
	} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
		wm_pending = true;

	return wm_pending;
}

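/*
 * Program the stutter (self-refresh) watermark group: the SR enter-plus-exit
 * and SR exit watermarks for clock states A through D, with the same
 * safe_to_lower / pending semantics as the urgent watermarks.
 */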
bool hubbub32_program_stutter_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns =
				watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	return wm_pending;
}


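/*
 * Program the p-state change watermark group: UCLK (DRAM) p-state change and
 * FCLK p-state change watermarks for clock states A through D, with the same
 * safe_to_lower / pending semantics as the other watermark groups.
 */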
bool hubbub32_program_pstate_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	bool wm_pending = false;

	/* Section for UCLK_PSTATE_CHANGE_WATERMARKS */
	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.a.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.b.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.c.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.d.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* Section for FCLK_PSTATE_CHANGE_WATERMARKS */
	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns =
				watermarks->a.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns =
				watermarks->b.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns =
				watermarks->c.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns =
				watermarks->d.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	return wm_pending;
}


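/*
 * Program the USR retraining watermarks for clock states A through D, with
 * the same safe_to_lower / pending semantics as the other watermark groups.
 */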
bool hubbub32_program_usr_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.usr_retraining_ns
			> hubbub2->watermarks.a.usr_retraining_ns) {
		hubbub2->watermarks.a.usr_retraining_ns = watermarks->a.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->a.usr_retraining_ns
			< hubbub2->watermarks.a.usr_retraining_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.usr_retraining_ns
			> hubbub2->watermarks.b.usr_retraining_ns) {
		hubbub2->watermarks.b.usr_retraining_ns = watermarks->b.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->b.usr_retraining_ns
			< hubbub2->watermarks.b.usr_retraining_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.usr_retraining_ns
			> hubbub2->watermarks.c.usr_retraining_ns) {
		hubbub2->watermarks.c.usr_retraining_ns =
				watermarks->c.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->c.usr_retraining_ns
			< hubbub2->watermarks.c.usr_retraining_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.usr_retraining_ns
			> hubbub2->watermarks.d.usr_retraining_ns) {
		hubbub2->watermarks.d.usr_retraining_ns =
				watermarks->d.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->d.usr_retraining_ns
			< hubbub2->watermarks.d.usr_retraining_ns)
		wm_pending = true;

	return wm_pending;
}

void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/*
	 * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE = 1 enables forcing the value.
	 * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE (1 or 0) is the value that is
	 * forced while force enable is set.
	 */

	REG_UPDATE_2(DCHUBBUB_ARB_USR_RETRAINING_CNTL,
			DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE, allow,
			DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE, allow);
}

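/*
 * Program all four watermark groups (urgent, stutter, p-state, USR) and apply
 * the self-refresh and USR retraining debug overrides. Returns true if any
 * watermark could not be lowered yet and is still pending.
 */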
static bool hubbub32_program_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	bool wm_pending = false;

	if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_usr_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well
	 * ahead of the amortized schedule, the slack of the next winner is compared
	 * with ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit the request limit once the right number is known. The request
	 * limit for RM isn't decided yet, so program the maximum value (0x1FF) to
	 * disable it for now.
	 */
	/*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

	hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);

	return wm_pending;
}

/* Copy values from WM set A to all other sets */
static void hubbub32_init_watermarks(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t reg;

	reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, reg);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, reg);
}

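/*
 * Read the currently programmed watermark registers for all four watermark
 * sets back into the dcn_hubbub_wm snapshot structure.
 */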
static void hubbub32_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_chanage);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, &s->fclk_pstate_change);

	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_chanage);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, &s->fclk_pstate_change);

	s = &wm->sets[2];
	s->wm_set = 2;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_chanage);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, &s->fclk_pstate_change);

	s = &wm->sets[3];
	s->wm_set = 3;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_chanage);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, &s->fclk_pstate_change);
}

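/*
 * Re-program the clock state A urgent data watermark from the cached value so
 * the watermark update is forced to propagate to the pipes.
 */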
void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	uint32_t prog_wm_value = convert_and_clamp(hubbub2->watermarks.a.urgent_ns,
			refclk_mhz, 0x3fff);

	REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}

static const struct hubbub_funcs hubbub32_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub3_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub3_get_dcc_compression_cap,
	.wm_read_state = hubbub32_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub32_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
	.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
	.force_pstate_change_control = hubbub3_force_pstate_change_control,
	.init_watermarks = hubbub32_init_watermarks,
	.program_det_size = dcn32_program_det_size,
	.program_compbuf_size = dcn32_program_compbuf_size,
	.init_crb = dcn32_init_crb,
	.hubbub_read_state = hubbub2_read_state,
	.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
};

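/*
 * Construct the DCN3.2 HUBBUB wrapper: hook up the function table and the
 * register, shift and mask tables, and derive the detile buffer size, pixel
 * chunk size and total CRB size (in segments) from the caller-supplied
 * parameters.
 */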
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask,
	int det_size_kb,
	int pixel_chunk_size_kb,
	int config_return_buffer_size_kb)
{
	hubbub2->base.ctx = ctx;
	hubbub2->base.funcs = &hubbub32_funcs;
	hubbub2->regs = hubbub_regs;
	hubbub2->shifts = hubbub_shift;
	hubbub2->masks = hubbub_mask;

	hubbub2->debug_test_index_pstate = 0xB;
	hubbub2->detile_buf_size = det_size_kb * 1024;
	hubbub2->pixel_chunk_size = pixel_chunk_size_kb * 1024;
	hubbub2->crb_size_segs = config_return_buffer_size_kb / DCN32_CRB_SEGMENT_SIZE_KB;
}