1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 
28 #include "dm_services.h"
29 #include "dcn10_hubp.h"
30 #include "dcn10_hubbub.h"
31 #include "reg_helper.h"
32 
33 #define CTX \
34 	hubbub1->base.ctx
35 #define DC_LOGGER \
36 	hubbub1->base.ctx->logger
37 #define REG(reg)\
38 	hubbub1->regs->reg
39 
40 #undef FN
41 #define FN(reg_name, field_name) \
42 	hubbub1->shifts->field_name, hubbub1->masks->field_name
43 
/**
 * hubbub1_wm_read_state - snapshot the programmed HUBBUB watermark registers
 * @hubbub: hubbub instance to read from
 * @wm: output; zeroed first, then filled with watermark sets A-D
 *
 * Reads back the data urgency, PTE/meta urgency, self-refresh enter/exit
 * and DRAM-clock-change watermarks for each of the four clock states so
 * callers can report what the hardware is actually using.
 */
void hubbub1_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	/* watermark set A */
	s = &wm->sets[0];
	s->wm_set = 0;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
	/* REG() expands to the register offset from the reg list; the SR
	 * watermark registers are only read where a non-zero offset is
	 * defined for this variant. */
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	}
	/* NOTE(review): 'dram_clk_chanage' (sic) is the field's actual
	 * spelling in struct dcn_hubbub_wm_set; fixing the typo requires
	 * a header change touching all users. */
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);

	/* watermark set B */
	s = &wm->sets[1];
	s->wm_set = 1;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);

	/* watermark set C */
	s = &wm->sets[2];
	s->wm_set = 2;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);

	/* watermark set D */
	s = &wm->sets[3];
	s->wm_set = 3;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
}
92 
93 void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
94 {
95 	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
96 
97 	/*
98 	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter
99 	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter
100 	 */
101 
102 	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
103 			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
104 			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow);
105 }
106 
107 bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
108 {
109 	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
110 	uint32_t enable = 0;
111 
112 	REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
113 			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);
114 
115 	return enable ? true : false;
116 }
117 
118 
/**
 * hubbub1_verify_allow_pstate_change_high - poll the arbiter's pstate allow
 * @hubbub: hubbub instance
 *
 * Selects the pstate debug bus entry and polls the arbiter's
 * allow_pstate_change bit for up to 200 us.  Returns true once the bit
 * asserts (warning if it took longer than the expected 40 us).  On
 * timeout, forces pstate allow via the DRAM state override (to avoid a
 * system hang), remembers that force in a static flag so the next call
 * can undo it before sampling, logs the raw debug data and returns false.
 */
bool hubbub1_verify_allow_pstate_change_high(
	struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	/* pstate latency is ~20us so if we wait over 40us and pstate allow
	 * still not asserted, we are probably stuck and going to hang
	 *
	 * TODO: Figure out why it takes ~100us on linux
	 * pstate takes around ~100us on linux. Unknown currently as to
	 * why it takes that long on linux
	 */
	const unsigned int pstate_wait_timeout_us = 200;
	const unsigned int pstate_wait_expected_timeout_us = 40;
	static unsigned int max_sampled_pstate_wait_us; /* data collection */
	static bool forced_pstate_allow; /* help with revert wa */

	unsigned int debug_data;
	unsigned int i;

	if (forced_pstate_allow) {
		/* we hacked to force pstate allow to prevent hang last time
		 * we verify_allow_pstate_change_high.  so disable force
		 * here so we can check status
		 */
		REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
		forced_pstate_allow = false;
	}

	/* The following table only applies to DCN1 and DCN2,
	 * for newer DCNs, need to consult with HW IP folks to read RTL
	 * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
	 * description
	 * 0:     Pipe0 Plane0 Allow Pstate Change
	 * 1:     Pipe0 Plane1 Allow Pstate Change
	 * 2:     Pipe0 Cursor0 Allow Pstate Change
	 * 3:     Pipe0 Cursor1 Allow Pstate Change
	 * 4:     Pipe1 Plane0 Allow Pstate Change
	 * 5:     Pipe1 Plane1 Allow Pstate Change
	 * 6:     Pipe1 Cursor0 Allow Pstate Change
	 * 7:     Pipe1 Cursor1 Allow Pstate Change
	 * 8:     Pipe2 Plane0 Allow Pstate Change
	 * 9:     Pipe2 Plane1 Allow Pstate Change
	 * 10:    Pipe2 Cursor0 Allow Pstate Change
	 * 11:    Pipe2 Cursor1 Allow Pstate Change
	 * 12:    Pipe3 Plane0 Allow Pstate Change
	 * 13:    Pipe3 Plane1 Allow Pstate Change
	 * 14:    Pipe3 Cursor0 Allow Pstate Change
	 * 15:    Pipe3 Cursor1 Allow Pstate Change
	 * 16:    Pipe4 Plane0 Allow Pstate Change
	 * 17:    Pipe4 Plane1 Allow Pstate Change
	 * 18:    Pipe4 Cursor0 Allow Pstate Change
	 * 19:    Pipe4 Cursor1 Allow Pstate Change
	 * 20:    Pipe5 Plane0 Allow Pstate Change
	 * 21:    Pipe5 Plane1 Allow Pstate Change
	 * 22:    Pipe5 Cursor0 Allow Pstate Change
	 * 23:    Pipe5 Cursor1 Allow Pstate Change
	 * 24:    Pipe6 Plane0 Allow Pstate Change
	 * 25:    Pipe6 Plane1 Allow Pstate Change
	 * 26:    Pipe6 Cursor0 Allow Pstate Change
	 * 27:    Pipe6 Cursor1 Allow Pstate Change
	 * 28:    WB0 Allow Pstate Change
	 * 29:    WB1 Allow Pstate Change
	 * 30:    Arbiter's allow_pstate_change
	 * 31:    SOC pstate change request
	 */

	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub1->debug_test_index_pstate);

	for (i = 0; i < pstate_wait_timeout_us; i++) {
		debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);

		/* bit 30: arbiter's allow_pstate_change (see table above) */
		if (debug_data & (1 << 30)) {

			if (i > pstate_wait_expected_timeout_us)
				DC_LOG_WARNING("pstate took longer than expected ~%dus\n",
						i);

			return true;
		}
		if (max_sampled_pstate_wait_us < i)
			max_sampled_pstate_wait_us = i;

		udelay(1);
	}

	/* force pstate allow to prevent system hang
	 * and break to debugger to investigate
	 */
	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
	forced_pstate_allow = true;

	DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
			debug_data);

	return false;
}
220 
221 static uint32_t convert_and_clamp(
222 	uint32_t wm_ns,
223 	uint32_t refclk_mhz,
224 	uint32_t clamp_value)
225 {
226 	uint32_t ret_val = 0;
227 	ret_val = wm_ns * refclk_mhz;
228 	ret_val /= 1000;
229 
230 	if (ret_val > clamp_value)
231 		ret_val = clamp_value;
232 
233 	return ret_val;
234 }
235 
236 
/**
 * hubbub1_wm_change_req_wa - pulse the watermark change request bit
 * @hubbub: hubbub instance
 *
 * Workaround ('wa') that writes WATERMARK_CHANGE_REQUEST to 0 and then 1
 * in sequence — presumably so the arbiter sees a fresh edge and re-latches
 * the programmed watermarks; confirm intent against register spec.
 */
void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	REG_UPDATE_SEQ_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}
245 
/**
 * hubbub1_program_urgent_watermarks - program data/PTE-meta urgency watermarks
 * @hubbub: hubbub instance
 * @watermarks: requested watermark values, in nanoseconds
 * @refclk_mhz: reference clock used to convert ns to refclk cycles
 * @safe_to_lower: when false, a register is only rewritten if the new value
 *                 is higher (raising is always safe); lowering is deferred
 *
 * The same pattern repeats for clock states A-D: if the value changes in a
 * permitted direction, cache it in hubbub1->watermarks, convert to refclk
 * cycles clamped to the 21-bit field (0x1fffff) and write the register.
 *
 * Return: true if some value could not be lowered yet (a watermark change
 * is still pending), false if the hardware fully matches @watermarks.
 */
bool hubbub1_program_urgent_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Repeat for water mark set A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
		hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
		hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
		hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
		hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
		hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
		hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
		hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
		hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.pte_meta_urgent_ns, prog_wm_value);
	} else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns)
		wm_pending = true;

	return wm_pending;
}
359 
/**
 * hubbub1_program_stutter_watermarks - program self-refresh enter/exit watermarks
 * @hubbub: hubbub instance
 * @watermarks: requested watermark values, in nanoseconds
 * @refclk_mhz: reference clock used to convert ns to refclk cycles
 * @safe_to_lower: when false, a register is only rewritten if the new value
 *                 is higher (raising is always safe); lowering is deferred
 *
 * Same repeating pattern as the urgency watermarks, for the self-refresh
 * (stutter) enter+exit and exit watermarks of clock states A-D: cache the
 * new value, convert to refclk cycles clamped to the 21-bit field, write.
 *
 * Return: true if some value could not be lowered yet (a watermark change
 * is still pending), false otherwise.
 */
bool hubbub1_program_stutter_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
				watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
			< hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	return wm_pending;
}
504 
/**
 * hubbub1_program_pstate_watermarks - program DRAM-clock-change watermarks
 * @hubbub: hubbub instance
 * @watermarks: requested watermark values, in nanoseconds
 * @refclk_mhz: reference clock used to convert ns to refclk cycles
 * @safe_to_lower: when false, a register is only rewritten if the new value
 *                 is higher (raising is always safe); lowering is deferred
 *
 * Same repeating pattern as the urgency/stutter watermarks, for the
 * pstate (DRAM clock change) watermark of clock states A-D.
 *
 * Return: true if some value could not be lowered yet (a watermark change
 * is still pending), false otherwise.
 */
bool hubbub1_program_pstate_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.pstate_change_ns
			< hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	return wm_pending;
}
585 
586 bool hubbub1_program_watermarks(
587 		struct hubbub *hubbub,
588 		struct dcn_watermark_set *watermarks,
589 		unsigned int refclk_mhz,
590 		bool safe_to_lower)
591 {
592 	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
593 	bool wm_pending = false;
594 	/*
595 	 * Need to clamp to max of the register values (i.e. no wrap)
596 	 * for dcn1, all wm registers are 21-bit wide
597 	 */
598 	if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
599 		wm_pending = true;
600 
601 	if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
602 		wm_pending = true;
603 
604 	if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
605 		wm_pending = true;
606 
607 	REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
608 			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
609 	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
610 			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);
611 
612 	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
613 
614 #if 0
615 	REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
616 			DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
617 			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
618 #endif
619 	return wm_pending;
620 }
621 
/**
 * hubbub1_update_dchub - program the SDPIF frame-buffer/AGP apertures
 * @hubbub: hubbub instance
 * @dh_data: frame buffer mode plus ZFB addresses/size to program
 *
 * All aperture registers take the address shifted right by 22, i.e. they
 * are programmed with 4 MiB granularity.  Bails out (with an assert) if
 * this variant does not define the SDPIF register set.
 */
void hubbub1_update_dchub(
	struct hubbub *hubbub,
	struct dchub_init_data *dh_data)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
		ASSERT(false);
		/*should not come here*/
		return;
	}
	/* TODO: port code from dal2 */
	switch (dh_data->fb_mode) {
	case FRAME_BUFFER_MODE_ZFB_ONLY:
		/*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/
		REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
				SDPIF_FB_TOP, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
				SDPIF_FB_BASE, 0x0FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		/* AGP_TOP covers the inclusive end of the ZFB window */
		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
		/*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_LOCAL_ONLY:
		/*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
		/* local-only: disable the AGP aperture (BASE/TOP 0, BOT max) */
		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, 0X03FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, 0);
		break;
	default:
		break;
	}

	/* NOTE(review): 'dchub_initialzied' (sic) is the field's actual
	 * spelling in struct dchub_init_data; fixing it is a header change. */
	dh_data->dchub_initialzied = true;
	dh_data->dchub_info_valid = false;
}
684 
685 void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
686 {
687 	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
688 
689 	uint32_t watermark_change_req;
690 
691 	REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
692 			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);
693 
694 	if (watermark_change_req)
695 		watermark_change_req = 0;
696 	else
697 		watermark_change_req = 1;
698 
699 	REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
700 			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
701 }
702 
703 void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
704 {
705 	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
706 
707 	uint32_t reset_en = reset ? 1 : 0;
708 
709 	REG_UPDATE(DCHUBBUB_SOFT_RESET,
710 			DCHUBBUB_GLOBAL_SOFT_RESET, reset_en);
711 }
712 
713 static bool hubbub1_dcc_support_swizzle(
714 		enum swizzle_mode_values swizzle,
715 		unsigned int bytes_per_element,
716 		enum segment_order *segment_order_horz,
717 		enum segment_order *segment_order_vert)
718 {
719 	bool standard_swizzle = false;
720 	bool display_swizzle = false;
721 
722 	switch (swizzle) {
723 	case DC_SW_4KB_S:
724 	case DC_SW_64KB_S:
725 	case DC_SW_VAR_S:
726 	case DC_SW_4KB_S_X:
727 	case DC_SW_64KB_S_X:
728 	case DC_SW_VAR_S_X:
729 		standard_swizzle = true;
730 		break;
731 	case DC_SW_4KB_D:
732 	case DC_SW_64KB_D:
733 	case DC_SW_VAR_D:
734 	case DC_SW_4KB_D_X:
735 	case DC_SW_64KB_D_X:
736 	case DC_SW_VAR_D_X:
737 		display_swizzle = true;
738 		break;
739 	default:
740 		break;
741 	}
742 
743 	if (bytes_per_element == 1 && standard_swizzle) {
744 		*segment_order_horz = segment_order__contiguous;
745 		*segment_order_vert = segment_order__na;
746 		return true;
747 	}
748 	if (bytes_per_element == 2 && standard_swizzle) {
749 		*segment_order_horz = segment_order__non_contiguous;
750 		*segment_order_vert = segment_order__contiguous;
751 		return true;
752 	}
753 	if (bytes_per_element == 4 && standard_swizzle) {
754 		*segment_order_horz = segment_order__non_contiguous;
755 		*segment_order_vert = segment_order__contiguous;
756 		return true;
757 	}
758 	if (bytes_per_element == 8 && standard_swizzle) {
759 		*segment_order_horz = segment_order__na;
760 		*segment_order_vert = segment_order__contiguous;
761 		return true;
762 	}
763 	if (bytes_per_element == 8 && display_swizzle) {
764 		*segment_order_horz = segment_order__contiguous;
765 		*segment_order_vert = segment_order__non_contiguous;
766 		return true;
767 	}
768 
769 	return false;
770 }
771 
/*
 * hubbub1_dcc_support_pixel_format - DCC support check per pixel format
 * @format: surface pixel format to query
 * @bytes_per_element: out, element size in bytes (valid only on true)
 *
 * Returns true for the RGB formats DCC can compress (2/4/8 bytes per
 * element); returns false — leaving @bytes_per_element untouched — for
 * everything else (e.g. video/YUV formats).
 */
static bool hubbub1_dcc_support_pixel_format(
		enum surface_pixel_format format,
		unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		*bytes_per_element = 2;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		*bytes_per_element = 4;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		*bytes_per_element = 8;
		return true;
	default:
		return false;
	}
}
797 
/*
 * Return the width/height (in elements) of a 256-byte block for the given
 * element size.  Outputs are left untouched for unsupported element sizes,
 * matching the original behavior (callers pre-initialize them to 0).
 */
static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML.  might want to refactor DML to leverage from DML */
	/* DML : get_blk256_size */
	switch (bytes_per_element) {
	case 1:
		*blk256_width = 16;
		*blk256_height = 16;
		break;
	case 2:
		*blk256_width = 16;
		*blk256_height = 8;
		break;
	case 4:
		*blk256_width = 8;
		*blk256_height = 8;
		break;
	case 8:
		*blk256_width = 8;
		*blk256_height = 4;
		break;
	default:
		break;
	}
}
817 
/*
 * Decide, per access direction, whether the detile buffer forces half-size
 * (128B) requests instead of full 256B requests for a surface of the given
 * dimensions and element size.  true = 128B request, false = 256B request.
 */
static void hubbub1_det_request_size(
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	const unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */
	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc;
	unsigned int swath_bytes_vert_wc;

	hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);

	swath_bytes_horz_wc = width * blk256_height * bpe;
	swath_bytes_vert_wc = height * blk256_width * bpe;

	/*
	 * Fall back to half (128B) requests once two swaths no longer fit in
	 * the detile buffer; otherwise keep the full 256B request.
	 */
	*req128_horz_wc = (2 * swath_bytes_horz_wc > detile_buf_size);
	*req128_vert_wc = (2 * swath_bytes_vert_wc > detile_buf_size);
}
844 
845 static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
846 		const struct dc_dcc_surface_param *input,
847 		struct dc_surface_dcc_cap *output)
848 {
849 	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
850 	struct dc *dc = hubbub1->base.ctx->dc;
851 
852 	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
853 	enum dcc_control dcc_control;
854 	unsigned int bpe;
855 	enum segment_order segment_order_horz, segment_order_vert;
856 	bool req128_horz_wc, req128_vert_wc;
857 
858 	memset(output, 0, sizeof(*output));
859 
860 	if (dc->debug.disable_dcc == DCC_DISABLE)
861 		return false;
862 
863 	if (!hubbub1->base.funcs->dcc_support_pixel_format(input->format, &bpe))
864 		return false;
865 
866 	if (!hubbub1->base.funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
867 			&segment_order_horz, &segment_order_vert))
868 		return false;
869 
870 	hubbub1_det_request_size(input->surface_size.height,  input->surface_size.width,
871 			bpe, &req128_horz_wc, &req128_vert_wc);
872 
873 	if (!req128_horz_wc && !req128_vert_wc) {
874 		dcc_control = dcc_control__256_256_xxx;
875 	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
876 		if (!req128_horz_wc)
877 			dcc_control = dcc_control__256_256_xxx;
878 		else if (segment_order_horz == segment_order__contiguous)
879 			dcc_control = dcc_control__128_128_xxx;
880 		else
881 			dcc_control = dcc_control__256_64_64;
882 	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
883 		if (!req128_vert_wc)
884 			dcc_control = dcc_control__256_256_xxx;
885 		else if (segment_order_vert == segment_order__contiguous)
886 			dcc_control = dcc_control__128_128_xxx;
887 		else
888 			dcc_control = dcc_control__256_64_64;
889 	} else {
890 		if ((req128_horz_wc &&
891 			segment_order_horz == segment_order__non_contiguous) ||
892 			(req128_vert_wc &&
893 			segment_order_vert == segment_order__non_contiguous))
894 			/* access_dir not known, must use most constraining */
895 			dcc_control = dcc_control__256_64_64;
896 		else
897 			/* reg128 is true for either horz and vert
898 			 * but segment_order is contiguous
899 			 */
900 			dcc_control = dcc_control__128_128_xxx;
901 	}
902 
903 	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
904 		dcc_control != dcc_control__256_256_xxx)
905 		return false;
906 
907 	switch (dcc_control) {
908 	case dcc_control__256_256_xxx:
909 		output->grph.rgb.max_uncompressed_blk_size = 256;
910 		output->grph.rgb.max_compressed_blk_size = 256;
911 		output->grph.rgb.independent_64b_blks = false;
912 		break;
913 	case dcc_control__128_128_xxx:
914 		output->grph.rgb.max_uncompressed_blk_size = 128;
915 		output->grph.rgb.max_compressed_blk_size = 128;
916 		output->grph.rgb.independent_64b_blks = false;
917 		break;
918 	case dcc_control__256_64_64:
919 		output->grph.rgb.max_uncompressed_blk_size = 256;
920 		output->grph.rgb.max_compressed_blk_size = 64;
921 		output->grph.rgb.independent_64b_blks = true;
922 		break;
923 	default:
924 		ASSERT(false);
925 		break;
926 	}
927 
928 	output->capable = true;
929 	output->const_color_support = false;
930 
931 	return true;
932 }
933 
/* DCN 1.0 HUBBUB vtable; installed on hubbub->funcs by hubbub1_construct(). */
static const struct hubbub_funcs hubbub1_funcs = {
	.update_dchub = hubbub1_update_dchub,
	.dcc_support_swizzle = hubbub1_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
	.wm_read_state = hubbub1_wm_read_state,
	.program_watermarks = hubbub1_program_watermarks,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
};
944 
945 void hubbub1_construct(struct hubbub *hubbub,
946 	struct dc_context *ctx,
947 	const struct dcn_hubbub_registers *hubbub_regs,
948 	const struct dcn_hubbub_shift *hubbub_shift,
949 	const struct dcn_hubbub_mask *hubbub_mask)
950 {
951 	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
952 
953 	hubbub1->base.ctx = ctx;
954 
955 	hubbub1->base.funcs = &hubbub1_funcs;
956 
957 	hubbub1->regs = hubbub_regs;
958 	hubbub1->shifts = hubbub_shift;
959 	hubbub1->masks = hubbub_mask;
960 
961 	hubbub1->debug_test_index_pstate = 0x7;
962 	if (ctx->dce_version == DCN_VERSION_1_01)
963 		hubbub1->debug_test_index_pstate = 0xB;
964 }
965 
966