/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/delay.h>

#include "dm_services.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "reg_helper.h"

#define CTX \
	hubbub1->base.ctx
#define DC_LOGGER \
	hubbub1->base.ctx->logger
#define REG(reg)\
	hubbub1->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hubbub1->shifts->field_name, hubbub1->masks->field_name

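/*
 * Snapshot the currently programmed watermark sets A-D into *wm so callers
 * (e.g. debug/logging paths) can inspect them.  Self-refresh enter/exit
 * values are only read when those registers exist for this HUBBUB instance.
 */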
void hubbub1_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	s = &wm->sets[0];
	s->wm_set = 0;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);

	s = &wm->sets[1];
	s->wm_set = 1;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);

	s = &wm->sets[2];
	s->wm_set = 2;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);

	s = &wm->sets[3];
	s->wm_set = 3;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
}

void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	/*
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter
	 */

	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow);
}

bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	uint32_t enable = 0;

	REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);

	return enable ? true : false;
}

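/*
 * Poll the HUBBUB debug bus until the arbiter reports that p-state change is
 * allowed (bit 30 of DCHUBBUB_TEST_DEBUG_DATA).  Returns true once allow is
 * seen; on timeout, p-state allow is forced to avoid a system hang and false
 * is returned so the caller can flag the failure.
 */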
bool hubbub1_verify_allow_pstate_change_high(
	struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	/* pstate latency is ~20us, so if we wait over 40us and pstate allow
	 * still is not asserted, we are probably stuck and going to hang.
	 *
	 * TODO: Figure out why pstate allow takes ~100us on Linux;
	 * the reason is currently unknown.
	 */
	static unsigned int pstate_wait_timeout_us = 200;
	static unsigned int pstate_wait_expected_timeout_us = 40;
	static unsigned int max_sampled_pstate_wait_us; /* data collection */
	static bool forced_pstate_allow; /* help with revert wa */

	unsigned int debug_data;
	unsigned int i;

	if (forced_pstate_allow) {
		/* pstate allow was forced to prevent a hang the last time
		 * verify_allow_pstate_change_high ran, so disable the force
		 * here so the real status can be checked.
		 */
		REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
		forced_pstate_allow = false;
	}

	/* RV2:
	 * dchubbubdebugind, at: 0xB
	 * description
	 * 0:     Pipe0 Plane0 Allow Pstate Change
	 * 1:     Pipe0 Plane1 Allow Pstate Change
	 * 2:     Pipe0 Cursor0 Allow Pstate Change
	 * 3:     Pipe0 Cursor1 Allow Pstate Change
	 * 4:     Pipe1 Plane0 Allow Pstate Change
	 * 5:     Pipe1 Plane1 Allow Pstate Change
	 * 6:     Pipe1 Cursor0 Allow Pstate Change
	 * 7:     Pipe1 Cursor1 Allow Pstate Change
	 * 8:     Pipe2 Plane0 Allow Pstate Change
	 * 9:     Pipe2 Plane1 Allow Pstate Change
	 * 10:    Pipe2 Cursor0 Allow Pstate Change
	 * 11:    Pipe2 Cursor1 Allow Pstate Change
	 * 12:    Pipe3 Plane0 Allow Pstate Change
	 * 13:    Pipe3 Plane1 Allow Pstate Change
	 * 14:    Pipe3 Cursor0 Allow Pstate Change
	 * 15:    Pipe3 Cursor1 Allow Pstate Change
	 * 16:    Pipe4 Plane0 Allow Pstate Change
	 * 17:    Pipe4 Plane1 Allow Pstate Change
	 * 18:    Pipe4 Cursor0 Allow Pstate Change
	 * 19:    Pipe4 Cursor1 Allow Pstate Change
	 * 20:    Pipe5 Plane0 Allow Pstate Change
	 * 21:    Pipe5 Plane1 Allow Pstate Change
	 * 22:    Pipe5 Cursor0 Allow Pstate Change
	 * 23:    Pipe5 Cursor1 Allow Pstate Change
	 * 24:    Pipe6 Plane0 Allow Pstate Change
	 * 25:    Pipe6 Plane1 Allow Pstate Change
	 * 26:    Pipe6 Cursor0 Allow Pstate Change
	 * 27:    Pipe6 Cursor1 Allow Pstate Change
	 * 28:    WB0 Allow Pstate Change
	 * 29:    WB1 Allow Pstate Change
	 * 30:    Arbiter's allow_pstate_change
	 * 31:    SOC pstate change request
	 *
	 * RV1:
	 * dchubbubdebugind, at: 0x7
	 * description
	 * 3-0:   Pipe0 cursor0 QOS
	 * 7-4:   Pipe1 cursor0 QOS
	 * 11-8:  Pipe2 cursor0 QOS
	 * 15-12: Pipe3 cursor0 QOS
	 * 16:    Pipe0 Plane0 Allow Pstate Change
	 * 17:    Pipe1 Plane0 Allow Pstate Change
	 * 18:    Pipe2 Plane0 Allow Pstate Change
	 * 19:    Pipe3 Plane0 Allow Pstate Change
	 * 20:    Pipe0 Plane1 Allow Pstate Change
	 * 21:    Pipe1 Plane1 Allow Pstate Change
	 * 22:    Pipe2 Plane1 Allow Pstate Change
	 * 23:    Pipe3 Plane1 Allow Pstate Change
	 * 24:    Pipe0 cursor0 Allow Pstate Change
	 * 25:    Pipe1 cursor0 Allow Pstate Change
	 * 26:    Pipe2 cursor0 Allow Pstate Change
	 * 27:    Pipe3 cursor0 Allow Pstate Change
	 * 28:    WB0 Allow Pstate Change
	 * 29:    WB1 Allow Pstate Change
	 * 30:    Arbiter's allow_pstate_change
	 * 31:    SOC pstate change request
	 */

	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub1->debug_test_index_pstate);

	for (i = 0; i < pstate_wait_timeout_us; i++) {
		debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);

		if (debug_data & (1 << 30)) {
			if (i > pstate_wait_expected_timeout_us)
				DC_LOG_WARNING("pstate took longer than expected ~%dus\n",
						i);

			return true;
		}
		if (max_sampled_pstate_wait_us < i)
			max_sampled_pstate_wait_us = i;

		udelay(1);
	}

	/* force pstate allow to prevent system hang
	 * and break to debugger to investigate
	 */
	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
	forced_pstate_allow = true;

	DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
			debug_data);

	return false;
}

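/*
 * Convert a watermark given in nanoseconds to reference clock cycles:
 * cycles = wm_ns * refclk_mhz / 1000, then clamp to the register's maximum
 * value so the programmed field cannot wrap.
 */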
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t ret_val;

	ret_val = wm_ns * refclk_mhz;
	ret_val /= 1000;

	if (ret_val > clamp_value)
		ret_val = clamp_value;

	return ret_val;
}

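/*
 * Workaround: pulse WATERMARK_CHANGE_REQUEST low then high so the arbiter
 * picks up newly programmed watermark values.
 */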
void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	REG_UPDATE_SEQ_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}

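/*
 * Program watermark sets A-D from the given values (in ns), converted to
 * refclk cycles.  Each register is only written when the new value is higher
 * than the cached one, or when safe_to_lower indicates lowering is allowed.
 */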
void hubbub1_program_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	/*
	 * Need to clamp to the max register value (i.e. no wrap);
	 * for DCN1, all watermark registers are 21 bits wide.
	 */
	uint32_t prog_wm_value;

	/* Repeat for watermark sets A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
		hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	}

	if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A)) {
		if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
			hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
			prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
					refclk_mhz, 0x1fffff);
			REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
			DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
				"HW register value = 0x%x\n",
				watermarks->a.pte_meta_urgent_ns, prog_wm_value);
		}
	}

	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
		if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
				> hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
			hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
					watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
			prog_wm_value = convert_and_clamp(
					watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
					refclk_mhz, 0x1fffff);
			REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
					DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
			DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
				"HW register value = 0x%x\n",
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
		}

		if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
				> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
			hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
					watermarks->a.cstate_pstate.cstate_exit_ns;
			prog_wm_value = convert_and_clamp(
					watermarks->a.cstate_pstate.cstate_exit_ns,
					refclk_mhz, 0x1fffff);
			REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
					DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
			DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
				"HW register value = 0x%x\n",
				watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
		}
	}

	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	}

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
		hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	}

	if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B)) {
		if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
			hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
			prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
					refclk_mhz, 0x1fffff);
			REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
			DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
				"HW register value = 0x%x\n",
				watermarks->b.pte_meta_urgent_ns, prog_wm_value);
		}
	}

	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
		if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
				> hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
			hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
					watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
			prog_wm_value = convert_and_clamp(
					watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
					refclk_mhz, 0x1fffff);
			REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
					DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
			DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
				"HW register value = 0x%x\n",
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
		}

		if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
				> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
			hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
					watermarks->b.cstate_pstate.cstate_exit_ns;
			prog_wm_value = convert_and_clamp(
					watermarks->b.cstate_pstate.cstate_exit_ns,
					refclk_mhz, 0x1fffff);
			REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
					DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
			DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
				"HW register value = 0x%x\n",
				watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
		}
	}

	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	}

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
		hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	}

	if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C)) {
		if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
			hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
			prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
					refclk_mhz, 0x1fffff);
			REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
			DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
				"HW register value = 0x%x\n",
				watermarks->c.pte_meta_urgent_ns, prog_wm_value);
		}
	}

	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
		if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
				> hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
			hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
					watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
			prog_wm_value = convert_and_clamp(
					watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
					refclk_mhz, 0x1fffff);
			REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
					DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
			DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
				"HW register value = 0x%x\n",
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
		}

		if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
				> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
			hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
					watermarks->c.cstate_pstate.cstate_exit_ns;
			prog_wm_value = convert_and_clamp(
					watermarks->c.cstate_pstate.cstate_exit_ns,
					refclk_mhz, 0x1fffff);
			REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
					DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
			DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
				"HW register value = 0x%x\n",
				watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
		}
	}

	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	}

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
		hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	}

	if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D)) {
		if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
			hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
			prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
					refclk_mhz, 0x1fffff);
			REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
			DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
				"HW register value = 0x%x\n",
				watermarks->d.pte_meta_urgent_ns, prog_wm_value);
		}
	}

	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
		if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
				> hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
			hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
					watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
			prog_wm_value = convert_and_clamp(
					watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
					refclk_mhz, 0x1fffff);
			REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
					DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
			DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
				"HW register value = 0x%x\n",
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
		}

		if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
				> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
			hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
					watermarks->d.cstate_pstate.cstate_exit_ns;
			prog_wm_value = convert_and_clamp(
					watermarks->d.cstate_pstate.cstate_exit_ns,
					refclk_mhz, 0x1fffff);
			REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
					DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
			DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
				"HW register value = 0x%x\n",
				watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
		}
	}

	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	}

	REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

#if 0
	REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
#endif
}

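/*
 * Program the SDPIF frame buffer / AGP apertures according to the requested
 * frame buffer mode (ZFB only, mixed ZFB and local, or local only).
 */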
void hubbub1_update_dchub(
	struct hubbub *hubbub,
	struct dchub_init_data *dh_data)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
		ASSERT(false);
		/* should not get here */
		return;
	}
	/* TODO: port code from dal2 */
	switch (dh_data->fb_mode) {
	case FRAME_BUFFER_MODE_ZFB_ONLY:
		/* For the ZFB case, program DCHUB FB BASE and TOP inverted
		 * (TOP < BASE) to indicate ZFB mode.
		 */
		REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
				SDPIF_FB_TOP, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
				SDPIF_FB_BASE, 0x0FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
		/* Should not touch FB LOCATION (done by VBIOS in the AsicInit table) */

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_LOCAL_ONLY:
		/* Should not touch FB LOCATION (done by VBIOS in the AsicInit table) */
		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, 0x03FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, 0);
		break;
	default:
		break;
	}

	dh_data->dchub_initialzied = true;
	dh_data->dchub_info_valid = false;
}

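/*
 * Toggle WATERMARK_CHANGE_REQUEST: read the current value and write back its
 * inverse.
 */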
void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	uint32_t watermark_change_req;

	REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);

	if (watermark_change_req)
		watermark_change_req = 0;
	else
		watermark_change_req = 1;

	REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
}

void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	uint32_t reset_en = reset ? 1 : 0;

	REG_UPDATE(DCHUBBUB_SOFT_RESET,
			DCHUBBUB_GLOBAL_SOFT_RESET, reset_en);
}

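/*
 * Report whether DCC is supported for the given swizzle mode and element
 * size, and return the horizontal/vertical segment ordering used when
 * deciding the DCC request size.
 */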
static bool hubbub1_dcc_support_swizzle(
		enum swizzle_mode_values swizzle,
		unsigned int bytes_per_element,
		enum segment_order *segment_order_horz,
		enum segment_order *segment_order_vert)
{
	bool standard_swizzle = false;
	bool display_swizzle = false;

	switch (swizzle) {
	case DC_SW_4KB_S:
	case DC_SW_64KB_S:
	case DC_SW_VAR_S:
	case DC_SW_4KB_S_X:
	case DC_SW_64KB_S_X:
	case DC_SW_VAR_S_X:
		standard_swizzle = true;
		break;
	case DC_SW_4KB_D:
	case DC_SW_64KB_D:
	case DC_SW_VAR_D:
	case DC_SW_4KB_D_X:
	case DC_SW_64KB_D_X:
	case DC_SW_VAR_D_X:
		display_swizzle = true;
		break;
	default:
		break;
	}

	if (bytes_per_element == 1 && standard_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__na;
		return true;
	}
	if (bytes_per_element == 2 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 4 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && standard_swizzle) {
		*segment_order_horz = segment_order__na;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && display_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	}

	return false;
}

static bool hubbub1_dcc_support_pixel_format(
		enum surface_pixel_format format,
		unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		*bytes_per_element = 2;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		*bytes_per_element = 4;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		*bytes_per_element = 8;
		return true;
	default:
		return false;
	}
}

static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* Copied from DML; might want to refactor so this is shared with DML
	 * rather than duplicated here.
	 */
	/* DML : get_blk256_size */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}

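/*
 * Decide between full 256B and half 128B DET requests per scan direction:
 * if two swaths of 256B blocks do not fit in the detile buffer, fall back to
 * 128B requests for that direction.
 */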
static void hubbub1_det_request_size(
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */

	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;

	hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);

	swath_bytes_horz_wc = height * blk256_height * bpe;
	swath_bytes_vert_wc = width * blk256_width * bpe;

	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */

	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */
}

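/*
 * Determine the DCC compression capability (max uncompressed/compressed
 * block sizes and whether independent 64B blocks are required) for the given
 * surface, following section 1.6.2.1 of DCN1_Programming_Guide.docx.
 */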
static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	struct dc *dc = hubbub1->base.ctx->dc;

	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	if (!hubbub1->base.funcs->dcc_support_pixel_format(input->format, &bpe))
		return false;

	if (!hubbub1->base.funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	hubbub1_det_request_size(input->surface_size.height,  input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* req128 is true for either horz or vert, but the
			 * corresponding segment_order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		break;
	}

	output->capable = true;
	output->const_color_support = false;

	return true;
}

static const struct hubbub_funcs hubbub1_funcs = {
	.update_dchub = hubbub1_update_dchub,
	.dcc_support_swizzle = hubbub1_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
	.wm_read_state = hubbub1_wm_read_state,
	.program_watermarks = hubbub1_program_watermarks,
};

void hubbub1_construct(struct hubbub *hubbub,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	hubbub1->base.ctx = ctx;

	hubbub1->base.funcs = &hubbub1_funcs;

	hubbub1->regs = hubbub_regs;
	hubbub1->shifts = hubbub_shift;
	hubbub1->masks = hubbub_mask;

	hubbub1->debug_test_index_pstate = 0x7;
#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
	if (ctx->dce_version == DCN_VERSION_1_01)
		hubbub1->debug_test_index_pstate = 0xB;
#endif
}