xref: /openbmc/linux/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c (revision 6396bb221514d2876fd6dc0aa2a1f240d99b37bb)
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "reg_helper.h"

#define CTX \
	hubbub->ctx
#define DC_LOGGER \
	hubbub->ctx->logger
#define REG(reg)\
	hubbub->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hubbub->shifts->field_name, hubbub->masks->field_name

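/*
 * hubbub1_wm_read_state() snapshots the watermark values currently held in
 * the DCHUBBUB arbiter registers into the caller-provided dcn_hubbub_wm
 * structure, one dcn_hubbub_wm_set per clock state A-D.  The self-refresh
 * (SR) enter/exit watermarks are only read when those registers exist in the
 * register list, since not every ASIC exposes them.  The dram_clk_chanage
 * spelling below follows the field's declaration in struct dcn_hubbub_wm_set.
 */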
void hubbub1_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	s = &wm->sets[0];
	s->wm_set = 0;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);

	s = &wm->sets[1];
	s->wm_set = 1;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);

	s = &wm->sets[2];
	s->wm_set = 2;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);

	s = &wm->sets[3];
	s->wm_set = 3;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
	s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
}

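/*
 * hubbub1_verify_allow_pstate_change_high() polls the HUBBUB debug bus until
 * the arbiter reports allow_pstate_change (bit 30 of the selected debug
 * data).  It returns true if the signal asserts within the timeout;
 * otherwise it forces the allow signal via DCHUBBUB_ARB_DRAM_STATE_CNTL to
 * avoid a system hang and returns false so the caller can investigate.
 */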
bool hubbub1_verify_allow_pstate_change_high(
	struct hubbub *hubbub)
{
	/* pstate latency is ~20us, so if we wait over 40us and pstate allow
	 * is still not asserted, we are probably stuck and going to hang.
	 *
	 * TODO: Figure out why pstate takes around ~100us on linux;
	 * it is currently unknown why it takes that long there.
	 */
	static unsigned int pstate_wait_timeout_us = 200;
	static unsigned int pstate_wait_expected_timeout_us = 40;
	static unsigned int max_sampled_pstate_wait_us; /* data collection */
	static bool forced_pstate_allow; /* help with revert wa */

	unsigned int debug_data;
	unsigned int i;

	if (forced_pstate_allow) {
		/* we forced pstate allow to prevent a hang the last time
		 * verify_allow_pstate_change_high was called, so disable the
		 * force here so we can check the real status
		 */
		REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
		forced_pstate_allow = false;
	}

	/* RV1:
	 * dchubbubdebugind, at: 0x7
	 * description "3-0:   Pipe0 cursor0 QOS
	 * 7-4:   Pipe1 cursor0 QOS
	 * 11-8:  Pipe2 cursor0 QOS
	 * 15-12: Pipe3 cursor0 QOS
	 * 16:    Pipe0 Plane0 Allow Pstate Change
	 * 17:    Pipe1 Plane0 Allow Pstate Change
	 * 18:    Pipe2 Plane0 Allow Pstate Change
	 * 19:    Pipe3 Plane0 Allow Pstate Change
	 * 20:    Pipe0 Plane1 Allow Pstate Change
	 * 21:    Pipe1 Plane1 Allow Pstate Change
	 * 22:    Pipe2 Plane1 Allow Pstate Change
	 * 23:    Pipe3 Plane1 Allow Pstate Change
	 * 24:    Pipe0 cursor0 Allow Pstate Change
	 * 25:    Pipe1 cursor0 Allow Pstate Change
	 * 26:    Pipe2 cursor0 Allow Pstate Change
	 * 27:    Pipe3 cursor0 Allow Pstate Change
	 * 28:    WB0 Allow Pstate Change
	 * 29:    WB1 Allow Pstate Change
	 * 30:    Arbiter's allow_pstate_change
	 * 31:    SOC pstate change request
	 */

	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub->debug_test_index_pstate);

	for (i = 0; i < pstate_wait_timeout_us; i++) {
		debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);

		if (debug_data & (1 << 30)) {

			if (i > pstate_wait_expected_timeout_us)
				DC_LOG_WARNING("pstate took longer than expected ~%dus\n",
						i);

			return true;
		}
		if (max_sampled_pstate_wait_us < i)
			max_sampled_pstate_wait_us = i;

		udelay(1);
	}

	/* force pstate allow to prevent system hang
	 * and break to debugger to investigate
	 */
	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
	forced_pstate_allow = true;

	DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
			debug_data);

	return false;
}

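/*
 * convert_and_clamp() converts a watermark given in nanoseconds into DCHUBBUB
 * refclk cycles (cycles = wm_ns * refclk_mhz / 1000, with integer truncation)
 * and clamps the result to the register width.  As an illustrative example
 * (values assumed, not taken from real timing data): wm_ns = 800 at
 * refclk_mhz = 27 gives 800 * 27 / 1000 = 21 cycles, well below the 21-bit
 * clamp of 0x1fffff.
 */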
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t ret_val = wm_ns * refclk_mhz;

	ret_val /= 1000;

	if (ret_val > clamp_value)
		ret_val = clamp_value;

	return ret_val;
}

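/*
 * hubbub1_program_watermarks() converts every entry of the watermark set from
 * nanoseconds to refclk cycles and writes it to the corresponding arbiter
 * register.  The sequence is: drop DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST,
 * program clock states A through D, then raise the request bit so the
 * arbiter latches the new values.  All DCN1 watermark registers are 21 bits
 * wide, hence the 0x1fffff clamp passed to convert_and_clamp().
 */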
void hubbub1_program_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz)
{
	uint32_t force_en = hubbub->ctx->dc->debug.disable_stutter ? 1 : 0;
	/*
	 * Need to clamp to max of the register values (i.e. no wrap)
	 * for dcn1, all wm registers are 21-bit wide
	 */
	uint32_t prog_wm_value;

	REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0);

	/* Repeat for watermark set A, B, C and D. */
	/* clock state A */
	prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
			refclk_mhz, 0x1fffff);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

	DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
		"HW register value = 0x%x\n",
		watermarks->a.urgent_ns, prog_wm_value);

	prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
			refclk_mhz, 0x1fffff);
	REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
	DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
		"HW register value = 0x%x\n",
		watermarks->a.pte_meta_urgent_ns, prog_wm_value);

	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);

		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	}

	prog_wm_value = convert_and_clamp(
			watermarks->a.cstate_pstate.pstate_change_ns,
			refclk_mhz, 0x1fffff);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
	DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
		"HW register value = 0x%x\n\n",
		watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);

	/* clock state B */
	prog_wm_value = convert_and_clamp(
			watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
	DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
		"HW register value = 0x%x\n",
		watermarks->b.urgent_ns, prog_wm_value);

	prog_wm_value = convert_and_clamp(
			watermarks->b.pte_meta_urgent_ns,
			refclk_mhz, 0x1fffff);
	REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
	DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
		"HW register value = 0x%x\n",
		watermarks->b.pte_meta_urgent_ns, prog_wm_value);

	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);

		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	}

	prog_wm_value = convert_and_clamp(
			watermarks->b.cstate_pstate.pstate_change_ns,
			refclk_mhz, 0x1fffff);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
	DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
		"HW register value = 0x%x\n\n",
		watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);

	/* clock state C */
	prog_wm_value = convert_and_clamp(
			watermarks->c.urgent_ns, refclk_mhz, 0x1fffff);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
	DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
		"HW register value = 0x%x\n",
		watermarks->c.urgent_ns, prog_wm_value);

	prog_wm_value = convert_and_clamp(
			watermarks->c.pte_meta_urgent_ns,
			refclk_mhz, 0x1fffff);
	REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
	DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
		"HW register value = 0x%x\n",
		watermarks->c.pte_meta_urgent_ns, prog_wm_value);

	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);

		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	}

	prog_wm_value = convert_and_clamp(
			watermarks->c.cstate_pstate.pstate_change_ns,
			refclk_mhz, 0x1fffff);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
	DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
		"HW register value = 0x%x\n\n",
		watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);

	/* clock state D */
	prog_wm_value = convert_and_clamp(
			watermarks->d.urgent_ns, refclk_mhz, 0x1fffff);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
	DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
		"HW register value = 0x%x\n",
		watermarks->d.urgent_ns, prog_wm_value);

	prog_wm_value = convert_and_clamp(
			watermarks->d.pte_meta_urgent_ns,
			refclk_mhz, 0x1fffff);
	REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
	DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
		"HW register value = 0x%x\n",
		watermarks->d.pte_meta_urgent_ns, prog_wm_value);

	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);

		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	}

	prog_wm_value = convert_and_clamp(
			watermarks->d.cstate_pstate.pstate_change_ns,
			refclk_mhz, 0x1fffff);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
	DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
		"HW register value = 0x%x\n\n",
		watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);

	REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);

	REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);

	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, force_en);

#if 0
	REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
#endif
}

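/*
 * hubbub1_update_dchub() programs the SDPIF frame-buffer and AGP apertures
 * according to the frame-buffer mode.  These registers take addresses in
 * 4 MiB units, hence the >> 22 shifts.  For the ZFB cases (zero frame
 * buffer, i.e. scanout data living in system memory rather than local VRAM),
 * FB BASE is deliberately programmed above FB TOP; per the comment below,
 * the inverted range is what signals ZFB mode to DCHUB.
 */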
void hubbub1_update_dchub(
	struct hubbub *hubbub,
	struct dchub_init_data *dh_data)
{
	/* TODO: port code from dal2 */
	switch (dh_data->fb_mode) {
	case FRAME_BUFFER_MODE_ZFB_ONLY:
		/* For the ZFB case, DCHUB FB BASE and TOP must be programmed
		 * upside down (base above top) to indicate ZFB mode
		 */
		REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
				SDPIF_FB_TOP, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
				SDPIF_FB_BASE, 0x0FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
		/* Should not touch FB LOCATION (done by VBIOS on AsicInit table) */

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_LOCAL_ONLY:
		/* Should not touch FB LOCATION (done by VBIOS on AsicInit table) */
		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, 0x03FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, 0);
		break;
	default:
		break;
	}

	dh_data->dchub_initialzied = true;
	dh_data->dchub_info_valid = false;
}

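/*
 * hubbub1_toggle_watermark_change_req() inverts the current value of
 * DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST: it reads the field and writes back
 * its complement.
 */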
void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
{
	uint32_t watermark_change_req;

	REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);

	watermark_change_req = !watermark_change_req;

	REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
}

void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
{
	uint32_t reset_en = reset ? 1 : 0;

	REG_UPDATE(DCHUBBUB_SOFT_RESET,
			DCHUBBUB_GLOBAL_SOFT_RESET, reset_en);
}

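/*
 * hubbub1_dcc_support_swizzle() reports whether DCC is supported for a given
 * swizzle mode and element size and, if so, whether the memory segments are
 * laid out contiguously in the horizontal and vertical access directions
 * (segment_order__contiguous vs segment_order__non_contiguous).  Only the
 * standard (_S) and display (_D) swizzle families are considered; the
 * segment ordering feeds the dcc_control decision in
 * hubbub1_get_dcc_compression_cap() below.
 */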
static bool hubbub1_dcc_support_swizzle(
		enum swizzle_mode_values swizzle,
		unsigned int bytes_per_element,
		enum segment_order *segment_order_horz,
		enum segment_order *segment_order_vert)
{
	bool standard_swizzle = false;
	bool display_swizzle = false;

	switch (swizzle) {
	case DC_SW_4KB_S:
	case DC_SW_64KB_S:
	case DC_SW_VAR_S:
	case DC_SW_4KB_S_X:
	case DC_SW_64KB_S_X:
	case DC_SW_VAR_S_X:
		standard_swizzle = true;
		break;
	case DC_SW_4KB_D:
	case DC_SW_64KB_D:
	case DC_SW_VAR_D:
	case DC_SW_4KB_D_X:
	case DC_SW_64KB_D_X:
	case DC_SW_VAR_D_X:
		display_swizzle = true;
		break;
	default:
		break;
	}

	if (bytes_per_element == 1 && standard_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__na;
		return true;
	}
	if (bytes_per_element == 2 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 4 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && standard_swizzle) {
		*segment_order_horz = segment_order__na;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && display_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	}

	return false;
}

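/*
 * hubbub1_dcc_support_pixel_format() mirrors DML's get_bytes_per_element:
 * DCC is only reported as supported for the 16-, 32- and 64-bit-per-pixel
 * RGB formats listed below, and the element size (2, 4 or 8 bytes) is
 * returned for the block-size calculations that follow.
 */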
static bool hubbub1_dcc_support_pixel_format(
		enum surface_pixel_format format,
		unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		*bytes_per_element = 2;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		*bytes_per_element = 4;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		*bytes_per_element = 8;
		return true;
	default:
		return false;
	}
}

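/*
 * hubbub1_get_blk256_size() returns the width and height, in elements, of a
 * 256-byte block for the given element size; in every case
 * width * height * bytes_per_element = 256 (16x16x1, 16x8x2, 8x8x4, 8x4x8).
 */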
static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML; might want to refactor so this leverages DML
	 * instead of duplicating it here
	 */
	/* DML : get_blk256_size */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}

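/*
 * hubbub1_det_request_size() decides, per access direction, whether DCC
 * requests must drop from the full 256-byte size to 128 bytes: if two swaths
 * of 256-byte blocks no longer fit in the 164 KB DCN1.0 detile buffer, the
 * half-size request is required.  As an illustrative example (surface chosen
 * for the example, not taken from this file): a 3840x2160 surface at 4 bytes
 * per element uses 8x8 blocks, so swath_bytes_horz_wc = 2160 * 8 * 4 = 69120
 * (twice that fits in 167936 bytes, so full 256B requests) while
 * swath_bytes_vert_wc = 3840 * 8 * 4 = 122880 (twice that does not fit, so
 * 128B requests are needed vertically).
 */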
static void hubbub1_det_request_size(
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */

	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;

	hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);

	swath_bytes_horz_wc = height * blk256_height * bpe;
	swath_bytes_vert_wc = width * blk256_width * bpe;

	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */

	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */
}

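/*
 * hubbub1_get_dcc_compression_cap() implements the DCC capability check from
 * section 1.6.2.1 of DCN1_Programming_Guide.docx: given the surface format,
 * swizzle and size, it selects a dcc_control mode (256/256, 128/128 or
 * 256/64/64) from the request-size and segment-order results above, honours
 * the disable_dcc debug overrides, and reports the resulting maximum
 * uncompressed/compressed block sizes to the caller.
 */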
static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	struct dc *dc = hubbub->ctx->dc;
	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	if (!hubbub->funcs->dcc_support_pixel_format(input->format, &bpe))
		return false;

	if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	hubbub1_det_request_size(input->surface_size.height,  input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* req128 is true for either horz or vert,
			 * but the segment order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		break;
	}

	output->capable = true;
	output->const_color_support = false;

	return true;
}

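/*
 * hubbub1_construct() wires the register, shift and mask tables into the
 * hubbub object and selects debug index 0x7, the RV1 pstate debug bus
 * documented in hubbub1_verify_allow_pstate_change_high() above.
 */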
static const struct hubbub_funcs hubbub1_funcs = {
	.update_dchub = hubbub1_update_dchub,
	.dcc_support_swizzle = hubbub1_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
};

void hubbub1_construct(struct hubbub *hubbub,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask)
{
	hubbub->ctx = ctx;

	hubbub->funcs = &hubbub1_funcs;

	hubbub->regs = hubbub_regs;
	hubbub->shifts = hubbub_shift;
	hubbub->masks = hubbub_mask;

	hubbub->debug_test_index_pstate = 0x7;
}