/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dcn20_hubbub.h"
#include "reg_helper.h"

#define REG(reg)\
	hubbub1->regs->reg

#define CTX \
	hubbub1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	hubbub1->shifts->field_name, hubbub1->masks->field_name
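
/*
 * Determine whether DCC is supported for the given swizzle mode and
 * bytes-per-element, and report how segments are ordered when the surface
 * is walked horizontally vs. vertically. Illustrative example:
 * DC_SW_64KB_S at 4 bytes per element is supported, with horizontal
 * accesses non-contiguous and vertical accesses contiguous.
 */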
bool hubbub2_dcc_support_swizzle(
		enum swizzle_mode_values swizzle,
		unsigned int bytes_per_element,
		enum segment_order *segment_order_horz,
		enum segment_order *segment_order_vert)
{
	bool standard_swizzle = false;
	bool display_swizzle = false;
	bool render_swizzle = false;

	switch (swizzle) {
	case DC_SW_4KB_S:
	case DC_SW_64KB_S:
	case DC_SW_VAR_S:
	case DC_SW_4KB_S_X:
	case DC_SW_64KB_S_X:
	case DC_SW_VAR_S_X:
		standard_swizzle = true;
		break;
	case DC_SW_64KB_R_X:
		render_swizzle = true;
		break;
	case DC_SW_4KB_D:
	case DC_SW_64KB_D:
	case DC_SW_VAR_D:
	case DC_SW_4KB_D_X:
	case DC_SW_64KB_D_X:
	case DC_SW_VAR_D_X:
		display_swizzle = true;
		break;
	default:
		break;
	}

	if (standard_swizzle) {
		if (bytes_per_element == 1) {
			*segment_order_horz = segment_order__contiguous;
			*segment_order_vert = segment_order__na;
			return true;
		}
		if (bytes_per_element == 2) {
			*segment_order_horz = segment_order__non_contiguous;
			*segment_order_vert = segment_order__contiguous;
			return true;
		}
		if (bytes_per_element == 4) {
			*segment_order_horz = segment_order__non_contiguous;
			*segment_order_vert = segment_order__contiguous;
			return true;
		}
		if (bytes_per_element == 8) {
			*segment_order_horz = segment_order__na;
			*segment_order_vert = segment_order__contiguous;
			return true;
		}
	}
	if (render_swizzle) {
		if (bytes_per_element == 2) {
			*segment_order_horz = segment_order__contiguous;
			*segment_order_vert = segment_order__contiguous;
			return true;
		}
		if (bytes_per_element == 4) {
			*segment_order_horz = segment_order__non_contiguous;
			*segment_order_vert = segment_order__contiguous;
			return true;
		}
		if (bytes_per_element == 8) {
			*segment_order_horz = segment_order__contiguous;
			*segment_order_vert = segment_order__non_contiguous;
			return true;
		}
	}
	if (display_swizzle && bytes_per_element == 8) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	}

	return false;
}

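/*
 * DCC bytes-per-element lookup, mirroring DML's get_bytes_per_element;
 * e.g. ARGB8888 maps to 4 bytes per element. Formats not listed below
 * (such as video formats) report no DCC support.
 */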
bool hubbub2_dcc_support_pixel_format(
		enum surface_pixel_format format,
		unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		*bytes_per_element = 2;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
	case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
	case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
		*bytes_per_element = 4;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		*bytes_per_element = 8;
		return true;
	default:
		return false;
	}
}

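/*
 * Note: every width/height pair below satisfies
 * width * height * bytes_per_element == 256, i.e. these are the
 * dimensions of one 256-byte micro-block.
 */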
static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML; might want to refactor to leverage DML directly */
	/* DML: get_blk256_size */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}

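/*
 * Decide between full 256B and half 128B requests based on whether two
 * swaths of the surface fit in the detile buffer. Worked example with
 * illustrative values: a 4096-wide surface at 4 bytes per element has
 * blk256_height = 8, so swath_bytes_horz_wc = 4096 * 8 * 4 = 131072;
 * since 2 * 131072 exceeds the 164KB buffer, horizontal write-combine
 * must fall back to 128B requests.
 */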
static void hubbub2_det_request_size(
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */

	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;

	hubbub2_get_blk256_size(&blk256_width, &blk256_height, bpe);

	swath_bytes_horz_wc = width * blk256_height * bpe;
	swath_bytes_vert_wc = height * blk256_width * bpe;

	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */

	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */
}

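/*
 * Report the DCC compression capability for a surface. In rough terms:
 * bail out early if DCC is disabled via debug option or the pixel
 * format/swizzle combination cannot be compressed; otherwise pick the
 * least constrained dcc_control that the request size and segment order
 * allow, then translate it into max block sizes for the caller.
 */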
bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	struct dc *dc = hubbub->ctx->dc;
	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	if (!hubbub->funcs->dcc_support_pixel_format(input->format,
			&bpe))
		return false;

	if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	hubbub2_det_request_size(input->surface_size.height, input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* req128 is true for either horz or vert,
			 * but segment_order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	/* Exception for 64KB_R_X */
	if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X))
		dcc_control = dcc_control__128_128_xxx;

	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		break;
	}
	output->capable = true;
	output->const_color_support = true;

	return true;
}

static void hubbub2_setup_vmid_ptb(struct hubbub *hubbub,
		uint64_t ptb,
		uint8_t vmid)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);

	dcn20_vmid_set_ptb(&hubbub1->vmid[vmid], ptb);
}

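/*
 * Program the DCHUB address configuration: the system aperture
 * registers, then VMID 0 from the physical (GART) config and
 * VMIDs 1-15 from the shared virtual-addressing config.
 */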
void hubbub2_init_dchub(struct hubbub *hubbub,
		struct hubbub_addr_config *config)
{
	int i;
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_vmid_page_table_config phys_config;
	struct dcn_vmid_page_table_config virt_config;

	phys_config.depth = 0; // Depth 1
	phys_config.block_size = 0; // Block size 4KB
	phys_config.page_table_start_addr = config->pa_config.gart_config.page_table_start_addr;
	phys_config.page_table_end_addr = config->pa_config.gart_config.page_table_end_addr;

	REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
			FB_BASE, config->pa_config.system_aperture.fb_base);
	REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
			FB_TOP, config->pa_config.system_aperture.fb_top);
	REG_SET(DCN_VM_FB_OFFSET, 0,
			FB_OFFSET, config->pa_config.system_aperture.fb_offset);
	REG_SET(DCN_VM_AGP_BOT, 0,
			AGP_BOT, config->pa_config.system_aperture.agp_bot);
	REG_SET(DCN_VM_AGP_TOP, 0,
			AGP_TOP, config->pa_config.system_aperture.agp_top);
	REG_SET(DCN_VM_AGP_BASE, 0,
			AGP_BASE, config->pa_config.system_aperture.agp_base);

	// Init VMID 0 based on PA config
	dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
	dcn20_vmid_set_ptb(&hubbub1->vmid[0], config->pa_config.gart_config.page_table_base_addr);

	// Init VMID 1-15 based on VA config
	for (i = 1; i < 16; i++) {
		virt_config.page_table_start_addr = config->va_config.page_table_start_addr;
		virt_config.page_table_end_addr = config->va_config.page_table_end_addr;
		virt_config.depth = config->va_config.page_table_depth;
		virt_config.block_size = config->va_config.page_table_block_size;

		dcn20_vmid_setup(&hubbub1->vmid[i], &virt_config);
	}
}

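/*
 * Adjust the SDPIF frame-buffer/AGP apertures for the requested
 * frame-buffer mode. For ZFB (zero frame buffer, i.e. display data held
 * in system memory), the FB BASE/TOP pair is programmed "upside down"
 * as a flag and the AGP aperture is pointed at the ZFB region; the
 * >> 22 shifts convert byte addresses into 4MB units.
 */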
void hubbub2_update_dchub(struct hubbub *hubbub,
		struct dchub_init_data *dh_data)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);

	if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
		ASSERT(false);
		/* should not come here */
		return;
	}
	/* TODO: port code from dal2 */
	switch (dh_data->fb_mode) {
	case FRAME_BUFFER_MODE_ZFB_ONLY:
		/* For the ZFB case, put DCHUB FB BASE and TOP upside down to indicate ZFB mode */
		REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
				SDPIF_FB_TOP, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
				SDPIF_FB_BASE, 0x0FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
		/* Should not touch FB LOCATION (done by VBIOS on AsicInit table) */

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
						dh_data->zfb_size_in_byte - 1) >> 22);
		break;
	case FRAME_BUFFER_MODE_LOCAL_ONLY:
		/* Should not touch FB LOCATION (done by VBIOS on AsicInit table) */
		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
				SDPIF_AGP_BASE, 0);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
				SDPIF_AGP_BOT, 0x03FFFF);

		REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
				SDPIF_AGP_TOP, 0);
		break;
	default:
		break;
	}

	dh_data->dchub_initialzied = true;
	dh_data->dchub_info_valid = false;
}

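/*
 * Snapshot the raw watermark registers for all four sets (A-D) into a
 * dcn_hubbub_wm for state reporting. Values are left in raw register
 * units; optional registers are read only when present on this variant.
 */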
void hubbub2_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);

	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	s = &wm->sets[0];
	s->wm_set = 0;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A))
		s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);

	s = &wm->sets[1];
	s->wm_set = 1;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
	if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B))
		s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);

	s = &wm->sets[2];
	s->wm_set = 2;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
	if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C))
		s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);

	s = &wm->sets[3];
	s->wm_set = 3;
	s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
	if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D))
		s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
	if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
		s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
		s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
	}
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
}

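/*
 * Derive the DCHUB reference frequency from the DCCG reference clock and
 * the global timer REFDIV. Example with illustrative values: a 100000 kHz
 * DCCG reference with REFDIV == 2 yields 50000 kHz, inside the required
 * 40-60 MHz window.
 */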
void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
		unsigned int dccg_ref_freq_inKhz,
		unsigned int *dchub_ref_freq_inKhz)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t ref_div = 0;
	uint32_t ref_en = 0;

	REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div,
			DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en);

	if (ref_en) {
		if (ref_div == 2)
			*dchub_ref_freq_inKhz = dccg_ref_freq_inKhz / 2;
		else
			*dchub_ref_freq_inKhz = dccg_ref_freq_inKhz;

		/*
		 * The DCHUB reference frequency must be around 50 MHz,
		 * otherwise there may be overflow/underflow issues when
		 * doing HUBBUB programming.
		 */
		if (*dchub_ref_freq_inKhz < 40000 || *dchub_ref_freq_inKhz > 60000)
			ASSERT_CRITICAL(false);
	} else {
		*dchub_ref_freq_inKhz = dccg_ref_freq_inKhz;

		/* The HUBBUB global timer must be enabled. */
		ASSERT_CRITICAL(false);
	}
}

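/*
 * DCN2.0 HUBBUB vtable. Note that watermark programming is inherited
 * unchanged from the DCN1.0 implementation (hubbub1_program_watermarks).
 */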
static const struct hubbub_funcs hubbub2_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub = hubbub2_init_dchub,
	.setup_vmid_ptb = hubbub2_setup_vmid_ptb,
	.dcc_support_swizzle = hubbub2_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
	.wm_read_state = hubbub2_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub1_program_watermarks,
};

void hubbub2_construct(struct dcn20_hubbub *hubbub,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask)
{
	hubbub->base.ctx = ctx;

	hubbub->base.funcs = &hubbub2_funcs;

	hubbub->regs = hubbub_regs;
	hubbub->shifts = hubbub_shift;
	hubbub->masks = hubbub_mask;

	hubbub->debug_test_index_pstate = 0xB;
}