1 /* 2 * Copyright 2021 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
21 * 22 * Authors: AMD 23 * 24 */ 25 26 27 #include "dcn30/dcn30_hubbub.h" 28 #include "dcn32_hubbub.h" 29 #include "dm_services.h" 30 #include "reg_helper.h" 31 32 33 #define CTX \ 34 hubbub2->base.ctx 35 #define DC_LOGGER \ 36 hubbub2->base.ctx->logger 37 #define REG(reg)\ 38 hubbub2->regs->reg 39 40 #undef FN 41 #define FN(reg_name, field_name) \ 42 hubbub2->shifts->field_name, hubbub2->masks->field_name 43 44 #define DCN32_CRB_SEGMENT_SIZE_KB 64 45 46 static void dcn32_init_crb(struct hubbub *hubbub) 47 { 48 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 49 50 REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, 51 &hubbub2->det0_size); 52 53 REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, 54 &hubbub2->det1_size); 55 56 REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, 57 &hubbub2->det2_size); 58 59 REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, 60 &hubbub2->det3_size); 61 62 REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, 63 &hubbub2->compbuf_size_segments); 64 65 REG_SET_2(COMPBUF_RESERVED_SPACE, 0, 66 COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32, 67 COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128); 68 REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F); 69 } 70 71 static void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte) 72 { 73 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 74 75 unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB; 76 77 switch (hubp_inst) { 78 case 0: 79 REG_UPDATE(DCHUBBUB_DET0_CTRL, 80 DET0_SIZE, det_size_segments); 81 hubbub2->det0_size = det_size_segments; 82 break; 83 case 1: 84 REG_UPDATE(DCHUBBUB_DET1_CTRL, 85 DET1_SIZE, det_size_segments); 86 hubbub2->det1_size = det_size_segments; 87 break; 88 case 2: 89 REG_UPDATE(DCHUBBUB_DET2_CTRL, 90 DET2_SIZE, det_size_segments); 91 hubbub2->det2_size = det_size_segments; 92 break; 93 case 3: 94 REG_UPDATE(DCHUBBUB_DET3_CTRL, 95 
DET3_SIZE, det_size_segments); 96 hubbub2->det3_size = det_size_segments; 97 break; 98 default: 99 break; 100 } 101 /* Should never be hit, if it is we have an erroneous hw config*/ 102 ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size 103 + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs); 104 } 105 106 static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase) 107 { 108 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 109 unsigned int compbuf_size_segments = (compbuf_size_kb + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB; 110 111 if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) { 112 if (compbuf_size_segments > hubbub2->compbuf_size_segments) { 113 REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100); 114 REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100); 115 REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100); 116 REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100); 117 } 118 /* Should never be hit, if it is we have an erroneous hw config*/ 119 ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size 120 + hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs); 121 REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments); 122 hubbub2->compbuf_size_segments = compbuf_size_segments; 123 ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments); 124 } 125 } 126 127 static uint32_t convert_and_clamp( 128 uint32_t wm_ns, 129 uint32_t refclk_mhz, 130 uint32_t clamp_value) 131 { 132 uint32_t ret_val = 0; 133 ret_val = wm_ns * refclk_mhz; 134 135 ret_val /= 1000; 136 137 if (ret_val > clamp_value) 138 ret_val = clamp_value; 139 140 return ret_val; 141 } 142 143 static bool hubbub32_program_urgent_watermarks( 144 struct hubbub *hubbub, 
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Each watermark below is only written when it rises, or when
	 * safe_to_lower permits lowering; a deferred lowering is reported to
	 * the caller through wm_pending.
	 */
	/* Repeat for water mark set A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
		hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub2->watermarks.a.frac_urg_bw_flip) {
		hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
	} else if (watermarks->a.frac_urg_bw_flip
			< hubbub2->watermarks.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub2->watermarks.a.frac_urg_bw_nom) {
		hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
	} else if (watermarks->a.frac_urg_bw_nom
			< hubbub2->watermarks.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
		hubbub2->watermarks.a.urgent_latency_ns =
				watermarks->a.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
	} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
		hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->b.frac_urg_bw_flip
			> hubbub2->watermarks.b.frac_urg_bw_flip) {
		hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip);
	} else if (watermarks->b.frac_urg_bw_flip
			< hubbub2->watermarks.b.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.frac_urg_bw_nom
			> hubbub2->watermarks.b.frac_urg_bw_nom) {
		hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom);
	} else if (watermarks->b.frac_urg_bw_nom
			< hubbub2->watermarks.b.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
		hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
	} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
		hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->c.frac_urg_bw_flip
			> hubbub2->watermarks.c.frac_urg_bw_flip) {
		hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip);
	} else if (watermarks->c.frac_urg_bw_flip
			< hubbub2->watermarks.c.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.frac_urg_bw_nom
			> hubbub2->watermarks.c.frac_urg_bw_nom) {
		hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom);
	} else if (watermarks->c.frac_urg_bw_nom
			< hubbub2->watermarks.c.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
		hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
	} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
		hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->d.frac_urg_bw_flip
			> hubbub2->watermarks.d.frac_urg_bw_flip) {
		hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip);
	} else if (watermarks->d.frac_urg_bw_flip
			< hubbub2->watermarks.d.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.frac_urg_bw_nom
			> hubbub2->watermarks.d.frac_urg_bw_nom) {
		hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom);
	} else if (watermarks->d.frac_urg_bw_nom
			<
			hubbub2->watermarks.d.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
		hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
	} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
		wm_pending = true;

	return wm_pending;
}

/* Program the self-refresh (stutter) enter/exit watermarks for clock states
 * A-D, following the same safe_to_lower/wm_pending policy as above.
 */
static bool hubbub32_program_stutter_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns =
				watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	return wm_pending;
}


/* Program the UCLK and FCLK p-state change watermarks for clock states A-D,
 * following the same safe_to_lower/wm_pending policy as the other groups.
 */
static bool hubbub32_program_pstate_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	bool wm_pending = false;

	/* Section for UCLK_PSTATE_CHANGE_WATERMARKS */
	/* clock state A */
	if (safe_to_lower ||
			watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.a.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.b.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.c.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.d.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* Section for FCLK_PSTATE_CHANGE_WATERMARKS */
	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns =
				watermarks->a.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns =
				watermarks->b.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns =
				watermarks->c.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower ||
			watermarks->d.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns =
				watermarks->d.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	return wm_pending;
}


/* Program the USR retraining watermarks for clock states A-D, following the
 * same safe_to_lower/wm_pending policy as the other watermark groups.
 */
static bool hubbub32_program_usr_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.usr_retraining_ns
			> hubbub2->watermarks.a.usr_retraining_ns) {
		hubbub2->watermarks.a.usr_retraining_ns = watermarks->a.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->a.usr_retraining_ns
			< hubbub2->watermarks.a.usr_retraining_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.usr_retraining_ns
			> hubbub2->watermarks.b.usr_retraining_ns) {
		hubbub2->watermarks.b.usr_retraining_ns = watermarks->b.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->b.usr_retraining_ns
			< hubbub2->watermarks.b.usr_retraining_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.usr_retraining_ns
			> hubbub2->watermarks.c.usr_retraining_ns) {
		hubbub2->watermarks.c.usr_retraining_ns =
				watermarks->c.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->c.usr_retraining_ns
			< hubbub2->watermarks.c.usr_retraining_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.usr_retraining_ns
			> hubbub2->watermarks.d.usr_retraining_ns) {
		hubbub2->watermarks.d.usr_retraining_ns =
				watermarks->d.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->d.usr_retraining_ns
			< hubbub2->watermarks.d.usr_retraining_ns)
		wm_pending = true;

	return wm_pending;
}

/* Force (or un-force) the USR retraining allow signal; both the forced value
 * and the force enable are driven from 'allow'.
 */
void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/*
	 * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE = 1 means enabling forcing value
	 * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE = 1 or 0, means value to be forced when force enable
	 */

	REG_UPDATE_2(DCHUBBUB_ARB_USR_RETRAINING_CNTL,
			DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE, allow,
			DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE, allow);
}

/* Program all four watermark groups (urgent, stutter, p-state, USR) and
 * apply the self-refresh / USR-retraining debug overrides.  Returns true
 * when any watermark lowering is still pending.
 */
static bool hubbub32_program_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	bool wm_pending = false;

	if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_usr_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
	 * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit request limit after figure out right number. request limit for RM isn't decided yet, set maximum value (0x1FF)
	 * to turn off it for now.
	 */
	/*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/

	/* Self-refresh (stutter) permission follows the global debug switch. */
	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

	hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);

	return wm_pending;
}

/* Copy values from WM set A to all other sets (B, C, D). */
void hubbub32_init_watermarks(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t reg;

	reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, reg);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, reg);

	reg =
REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, reg);
}

/*
 * Read back the currently programmed watermark registers for all four
 * clock states (sets A-D) into @wm, zeroing the struct first.
 */
void hubbub32_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	/* watermark set A */
	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

	/* "chanage" [sic] — matches the field name as declared in dcn_hubbub_wm_set */
	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_chanage);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, &s->fclk_pstate_change);

	/* watermark set B */
	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_chanage);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, &s->fclk_pstate_change);

	/* watermark set C */
	s = &wm->sets[2];
	s->wm_set = 2;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_chanage);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, &s->fclk_pstate_change);

	/* watermark set D */
	s = &wm->sets[3];
	s->wm_set = 3;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
			&s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_chanage);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, &s->fclk_pstate_change);
}

/*
 * Re-program the set-A data urgency watermark from the cached value
 * (watermarks.a.urgent_ns); per the function name this forces the
 * (unchanged) watermark to propagate to the pipes.
 */
void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	/* DCHUB reference clock in MHz, used to convert ns to refclk cycles */
	uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	uint32_t prog_wm_value = convert_and_clamp(hubbub2->watermarks.a.urgent_ns,
			refclk_mhz, 0x3fff);

	REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}

/*
 * DCN32 hubbub vtable; entries not implemented in this file reuse the
 * DCN1/DCN2/DCN3 implementations.
 */
static const struct hubbub_funcs hubbub32_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub3_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub3_get_dcc_compression_cap,
	.wm_read_state = hubbub32_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub32_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
	.force_pstate_change_control = hubbub3_force_pstate_change_control,
	.init_watermarks = hubbub32_init_watermarks,
	.program_det_size = dcn32_program_det_size,
	.program_compbuf_size = dcn32_program_compbuf_size,
	.init_crb = dcn32_init_crb,
	.hubbub_read_state = hubbub2_read_state,
	.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
943 }; 944 945 void hubbub32_construct(struct dcn20_hubbub *hubbub2, 946 struct dc_context *ctx, 947 const struct dcn_hubbub_registers *hubbub_regs, 948 const struct dcn_hubbub_shift *hubbub_shift, 949 const struct dcn_hubbub_mask *hubbub_mask, 950 int det_size_kb, 951 int pixel_chunk_size_kb, 952 int config_return_buffer_size_kb) 953 { 954 hubbub2->base.ctx = ctx; 955 hubbub2->base.funcs = &hubbub32_funcs; 956 hubbub2->regs = hubbub_regs; 957 hubbub2->shifts = hubbub_shift; 958 hubbub2->masks = hubbub_mask; 959 960 hubbub2->debug_test_index_pstate = 0xB; 961 hubbub2->detile_buf_size = det_size_kb * 1024; 962 hubbub2->pixel_chunk_size = pixel_chunk_size_kb * 1024; 963 hubbub2->crb_size_segs = config_return_buffer_size_kb / DCN32_CRB_SEGMENT_SIZE_KB; 964 } 965