/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */


#include "dcn30/dcn30_hubbub.h"
#include "dcn32_hubbub.h"
#include "dm_services.h"
#include "reg_helper.h"


#define CTX \
	hubbub2->base.ctx
#define DC_LOGGER \
	hubbub2->base.ctx->logger
#define REG(reg)\
	hubbub2->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hubbub2->shifts->field_name, hubbub2->masks->field_name

#define DCN32_CRB_SEGMENT_SIZE_KB 64

/*
 * Cache the current DET and compbuf allocations (in CRB segments) and program
 * the compbuf reserved space based on the pixel chunk size.
 */
static void dcn32_init_crb(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT,
		&hubbub2->det0_size);

	REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT,
		&hubbub2->det1_size);

	REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT,
		&hubbub2->det2_size);

	REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT,
		&hubbub2->det3_size);

	REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT,
		&hubbub2->compbuf_size_segments);

	REG_SET_2(COMPBUF_RESERVED_SPACE, 0,
			COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32,
			COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128);
	REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}

/*
 * Program the DET allocation for one HUBP instance. The requested size in KB
 * is rounded up to whole 64KB CRB segments.
 */
void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB;

	switch (hubp_inst) {
	case 0:
		REG_UPDATE(DCHUBBUB_DET0_CTRL,
				DET0_SIZE, det_size_segments);
		hubbub2->det0_size = det_size_segments;
		break;
	case 1:
		REG_UPDATE(DCHUBBUB_DET1_CTRL,
				DET1_SIZE, det_size_segments);
		hubbub2->det1_size = det_size_segments;
		break;
	case 2:
		REG_UPDATE(DCHUBBUB_DET2_CTRL,
				DET2_SIZE, det_size_segments);
		hubbub2->det2_size = det_size_segments;
		break;
	case 3:
		REG_UPDATE(DCHUBBUB_DET3_CTRL,
				DET3_SIZE, det_size_segments);
		hubbub2->det3_size = det_size_segments;
		break;
	default:
		break;
	}
	if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
			+ hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) {
		/* This may happen during seamless transition from ODM 2:1 to ODM 4:1 */
		DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) > CRB segments (%d)\n",
				hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size,
				hubbub2->compbuf_size_segments, hubbub2->crb_size_segs);
	}
}

/*
 * Program the compressed buffer (compbuf) size in CRB segments. When growing
 * the compbuf, wait for pending DET reallocations to take effect first so that
 * DET + compbuf never exceeds the CRB.
 */
static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	unsigned int compbuf_size_segments = (compbuf_size_kb + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB;

	if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) {
		if (compbuf_size_segments > hubbub2->compbuf_size_segments) {
			REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100);
		}
		/* Should never be hit, if it is we have an erroneous hw config */
		ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
				+ hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs);
		REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments);
		hubbub2->compbuf_size_segments = compbuf_size_segments;
		ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments);
	}
}

/* Convert a watermark in ns to DLG refclk cycles, clamped to the register field maximum. */
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t ret_val = 0;
	ret_val = wm_ns * refclk_mhz;

	ret_val /= 1000;

	if (ret_val > clamp_value)
		ret_val = clamp_value;

	return ret_val;
}

bool hubbub32_program_urgent_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Repeat for watermark set A, B, C and D. */
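	/*
	 * Each watermark below follows the same pattern: the new value is only
	 * written when it is safe to lower watermarks or when the value is being
	 * raised; a deferred lowering is reported through wm_pending so the
	 * caller can re-apply it later. Values are converted from ns to DLG
	 * refclk cycles by convert_and_clamp(). As a worked example (assuming a
	 * hypothetical 100 MHz refclk), a 4000 ns urgent watermark programs
	 * 4000 * 100 / 1000 = 400 refclk cycles, well under the 0x3fff clamp.
	 */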
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
		hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub2->watermarks.a.frac_urg_bw_flip) {
		hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
	} else if (watermarks->a.frac_urg_bw_flip
			< hubbub2->watermarks.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub2->watermarks.a.frac_urg_bw_nom) {
		hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
	} else if (watermarks->a.frac_urg_bw_nom
			< hubbub2->watermarks.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
		hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
	} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
		hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->b.frac_urg_bw_flip
			> hubbub2->watermarks.b.frac_urg_bw_flip) {
		hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip);
	} else if (watermarks->b.frac_urg_bw_flip
			< hubbub2->watermarks.b.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.frac_urg_bw_nom
			> hubbub2->watermarks.b.frac_urg_bw_nom) {
		hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom);
	} else if (watermarks->b.frac_urg_bw_nom
			< hubbub2->watermarks.b.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
		hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
	} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
		hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->c.frac_urg_bw_flip
			> hubbub2->watermarks.c.frac_urg_bw_flip) {
		hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip);
	} else if (watermarks->c.frac_urg_bw_flip
			< hubbub2->watermarks.c.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.frac_urg_bw_nom
			> hubbub2->watermarks.c.frac_urg_bw_nom) {
		hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom);
	} else if (watermarks->c.frac_urg_bw_nom
			< hubbub2->watermarks.c.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
		hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
	} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
		hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->d.frac_urg_bw_flip
			> hubbub2->watermarks.d.frac_urg_bw_flip) {
		hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip);
	} else if (watermarks->d.frac_urg_bw_flip
			< hubbub2->watermarks.d.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.frac_urg_bw_nom
			> hubbub2->watermarks.d.frac_urg_bw_nom) {
		hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom);
	} else if (watermarks->d.frac_urg_bw_nom
			< hubbub2->watermarks.d.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
		hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
	} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
		wm_pending = true;

	return wm_pending;
}

bool hubbub32_program_stutter_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns =
				watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	return wm_pending;
}


bool hubbub32_program_pstate_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	bool wm_pending = false;

	/* Section for UCLK_PSTATE_CHANGE_WATERMARKS */
	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.a.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.b.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.c.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.d.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* Section for FCLK_PSTATE_CHANGE_WATERMARKS */
	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns =
				watermarks->a.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns =
				watermarks->b.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns =
				watermarks->c.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns =
				watermarks->d.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	return wm_pending;
}


bool hubbub32_program_usr_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.usr_retraining_ns
			> hubbub2->watermarks.a.usr_retraining_ns) {
		hubbub2->watermarks.a.usr_retraining_ns = watermarks->a.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->a.usr_retraining_ns
			< hubbub2->watermarks.a.usr_retraining_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.usr_retraining_ns
			> hubbub2->watermarks.b.usr_retraining_ns) {
		hubbub2->watermarks.b.usr_retraining_ns = watermarks->b.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->b.usr_retraining_ns
			< hubbub2->watermarks.b.usr_retraining_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.usr_retraining_ns
			> hubbub2->watermarks.c.usr_retraining_ns) {
		hubbub2->watermarks.c.usr_retraining_ns =
				watermarks->c.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->c.usr_retraining_ns
			< hubbub2->watermarks.c.usr_retraining_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.usr_retraining_ns
			> hubbub2->watermarks.d.usr_retraining_ns) {
		hubbub2->watermarks.d.usr_retraining_ns =
				watermarks->d.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->d.usr_retraining_ns
			< hubbub2->watermarks.d.usr_retraining_ns)
		wm_pending = true;

	return wm_pending;
}

void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/*
	 * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE = 1 enables forcing the value.
	 * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE = 1 or 0 is the value that is
	 * forced while forcing is enabled.
	 */

	REG_UPDATE_2(DCHUBBUB_ARB_USR_RETRAINING_CNTL,
			DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE, allow,
			DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE, allow);
}

static bool hubbub32_program_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	bool wm_pending = false;

	if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_usr_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
	 * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit the request limit once the right number has been determined.
	 * The request limit for RM isn't decided yet, so program the maximum value
	 * (0x1FF) to disable it for now.
	 */
	/*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

	hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);

	return wm_pending;
}

/* Copy values from WM set A to all other sets */
static void hubbub32_init_watermarks(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t reg;

	reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, reg);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, reg);
}

static void hubbub32_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A,
			&s->dram_clk_chanage);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, &s->fclk_pstate_change);

	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_chanage);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, &s->fclk_pstate_change);

	s = &wm->sets[2];
	s->wm_set = 2;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_chanage);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, &s->fclk_pstate_change);

	s = &wm->sets[3];
	s->wm_set = 3;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_chanage);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, &s->fclk_pstate_change);
}

void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	uint32_t prog_wm_value = convert_and_clamp(hubbub2->watermarks.a.urgent_ns,
			refclk_mhz, 0x3fff);

	REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}

static const struct hubbub_funcs hubbub32_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub3_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub3_get_dcc_compression_cap,
	.wm_read_state = hubbub32_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub32_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
	.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
	.force_pstate_change_control = hubbub3_force_pstate_change_control,
	.init_watermarks = hubbub32_init_watermarks,
	.program_det_size = dcn32_program_det_size,
	.program_compbuf_size = dcn32_program_compbuf_size,
	.init_crb = dcn32_init_crb,
	.hubbub_read_state = hubbub2_read_state,
	.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
};

void hubbub32_construct(struct dcn20_hubbub *hubbub2,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask,
	int det_size_kb,
	int pixel_chunk_size_kb,
	int config_return_buffer_size_kb)
{
	hubbub2->base.ctx = ctx;
	hubbub2->base.funcs = &hubbub32_funcs;
	hubbub2->regs = hubbub_regs;
	hubbub2->shifts = hubbub_shift;
	hubbub2->masks = hubbub_mask;

	hubbub2->debug_test_index_pstate = 0xB;
	hubbub2->detile_buf_size = det_size_kb * 1024;
	hubbub2->pixel_chunk_size = pixel_chunk_size_kb * 1024;
	hubbub2->crb_size_segs = config_return_buffer_size_kb / DCN32_CRB_SEGMENT_SIZE_KB;
}
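
/*
 * Usage sketch (illustrative only, not part of this file): a DCN 3.2 resource
 * constructor is expected to allocate a struct dcn20_hubbub and initialize it
 * through hubbub32_construct(). The register/shift/mask lists and the size
 * parameters below are placeholders, not the actual DCN 3.2 configuration.
 *
 *	struct dcn20_hubbub *hubbub2 = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL);
 *
 *	if (hubbub2) {
 *		hubbub32_construct(hubbub2, ctx,
 *				&hubbub_reg, &hubbub_shift, &hubbub_mask,
 *				det_size_kb, pixel_chunk_size_kb,
 *				config_return_buffer_size_kb);
 *		return &hubbub2->base;
 *	}
 */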