/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */


#include "dcn30/dcn30_hubbub.h"
#include "dcn31_hubbub.h"
#include "dm_services.h"
#include "reg_helper.h"


#define CTX \
	hubbub2->base.ctx
#define DC_LOGGER \
	hubbub2->base.ctx->logger
#define REG(reg)\
	hubbub2->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hubbub2->shifts->field_name, hubbub2->masks->field_name

#ifdef NUM_VMID
#undef NUM_VMID
#endif
#define NUM_VMID 16

#define DCN31_CRB_SEGMENT_SIZE_KB 64
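
/*
 * Snapshot the current CRB (configurable return buffer) allocation. The CRB
 * is carved into DCN31_CRB_SEGMENT_SIZE_KB (64KB) segments shared between
 * the four DET buffers and the compressed buffer; the reserved space is
 * derived from the pixel chunk size.
 */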
static void dcn31_init_crb(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT,
		&hubbub2->det0_size);

	REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT,
		&hubbub2->det1_size);

	REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT,
		&hubbub2->det2_size);

	REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT,
		&hubbub2->det3_size);

	REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT,
		&hubbub2->compbuf_size_segments);

	REG_SET_2(COMPBUF_RESERVED_SPACE, 0,
			COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32,
			COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128);
	REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x17F);
}

static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB;

	switch (hubp_inst) {
	case 0:
		REG_UPDATE(DCHUBBUB_DET0_CTRL,
				DET0_SIZE, det_size_segments);
		hubbub2->det0_size = det_size_segments;
		break;
	case 1:
		REG_UPDATE(DCHUBBUB_DET1_CTRL,
				DET1_SIZE, det_size_segments);
		hubbub2->det1_size = det_size_segments;
		break;
	case 2:
		REG_UPDATE(DCHUBBUB_DET2_CTRL,
				DET2_SIZE, det_size_segments);
		hubbub2->det2_size = det_size_segments;
		break;
	case 3:
		REG_UPDATE(DCHUBBUB_DET3_CTRL,
				DET3_SIZE, det_size_segments);
		hubbub2->det3_size = det_size_segments;
		break;
	default:
		break;
	}
	/* Should never be hit; if it is, we have an erroneous hw config */
	ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
			+ hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
}
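
/*
 * Resize the compressed buffer. Growing it is only safe once any DET
 * shrinking has actually landed in hardware, hence the REG_WAITs on the
 * DET*_SIZE_CURRENT status fields before COMPBUF_SIZE is raised.
 */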
static void dcn31_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	unsigned int compbuf_size_segments = (compbuf_size_kb + DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB;

	if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) {
		if (compbuf_size_segments > hubbub2->compbuf_size_segments) {
			REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100);
		}
		/* Should never be hit; if it is, we have an erroneous hw config */
		ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
				+ hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs);
		REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments);
		hubbub2->compbuf_size_segments = compbuf_size_segments;
		ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments);
	}
}
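
/*
 * Convert a watermark from nanoseconds to DLG RefClk cycles:
 * cycles = wm_ns * refclk_mhz / 1000. For example, an 800ns watermark at a
 * 400MHz refclk programs as 800 * 400 / 1000 = 320 cycles. Results above
 * clamp_value are clamped (and asserted on), since a clamped watermark may
 * lead to underflow.
 */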
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t ret_val = 0;
	ret_val = wm_ns * refclk_mhz;
	ret_val /= 1000;

	if (ret_val > clamp_value) {
		/* clamping WMs is abnormal, unexpected and may lead to underflow */
		ASSERT(0);
		ret_val = clamp_value;
	}

	return ret_val;
}
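
/*
 * Program the urgent watermarks for clock states A-D: data urgency,
 * fractional urgent bandwidth (flip and nominal) and urgent latency
 * (refcyc per trip to memory). A watermark is only lowered when
 * safe_to_lower is set; otherwise the lower value is left pending and
 * reported to the caller.
 */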
static bool hubbub31_program_urgent_watermarks(
	struct hubbub *hubbub,
	struct dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Repeat for watermark set A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
		hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor. */
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub2->watermarks.a.frac_urg_bw_flip) {
		hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
	} else if (watermarks->a.frac_urg_bw_flip
			< hubbub2->watermarks.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub2->watermarks.a.frac_urg_bw_nom) {
		hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
	} else if (watermarks->a.frac_urg_bw_nom
			< hubbub2->watermarks.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
		hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
	} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
		hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor. */
	if (safe_to_lower || watermarks->b.frac_urg_bw_flip
			> hubbub2->watermarks.b.frac_urg_bw_flip) {
		hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip);
	} else if (watermarks->b.frac_urg_bw_flip
			< hubbub2->watermarks.b.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.frac_urg_bw_nom
			> hubbub2->watermarks.b.frac_urg_bw_nom) {
		hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom);
	} else if (watermarks->b.frac_urg_bw_nom
			< hubbub2->watermarks.b.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
		hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
	} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
		hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor. */
	if (safe_to_lower || watermarks->c.frac_urg_bw_flip
			> hubbub2->watermarks.c.frac_urg_bw_flip) {
		hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip);
	} else if (watermarks->c.frac_urg_bw_flip
			< hubbub2->watermarks.c.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.frac_urg_bw_nom
			> hubbub2->watermarks.c.frac_urg_bw_nom) {
		hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom);
	} else if (watermarks->c.frac_urg_bw_nom
			< hubbub2->watermarks.c.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
		hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
	} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
		hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor. */
	if (safe_to_lower || watermarks->d.frac_urg_bw_flip
			> hubbub2->watermarks.d.frac_urg_bw_flip) {
		hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip);
	} else if (watermarks->d.frac_urg_bw_flip
			< hubbub2->watermarks.d.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.frac_urg_bw_nom
			> hubbub2->watermarks.d.frac_urg_bw_nom) {
		hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom);
	} else if (watermarks->d.frac_urg_bw_nom
			< hubbub2->watermarks.d.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
		hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
	} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
		wm_pending = true;

	return wm_pending;
}
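
/*
 * Program the stutter (self-refresh) watermarks for clock states A-D: SR
 * enter-plus-exit and SR exit times, plus their Z8 variants for the deeper
 * Z8 power state, all clamped to 16 bits of DLG RefClk cycles. Follows the
 * same safe_to_lower/pending convention as the urgent watermarks.
 */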
static bool hubbub31_program_stutter_watermarks(
	struct hubbub *hubbub,
	struct dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns =
			watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns =
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_z8_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns =
			watermarks->a.cstate_pstate.cstate_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_z8_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_z8_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns =
			watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns =
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_z8_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns =
			watermarks->b.cstate_pstate.cstate_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_z8_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_z8_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns =
			watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns =
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_z8_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns =
			watermarks->c.cstate_pstate.cstate_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_z8_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_z8_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns =
			watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns =
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_z8_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns =
			watermarks->d.cstate_pstate.cstate_exit_z8_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_z8_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_z8_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns)
		wm_pending = true;

	return wm_pending;
}
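
/*
 * Program the DRAM p-state change watermarks for clock states A-D, with the
 * same safe_to_lower/pending convention as the other watermark sets.
 */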
static bool hubbub31_program_pstate_watermarks(
	struct hubbub *hubbub,
	struct dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.a.cstate_pstate.pstate_change_ns =
			watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.a.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.b.cstate_pstate.pstate_change_ns =
			watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.b.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.c.cstate_pstate.pstate_change_ns =
			watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.c.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.d.cstate_pstate.pstate_change_ns =
			watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.d.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	return wm_pending;
}
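
/*
 * Top-level watermark entry point: program the urgent, stutter and p-state
 * watermark sets in turn and report whether any lowering is still pending a
 * safe point to apply.
 */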
static bool hubbub31_program_watermarks(
	struct hubbub *hubbub,
	struct dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	bool wm_pending = false;

	if (hubbub31_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub31_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub31_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
	 * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit the request limit after figuring out the right number. The request limit
	 * for RM isn't decided yet; set the maximum value (0x1FF) to turn it off for now.
	 */
	/*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
	return wm_pending;
}

static void hubbub3_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML; might want to refactor so this can be shared with DML */
	/* DML : get_blk256_size */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}
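
/*
 * Choose between full 256B and half 128B DCC requests: a full 256B request
 * is only used when two swaths' worth of data fit in the detile buffer.
 * As an illustration, at bpe = 4 the 256B block is 8x8 pixels, so a
 * 1920-wide surface needs 1920 * 8 * 4 = 61440 bytes per horizontal swath,
 * and full 256B requests require at least 2 * 61440 = 122880 bytes of DET.
 */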
static void hubbub31_det_request_size(
		unsigned int detile_buf_size,
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;

	hubbub3_get_blk256_size(&blk256_width, &blk256_height, bpe);

	swath_bytes_horz_wc = width * blk256_height * bpe;
	swath_bytes_vert_wc = height * blk256_width * bpe;

	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128b request */

	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128b request */
}
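
/*
 * Report DCC compression capability for a surface: look up bytes per element
 * and segment ordering from the format and swizzle mode, size the requests
 * against the detile buffer, then map the resulting request/segment
 * combination onto the supported independent-block configurations.
 */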
static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	struct dc *dc = hubbub->ctx->dc;
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	if (!hubbub->funcs->dcc_support_pixel_format(input->format,
			&bpe))
		return false;

	if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	hubbub31_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
			input->surface_size.height, input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* req128 is true for either horz or vert,
			 * but segment_order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	/* Exception for 64KB_R_X */
	if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X))
		dcc_control = dcc_control__128_128_xxx;

	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		output->grph.rgb.dcc_controls.dcc_256_256_unconstrained = 1;
		output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		output->grph.rgb.dcc_controls.dcc_128_128_uncontrained = 1;
		output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		output->grph.rgb.dcc_controls.dcc_256_64_64 = 1;
		break;
	case dcc_control__256_128_128:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
		break;
	}
	output->capable = true;
	output->const_color_support = true;

	return true;
}
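
/*
 * Program the system aperture (frame-buffer and AGP ranges are stored in
 * units of 16MB, hence the >> 24) and, when a GART range is configured, set
 * up VMID 0 from the physical page-table config (page-table start/end are in
 * 4KB units, hence the >> 12), mirroring it into VMID 15, before running
 * DCHVM init.
 */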
int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
		struct dcn_hubbub_phys_addr_config *pa_config)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_vmid_page_table_config phys_config;

	REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
			FB_BASE, pa_config->system_aperture.fb_base >> 24);
	REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
			FB_TOP, pa_config->system_aperture.fb_top >> 24);
	REG_SET(DCN_VM_FB_OFFSET, 0,
			FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
	REG_SET(DCN_VM_AGP_BOT, 0,
			AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
	REG_SET(DCN_VM_AGP_TOP, 0,
			AGP_TOP, pa_config->system_aperture.agp_top >> 24);
	REG_SET(DCN_VM_AGP_BASE, 0,
			AGP_BASE, pa_config->system_aperture.agp_base >> 24);

	if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
		phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
		phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
		phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
		phys_config.depth = 0;
		phys_config.block_size = 0;
		// Init VMID 0 based on PA config
		dcn20_vmid_setup(&hubbub2->vmid[0], &phys_config);

		dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config);
	}

	dcn21_dchvm_init(hubbub);

	return NUM_VMID;
}
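
/*
 * Derive the DCHUB (DLG) reference frequency from the 24MHz DC refclk and
 * the programmed integer divider. The global timer must be enabled; the
 * result is sanity-checked against the 20-50MHz DLG RefClk window described
 * in the comment below.
 */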
static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub,
		unsigned int dccg_ref_freq_inKhz,
		unsigned int *dchub_ref_freq_inKhz)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t ref_div = 0;
	uint32_t ref_en = 0;
	unsigned int dc_refclk_khz = 24000;

	REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div,
			DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en);

	if (ref_en) {
		if (ref_div == 2)
			*dchub_ref_freq_inKhz = dc_refclk_khz / 2;
		else
			*dchub_ref_freq_inKhz = dc_refclk_khz;

		/*
		 * The external Reference Clock may change based on the board or
		 * platform requirements and the programmable integer divide must
		 * be programmed to provide a suitable DLG RefClk frequency between
		 * a minimum of 20MHz and maximum of 50MHz.
		 */
		if (*dchub_ref_freq_inKhz < 20000 || *dchub_ref_freq_inKhz > 50000)
			ASSERT_CRITICAL(false);

		return;
	} else {
		*dchub_ref_freq_inKhz = dc_refclk_khz;

		// HUBBUB global timer must be enabled.
		ASSERT_CRITICAL(false);
		return;
	}
}
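
/*
 * Poll the HUBBUB debug bus for the p-state allow signal. If it does not
 * assert within the timeout, force allow through
 * DCHUBBUB_ARB_DRAM_STATE_CNTL to avoid a system hang; the force is
 * reverted on the next call so the real status can be sampled again.
 */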
static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/*
	 * Pstate latency is ~20us so if we wait over 40us and pstate allow
	 * still not asserted, we are probably stuck and going to hang
	 */
	const unsigned int pstate_wait_timeout_us = 100;
	const unsigned int pstate_wait_expected_timeout_us = 40;

	static unsigned int max_sampled_pstate_wait_us; /* data collection */
	static bool forced_pstate_allow; /* help with revert wa */

	unsigned int debug_data = 0;
	unsigned int i;

	if (forced_pstate_allow) {
		/* we hacked to force pstate allow to prevent hang last time
		 * we verify_allow_pstate_change_high, so disable the force
		 * here so we can check status again
		 */
		REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
				DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
				DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
		forced_pstate_allow = false;
	}

	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub2->debug_test_index_pstate);

	for (i = 0; i < pstate_wait_timeout_us; i++) {
		debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);

		/* Debug bit is specific to ASIC. */
		if (debug_data & (1 << 26)) {
			if (i > pstate_wait_expected_timeout_us)
				DC_LOG_WARNING("pstate took longer than expected ~%dus\n", i);
			return true;
		}
		if (max_sampled_pstate_wait_us < i)
			max_sampled_pstate_wait_us = i;

		udelay(1);
	}

	/* force pstate allow to prevent system hang
	 * and break to debugger to investigate
	 */
	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
			DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
	forced_pstate_allow = true;

	DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
			debug_data);

	return false;
}

void hubbub31_init(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* Enable clock gate */
	if (hubbub->ctx->dc->debug.disable_clock_gate) {
		/* done in hwseq */
		/* REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); */
		REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
				DISPCLK_R_DCHUBBUB_GATE_DIS, 0,
				DCFCLK_R_DCHUBBUB_GATE_DIS, 0);
	}

	/*
	 * Only the DCN will determine when to connect the SDP port
	 */
	REG_UPDATE(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, 1);
}
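
/*
 * HUBBUB vtable for DCN 3.1. DET/compbuf sizing, CRB init, watermark
 * programming and the DCC/refclk queries are DCN31-specific; the remaining
 * hooks are reused from earlier DCN generations (hubbub1/2/21/3).
 */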
static const struct hubbub_funcs hubbub31_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub3_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub31_get_dcc_compression_cap,
	.wm_read_state = hubbub21_wm_read_state,
	.get_dchub_ref_freq = hubbub31_get_dchub_ref_freq,
	.program_watermarks = hubbub31_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high,
	.program_det_size = dcn31_program_det_size,
	.program_compbuf_size = dcn31_program_compbuf_size,
	.init_crb = dcn31_init_crb,
	.hubbub_read_state = hubbub2_read_state,
};

void hubbub31_construct(struct dcn20_hubbub *hubbub31,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask,
	int det_size_kb,
	int pixel_chunk_size_kb,
	int config_return_buffer_size_kb)
{
	hubbub3_construct(hubbub31, ctx, hubbub_regs, hubbub_shift, hubbub_mask);
	hubbub31->base.funcs = &hubbub31_funcs;
	hubbub31->detile_buf_size = det_size_kb * 1024;
	hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024;
	hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB;

	hubbub31->debug_test_index_pstate = 0x6;
}