/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/slab.h>

#include "dm_services.h"
#include "dc.h"

#include "dcn10_init.h"

#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn10_resource.h"
#include "dcn10_ipp.h"
#include "dcn10_mpc.h"
#include "irq/dcn10/irq_service_dcn10.h"
#include "dcn10_dpp.h"
#include "dcn10_optc.h"
#include "dcn10_hw_sequencer.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10_opp.h"
#include "dcn10_link_encoder.h"
#include "dcn10_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"

#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"

#include "nbio/nbio_7_0_offset.h"

#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

#include "reg_helper.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"

const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
	.rob_buffer_size_kbytes = 64,
	.det_buffer_size_kbytes = 164,
	.dpte_buffer_size_in_pte_reqs_luma = 42,
	.dpp_output_buffer_pixels = 2560,
	.opp_output_buffer_lines = 1,
	.pixel_chunk_size_kbytes = 8,
	.pte_enable = 1,
	.pte_chunk_size_kbytes = 2,
	.meta_chunk_size_kbytes = 2,
	.writeback_chunk_size_kbytes = 2,
	.line_buffer_size_bits = 589824,
	.max_line_buffer_lines = 12,
	.IsLineBufferBppFixed = 0,
	.LineBufferFixedBpp = -1,
	.writeback_luma_buffer_size_kbytes = 12,
	.writeback_chroma_buffer_size_kbytes = 8,
	.max_num_dpp = 4,
	.max_num_wb = 2,
	.max_dchub_pscl_bw_pix_per_clk = 4,
	.max_pscl_lb_bw_pix_per_clk = 2,
	.max_lb_vscl_bw_pix_per_clk = 4,
	.max_vscl_hscl_bw_pix_per_clk = 4,
	.max_hscl_ratio = 4,
	.max_vscl_ratio = 4,
	.hscl_mults = 4,
	.vscl_mults = 4,
	.max_hscl_taps = 8,
	.max_vscl_taps = 8,
	.dispclk_ramp_margin_percent = 1,
	.underscan_factor = 1.10,
	.min_vblank_lines = 14,
	.dppclk_delay_subtotal = 90,
	.dispclk_delay_subtotal = 42,
	.dcfclk_cstate_latency = 10,
	.max_inter_dcn_tile_repeaters = 8,
	.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
	.bug_forcing_LC_req_same_size_fixed = 0,
};

const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
	.sr_exit_time_us = 9.0,
	.sr_enter_plus_exit_time_us = 11.0,
	.urgent_latency_us = 4.0,
	.writeback_latency_us = 12.0,
	.ideal_dram_bw_after_urgent_percent = 80.0,
	.max_request_size_bytes = 256,
	.downspread_percent = 0.5,
	.dram_page_open_time_ns = 50.0,
	.dram_rw_turnaround_time_ns = 17.5,
	.dram_return_buffer_per_channel_bytes = 8192,
	.round_trip_ping_latency_dcfclk_cycles = 128,
	.urgent_out_of_order_return_per_channel_bytes = 256,
	.channel_interleave_bytes = 256,
	.num_banks = 8,
	.num_chans = 2,
	.vmm_page_size_bytes = 4096,
	.dram_clock_change_latency_us = 17.0,
	.writeback_dram_clock_change_latency_us = 23.0,
	.return_bus_width_bytes = 64,
};

#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
	#define mmDP0_DP_DPHY_INTERNAL_CTRL		0x210f
	#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP1_DP_DPHY_INTERNAL_CTRL		0x220f
	#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP2_DP_DPHY_INTERNAL_CTRL		0x230f
	#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP3_DP_DPHY_INTERNAL_CTRL		0x240f
	#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP4_DP_DPHY_INTERNAL_CTRL		0x250f
	#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP5_DP_DPHY_INTERNAL_CTRL		0x260f
	#define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP6_DP_DPHY_INTERNAL_CTRL		0x270f
	#define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
#endif


enum dcn10_clk_src_array_id {
	DCN10_CLK_SRC_PLL0,
	DCN10_CLK_SRC_PLL1,
	DCN10_CLK_SRC_PLL2,
	DCN10_CLK_SRC_PLL3,
	DCN10_CLK_SRC_TOTAL,
	DCN101_CLK_SRC_TOTAL = DCN10_CLK_SRC_PLL3
};

/* begin *********************
 * macros to expand register list macro defined in HW object header file */

/* DCN */
#define BASE_INNER(seg) \
	DCE_BASE__INST0_SEG ## seg

#define BASE(seg) \
	BASE_INNER(seg)

#define SR(reg_name)\
		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
					mm ## reg_name

#define SRI(reg_name, block, id)\
	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name


#define SRII(reg_name, block, id)\
	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

#define VUPDATE_SRII(reg_name, block, id)\
	.reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \
					mm ## reg_name ## 0 ## _ ## block ## id

/* set field/register/bitfield name */
#define SFRB(field_name, reg_name, bitfield, post_fix)\
	.field_name = reg_name ## __ ## bitfield ## post_fix

/* NBIO */
#define NBIO_BASE_INNER(seg) \
	NBIF_BASE__INST0_SEG ## seg

#define NBIO_BASE(seg) \
	NBIO_BASE_INNER(seg)

#define NBIO_SR(reg_name)\
		.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
					mm ## reg_name

/* MMHUB */
#define MMHUB_BASE_INNER(seg) \
	MMHUB_BASE__INST0_SEG ## seg

#define MMHUB_BASE(seg) \
	MMHUB_BASE_INNER(seg)

#define MMHUB_SR(reg_name)\
		.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
					mm ## reg_name

/* macros to expand register list macro defined in HW object header file
 * end *********************/
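/*
 * Worked example of the expansion above: a register-list entry such as
 * SRI(DP_DPHY_INTERNAL_CTRL, DP, 0) should expand roughly to
 *
 *	.DP_DPHY_INTERNAL_CTRL =
 *		BASE(mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX) +
 *		mmDP0_DP_DPHY_INTERNAL_CTRL
 *
 * i.e. the per-instance register offset (dcn_1_0_offset.h or the fallback
 * defines above) is added to the IP base segment from vega10_ip_offset.h,
 * so the register structs below end up holding absolute register addresses
 * indexed by HW instance.
 */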

static const struct dce_dmcu_registers dmcu_regs = {
		DMCU_DCN10_REG_LIST()
};

static const struct dce_dmcu_shift dmcu_shift = {
		DMCU_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_dmcu_mask dmcu_mask = {
		DMCU_MASK_SH_LIST_DCN10(_MASK)
};

static const struct dce_abm_registers abm_regs = {
		ABM_DCN10_REG_LIST(0)
};

static const struct dce_abm_shift abm_shift = {
		ABM_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_abm_mask abm_mask = {
		ABM_MASK_SH_LIST_DCN10(_MASK)
};

#define stream_enc_regs(id)\
[id] = {\
	SE_DCN_REG_LIST(id)\
}

static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
	stream_enc_regs(0),
	stream_enc_regs(1),
	stream_enc_regs(2),
	stream_enc_regs(3),
};

static const struct dcn10_stream_encoder_shift se_shift = {
		SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn10_stream_encoder_mask se_mask = {
		SE_COMMON_MASK_SH_LIST_DCN10(_MASK)
};

#define audio_regs(id)\
[id] = {\
		AUD_COMMON_REG_LIST(id)\
}

static const struct dce_audio_registers audio_regs[] = {
	audio_regs(0),
	audio_regs(1),
	audio_regs(2),
	audio_regs(3),
};

#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
		AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)

static const struct dce_audio_shift audio_shift = {
		DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};

static const struct dce_audio_mask audio_mask = {
		DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};

#define aux_regs(id)\
[id] = {\
	AUX_REG_LIST(id)\
}

static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
		aux_regs(0),
		aux_regs(1),
		aux_regs(2),
		aux_regs(3)
};

#define hpd_regs(id)\
[id] = {\
	HPD_REG_LIST(id)\
}

static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
		hpd_regs(0),
		hpd_regs(1),
		hpd_regs(2),
		hpd_regs(3)
};

#define link_regs(id)\
[id] = {\
	LE_DCN10_REG_LIST(id), \
	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}

static const struct dcn10_link_enc_registers link_enc_regs[] = {
	link_regs(0),
	link_regs(1),
	link_regs(2),
	link_regs(3)
};

static const struct dcn10_link_enc_shift le_shift = {
		LINK_ENCODER_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn10_link_enc_mask le_mask = {
		LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
};

static const struct dce110_aux_registers_shift aux_shift = {
	DCN10_AUX_MASK_SH_LIST(__SHIFT)
};

static const struct dce110_aux_registers_mask aux_mask = {
	DCN10_AUX_MASK_SH_LIST(_MASK)
};

#define ipp_regs(id)\
[id] = {\
	IPP_REG_LIST_DCN10(id),\
}

static const struct dcn10_ipp_registers ipp_regs[] = {
	ipp_regs(0),
	ipp_regs(1),
	ipp_regs(2),
	ipp_regs(3),
};

static const struct dcn10_ipp_shift ipp_shift = {
	IPP_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn10_ipp_mask ipp_mask = {
	IPP_MASK_SH_LIST_DCN10(_MASK),
};

#define opp_regs(id)\
[id] = {\
	OPP_REG_LIST_DCN10(id),\
}

static const struct dcn10_opp_registers opp_regs[] = {
	opp_regs(0),
	opp_regs(1),
	opp_regs(2),
	opp_regs(3),
};

static const struct dcn10_opp_shift opp_shift = {
	OPP_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn10_opp_mask opp_mask = {
	OPP_MASK_SH_LIST_DCN10(_MASK),
};

#define aux_engine_regs(id)\
[id] = {\
	AUX_COMMON_REG_LIST(id), \
	.AUX_RESET_MASK = 0 \
}

static const struct dce110_aux_registers aux_engine_regs[] = {
		aux_engine_regs(0),
		aux_engine_regs(1),
		aux_engine_regs(2),
		aux_engine_regs(3),
		aux_engine_regs(4),
		aux_engine_regs(5)
};

#define tf_regs(id)\
[id] = {\
	TF_REG_LIST_DCN10(id),\
}

static const struct dcn_dpp_registers tf_regs[] = {
	tf_regs(0),
	tf_regs(1),
	tf_regs(2),
	tf_regs(3),
};

static const struct dcn_dpp_shift tf_shift = {
	TF_REG_LIST_SH_MASK_DCN10(__SHIFT),
	TF_DEBUG_REG_LIST_SH_DCN10

};

static const struct dcn_dpp_mask tf_mask = {
	TF_REG_LIST_SH_MASK_DCN10(_MASK),
	TF_DEBUG_REG_LIST_MASK_DCN10
};

static const struct dcn_mpc_registers mpc_regs = {
		MPC_COMMON_REG_LIST_DCN1_0(0),
		MPC_COMMON_REG_LIST_DCN1_0(1),
		MPC_COMMON_REG_LIST_DCN1_0(2),
		MPC_COMMON_REG_LIST_DCN1_0(3),
		MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(0),
		MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(1),
		MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(2),
		MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(3)
};

static const struct dcn_mpc_shift mpc_shift = {
	MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\
	SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT)
};

static const struct dcn_mpc_mask mpc_mask = {
	MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\
	SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK)
};

#define tg_regs(id)\
[id] = {TG_COMMON_REG_LIST_DCN1_0(id)}

static const struct dcn_optc_registers tg_regs[] = {
	tg_regs(0),
	tg_regs(1),
	tg_regs(2),
	tg_regs(3),
};

static const struct dcn_optc_shift tg_shift = {
	TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};

static const struct dcn_optc_mask tg_mask = {
	TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};

static const struct bios_registers bios_regs = {
		NBIO_SR(BIOS_SCRATCH_3),
		NBIO_SR(BIOS_SCRATCH_6)
};

#define hubp_regs(id)\
[id] = {\
	HUBP_REG_LIST_DCN10(id)\
}

static const struct dcn_mi_registers hubp_regs[] = {
	hubp_regs(0),
	hubp_regs(1),
	hubp_regs(2),
	hubp_regs(3),
};

static const struct dcn_mi_shift hubp_shift = {
	HUBP_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn_mi_mask hubp_mask = {
	HUBP_MASK_SH_LIST_DCN10(_MASK)
};

static const struct dcn_hubbub_registers hubbub_reg = {
	HUBBUB_REG_LIST_DCN10(0)
};

static const struct dcn_hubbub_shift hubbub_shift = {
	HUBBUB_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn_hubbub_mask hubbub_mask = {
	HUBBUB_MASK_SH_LIST_DCN10(_MASK)
};

static int map_transmitter_id_to_phy_instance(
	enum transmitter transmitter)
{
	switch (transmitter) {
	case TRANSMITTER_UNIPHY_A:
		return 0;
	break;
	case TRANSMITTER_UNIPHY_B:
		return 1;
	break;
	case TRANSMITTER_UNIPHY_C:
		return 2;
	break;
	case TRANSMITTER_UNIPHY_D:
		return 3;
	break;
	default:
		ASSERT(0);
		return 0;
	}
}
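/*
 * Clock source register blocks: each clk_src_regs(index, pllid) entry below
 * is expected to pull in the register set for combo PHY PLL <pllid>, so the
 * array index matches the DCN10_CLK_SRC_PLL<n> ids used during pool
 * construction.
 */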
#define clk_src_regs(index, pllid)\
[index] = {\
	CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
}

static const struct dce110_clk_src_regs clk_src_regs[] = {
	clk_src_regs(0, A),
	clk_src_regs(1, B),
	clk_src_regs(2, C),
	clk_src_regs(3, D)
};

static const struct dce110_clk_src_shift cs_shift = {
		CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};

static const struct dce110_clk_src_mask cs_mask = {
		CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};

static const struct resource_caps res_cap = {
		.num_timing_generator = 4,
		.num_opp = 4,
		.num_video_plane = 4,
		.num_audio = 4,
		.num_stream_encoder = 4,
		.num_pll = 4,
		.num_ddc = 4,
};

static const struct resource_caps rv2_res_cap = {
		.num_timing_generator = 3,
		.num_opp = 3,
		.num_video_plane = 3,
		.num_audio = 3,
		.num_stream_encoder = 3,
		.num_pll = 3,
		.num_ddc = 4,
};

static const struct dc_plane_cap plane_cap = {
	.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
	.blends_with_above = true,
	.blends_with_below = true,
	.per_pixel_alpha = true,

	.pixel_format_support = {
			.argb8888 = true,
			.nv12 = true,
			.fp16 = true,
			.p010 = true
	},

	.max_upscale_factor = {
			.argb8888 = 16000,
			.nv12 = 16000,
			.fp16 = 1
	},

	.max_downscale_factor = {
			.argb8888 = 250,
			.nv12 = 250,
			.fp16 = 1
	}
};

static const struct dc_debug_options debug_defaults_drv = {
		.sanity_checks = true,
		.disable_dmcu = false,
		.force_abm_enable = false,
		.timing_trace = false,
		.clock_trace = true,

		/*
		 * The Raven SMU does not allow a 0 display clock:
		 * the SMU minimum display clock limit is 50 MHz.
		 * Keep the minimum display clock at 100 MHz to avoid an SMU hang.
		 */
		.min_disp_clk_khz = 100000,

		.disable_pplib_clock_request = false,
		.disable_pplib_wm_range = false,
		.pplib_wm_report_mode = WM_REPORT_DEFAULT,
		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
		.force_single_disp_pipe_split = true,
		.disable_dcc = DCC_ENABLE,
		.voltage_align_fclk = true,
		.disable_stereo_support = true,
		.vsr_support = true,
		.performance_trace = false,
		.az_endpoint_mute_only = true,
		.recovery_enabled = false, /* enable this by default after testing */
		.max_downscale_src_width = 3840,
		.underflow_assert_delay_us = 0xFFFFFFFF,
};

static const struct dc_debug_options debug_defaults_diags = {
		.disable_dmcu = false,
		.force_abm_enable = false,
		.timing_trace = true,
		.clock_trace = true,
		.disable_stutter = true,
		.disable_pplib_clock_request = true,
		.disable_pplib_wm_range = true,
		.underflow_assert_delay_us = 0xFFFFFFFF,
};

static void dcn10_dpp_destroy(struct dpp **dpp)
{
	kfree(TO_DCN10_DPP(*dpp));
	*dpp = NULL;
}

static struct dpp *dcn10_dpp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn10_dpp *dpp =
		kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL);

	if (!dpp)
		return NULL;

	dpp1_construct(dpp, ctx, inst,
		       &tf_regs[inst], &tf_shift, &tf_mask);
	return &dpp->base;
}

static struct input_pixel_processor *dcn10_ipp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn10_ipp *ipp =
		kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);

	if (!ipp) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dcn10_ipp_construct(ipp, ctx, inst,
			&ipp_regs[inst], &ipp_shift, &ipp_mask);
	return &ipp->base;
}


static struct output_pixel_processor *dcn10_opp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn10_opp *opp =
		kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL);

	if (!opp) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dcn10_opp_construct(opp, ctx, inst,
			&opp_regs[inst], &opp_shift, &opp_mask);
	return &opp->base;
}

struct dce_aux *dcn10_aux_engine_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct aux_engine_dce110 *aux_engine =
		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);

	if (!aux_engine)
		return NULL;

	dce110_aux_engine_construct(aux_engine, ctx, inst,
			SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
			&aux_engine_regs[inst],
			&aux_mask,
			&aux_shift,
			ctx->dc->caps.extended_aux_timeout_support);

	return &aux_engine->base;
}
#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }

static const struct dce_i2c_registers i2c_hw_regs[] = {
		i2c_inst_regs(1),
		i2c_inst_regs(2),
		i2c_inst_regs(3),
		i2c_inst_regs(4),
		i2c_inst_regs(5),
		i2c_inst_regs(6),
};

static const struct dce_i2c_shift i2c_shifts = {
		I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
};

static const struct dce_i2c_mask i2c_masks = {
		I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
};

struct dce_i2c_hw *dcn10_i2c_hw_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dce_i2c_hw *dce_i2c_hw =
		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);

	if (!dce_i2c_hw)
		return NULL;

	dcn1_i2c_hw_construct(dce_i2c_hw, ctx, inst,
			&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);

	return dce_i2c_hw;
}
static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
{
	struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
					  GFP_KERNEL);

	if (!mpc10)
		return NULL;

	dcn10_mpc_construct(mpc10, ctx,
			&mpc_regs,
			&mpc_shift,
			&mpc_mask,
			4);

	return &mpc10->base;
}

static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx)
{
	struct dcn10_hubbub *dcn10_hubbub = kzalloc(sizeof(struct dcn10_hubbub),
						    GFP_KERNEL);

	if (!dcn10_hubbub)
		return NULL;

	hubbub1_construct(&dcn10_hubbub->base, ctx,
			&hubbub_reg,
			&hubbub_shift,
			&hubbub_mask);

	return &dcn10_hubbub->base;
}

static struct timing_generator *dcn10_timing_generator_create(
		struct dc_context *ctx,
		uint32_t instance)
{
	struct optc *tgn10 =
		kzalloc(sizeof(struct optc), GFP_KERNEL);

	if (!tgn10)
		return NULL;

	tgn10->base.inst = instance;
	tgn10->base.ctx = ctx;

	tgn10->tg_regs = &tg_regs[instance];
	tgn10->tg_shift = &tg_shift;
	tgn10->tg_mask = &tg_mask;

	dcn10_timing_generator_init(tgn10);

	return &tgn10->base;
}

static const struct encoder_feature_support link_enc_feature = {
		.max_hdmi_deep_color = COLOR_DEPTH_121212,
		.max_hdmi_pixel_clock = 600000,
		.hdmi_ycbcr420_supported = true,
		.dp_ycbcr420_supported = false,
		.flags.bits.IS_HBR2_CAPABLE = true,
		.flags.bits.IS_HBR3_CAPABLE = true,
		.flags.bits.IS_TPS3_CAPABLE = true,
		.flags.bits.IS_TPS4_CAPABLE = true
};
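/*
 * Link encoder creation: the transmitter id (UNIPHY A-D) selects the PHY
 * register set via map_transmitter_id_to_phy_instance(), while the DDC
 * channel (1-based, hence the "- 1" below) and the HPD source select the
 * AUX and HPD register blocks.
 */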
struct link_encoder *dcn10_link_encoder_create(
	const struct encoder_init_data *enc_init_data)
{
	struct dcn10_link_encoder *enc10 =
		kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL);
	int link_regs_id;

	if (!enc10)
		return NULL;

	link_regs_id =
		map_transmitter_id_to_phy_instance(enc_init_data->transmitter);

	dcn10_link_encoder_construct(enc10,
			enc_init_data,
			&link_enc_feature,
			&link_enc_regs[link_regs_id],
			&link_enc_aux_regs[enc_init_data->channel - 1],
			&link_enc_hpd_regs[enc_init_data->hpd_source],
			&le_shift,
			&le_mask);

	return &enc10->base;
}

struct clock_source *dcn10_clock_source_create(
	struct dc_context *ctx,
	struct dc_bios *bios,
	enum clock_source_id id,
	const struct dce110_clk_src_regs *regs,
	bool dp_clk_src)
{
	struct dce110_clk_src *clk_src =
		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);

	if (!clk_src)
		return NULL;

	if (dce112_clk_src_construct(clk_src, ctx, bios, id,
			regs, &cs_shift, &cs_mask)) {
		clk_src->base.dp_clk_src = dp_clk_src;
		return &clk_src->base;
	}

	kfree(clk_src);
	BREAK_TO_DEBUGGER();
	return NULL;
}

static void read_dce_straps(
	struct dc_context *ctx,
	struct resource_straps *straps)
{
	generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}

static struct audio *create_audio(
		struct dc_context *ctx, unsigned int inst)
{
	return dce_audio_create(ctx, inst,
			&audio_regs[inst], &audio_shift, &audio_mask);
}

static struct stream_encoder *dcn10_stream_encoder_create(
	enum engine_id eng_id,
	struct dc_context *ctx)
{
	struct dcn10_stream_encoder *enc1 =
		kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);

	if (!enc1)
		return NULL;

	dcn10_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
					&stream_enc_regs[eng_id],
					&se_shift, &se_mask);
	return &enc1->base;
}

static const struct dce_hwseq_registers hwseq_reg = {
		HWSEQ_DCN1_REG_LIST()
};

static const struct dce_hwseq_shift hwseq_shift = {
		HWSEQ_DCN1_MASK_SH_LIST(__SHIFT)
};

static const struct dce_hwseq_mask hwseq_mask = {
		HWSEQ_DCN1_MASK_SH_LIST(_MASK)
};

static struct dce_hwseq *dcn10_hwseq_create(
	struct dc_context *ctx)
{
	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);

	if (hws) {
		hws->ctx = ctx;
		hws->regs = &hwseq_reg;
		hws->shifts = &hwseq_shift;
		hws->masks = &hwseq_mask;
		hws->wa.DEGVIDCN10_253 = true;
		hws->wa.false_optc_underflow = true;
		hws->wa.DEGVIDCN10_254 = true;
	}
	return hws;
}

static const struct resource_create_funcs res_create_funcs = {
	.read_dce_straps = read_dce_straps,
	.create_audio = create_audio,
	.create_stream_encoder = dcn10_stream_encoder_create,
	.create_hwseq = dcn10_hwseq_create,
};

static const struct resource_create_funcs res_create_maximus_funcs = {
	.read_dce_straps = NULL,
	.create_audio = NULL,
	.create_stream_encoder = NULL,
	.create_hwseq = dcn10_hwseq_create,
};

void dcn10_clock_source_destroy(struct clock_source **clk_src)
{
	kfree(TO_DCE110_CLK_SRC(*clk_src));
	*clk_src = NULL;
}

static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx)
{
	struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);

	if (!pp_smu)
		return pp_smu;

	dm_pp_get_funcs(ctx, pp_smu);
	return pp_smu;
}
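/*
 * Tear-down helper: every sub-object owned by the pool is NULL-checked
 * before being freed, so this is also safe to call on a partially
 * constructed pool from the fail path of dcn10_resource_construct().
 */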
static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)
{
	unsigned int i;

	for (i = 0; i < pool->base.stream_enc_count; i++) {
		if (pool->base.stream_enc[i] != NULL) {
			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
			pool->base.stream_enc[i] = NULL;
		}
	}

	if (pool->base.mpc != NULL) {
		kfree(TO_DCN10_MPC(pool->base.mpc));
		pool->base.mpc = NULL;
	}

	if (pool->base.hubbub != NULL) {
		kfree(pool->base.hubbub);
		pool->base.hubbub = NULL;
	}

	for (i = 0; i < pool->base.pipe_count; i++) {
		if (pool->base.opps[i] != NULL)
			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);

		if (pool->base.dpps[i] != NULL)
			dcn10_dpp_destroy(&pool->base.dpps[i]);

		if (pool->base.ipps[i] != NULL)
			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);

		if (pool->base.hubps[i] != NULL) {
			kfree(TO_DCN10_HUBP(pool->base.hubps[i]));
			pool->base.hubps[i] = NULL;
		}

		if (pool->base.irqs != NULL) {
			dal_irq_service_destroy(&pool->base.irqs);
		}

		if (pool->base.timing_generators[i] != NULL) {
			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		if (pool->base.engines[i] != NULL)
			dce110_engine_destroy(&pool->base.engines[i]);
		if (pool->base.hw_i2cs[i] != NULL) {
			kfree(pool->base.hw_i2cs[i]);
			pool->base.hw_i2cs[i] = NULL;
		}
		if (pool->base.sw_i2cs[i] != NULL) {
			kfree(pool->base.sw_i2cs[i]);
			pool->base.sw_i2cs[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.audio_count; i++) {
		if (pool->base.audios[i])
			dce_aud_destroy(&pool->base.audios[i]);
	}

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] != NULL) {
			dcn10_clock_source_destroy(&pool->base.clock_sources[i]);
			pool->base.clock_sources[i] = NULL;
		}
	}

	if (pool->base.dp_clock_source != NULL) {
		dcn10_clock_source_destroy(&pool->base.dp_clock_source);
		pool->base.dp_clock_source = NULL;
	}

	if (pool->base.abm != NULL)
		dce_abm_destroy(&pool->base.abm);

	if (pool->base.dmcu != NULL)
		dce_dmcu_destroy(&pool->base.dmcu);

	kfree(pool->base.pp_smu);
}

static struct hubp *dcn10_hubp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn10_hubp *hubp1 =
		kzalloc(sizeof(struct dcn10_hubp), GFP_KERNEL);

	if (!hubp1)
		return NULL;

	dcn10_hubp_construct(hubp1, ctx, inst,
			     &hubp_regs[inst], &hubp_shift, &hubp_mask);
	return &hubp1->base;
}

static void get_pixel_clock_parameters(
	const struct pipe_ctx *pipe_ctx,
	struct pixel_clk_params *pixel_clk_params)
{
	const struct dc_stream_state *stream = pipe_ctx->stream;
	pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
	pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
	pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
	/* TODO: un-hardcode */
	pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
			LINK_RATE_REF_FREQ_IN_KHZ;
	pixel_clk_params->flags.ENABLE_SS = 0;
	pixel_clk_params->color_depth =
		stream->timing.display_color_depth;
	pixel_clk_params->flags.DISPLAY_BLANKED = 1;
	pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;

	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
		pixel_clk_params->color_depth = COLOR_DEPTH_888;

	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		pixel_clk_params->requested_pix_clk_100hz /= 2;
	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
		pixel_clk_params->requested_pix_clk_100hz *= 2;

}

static void build_clamping_params(struct dc_stream_state *stream)
{
	stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
	stream->clamping.c_depth = stream->timing.display_color_depth;
	stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
}

static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
{

	get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);

	pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
		pipe_ctx->clock_source,
		&pipe_ctx->stream_res.pix_clk_params,
		&pipe_ctx->pll_settings);

	pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;

	resource_build_bit_depth_reduction_params(pipe_ctx->stream,
			&pipe_ctx->stream->bit_depth_params);
	build_clamping_params(pipe_ctx->stream);
}

static enum dc_status build_mapped_resource(
		const struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream)
{
	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);

	/* TODO: seems unneeded now */
	/* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
			if (stream != NULL && old_context->streams[i] != NULL) {
				 todo: shouldn't have to copy missing parameter here
				resource_build_bit_depth_reduction_params(stream,
						&stream->bit_depth_params);
				stream->clamping.pixel_encoding =
						stream->timing.pixel_encoding;

				resource_build_bit_depth_reduction_params(stream,
						&stream->bit_depth_params);
				build_clamping_params(stream);

				continue;
			}
		}
	*/

	if (!pipe_ctx)
		return DC_ERROR_UNEXPECTED;

	build_pipe_hw_param(pipe_ctx);
	return DC_OK;
}

enum dc_status dcn10_add_stream_to_ctx(
		struct dc *dc,
		struct dc_state *new_ctx,
		struct dc_stream_state *dc_stream)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;

	result = resource_map_pool_resources(dc, new_ctx, dc_stream);

	if (result == DC_OK)
		result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);


	if (result == DC_OK)
		result = build_mapped_resource(dc, new_ctx, dc_stream);

	return result;
}
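/*
 * Acquire a free secondary pipe for pipe-split / multi-plane use: the new
 * pipe shares the head pipe's stream resources (TG, ABM, OPP) but gets its
 * own plane resources (HUBP, IPP, DPP) based on its pipe index.
 */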
static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
		struct dc_state *context,
		const struct resource_pool *pool,
		struct dc_stream_state *stream)
{
	struct resource_context *res_ctx = &context->res_ctx;
	struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
	struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);

	if (!head_pipe) {
		ASSERT(0);
		return NULL;
	}

	if (!idle_pipe)
		return NULL;

	idle_pipe->stream = head_pipe->stream;
	idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
	idle_pipe->stream_res.abm = head_pipe->stream_res.abm;
	idle_pipe->stream_res.opp = head_pipe->stream_res.opp;

	idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;

	return idle_pipe;
}

static bool dcn10_get_dcc_compression_cap(const struct dc *dc,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
			dc->res_pool->hubbub,
			input,
			output);
}

static void dcn10_destroy_resource_pool(struct resource_pool **pool)
{
	struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);

	dcn10_resource_destruct(dcn10_pool);
	kfree(dcn10_pool);
	*pool = NULL;
}

static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
{
	if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
			&& caps->max_video_width != 0
			&& plane_state->src_rect.width > caps->max_video_width)
		return DC_FAIL_SURFACE_VALIDATE;

	return DC_OK;
}

static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *context)
{
	int i, j;
	bool video_down_scaled = false;
	bool video_large = false;
	bool desktop_large = false;
	bool dcc_disabled = false;

	for (i = 0; i < context->stream_count; i++) {
		if (context->stream_status[i].plane_count == 0)
			continue;

		if (context->stream_status[i].plane_count > 2)
			return DC_FAIL_UNSUPPORTED_1;

		for (j = 0; j < context->stream_status[i].plane_count; j++) {
			struct dc_plane_state *plane =
				context->stream_status[i].plane_states[j];


			if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {

				if (plane->src_rect.width > plane->dst_rect.width ||
						plane->src_rect.height > plane->dst_rect.height)
					video_down_scaled = true;

				if (plane->src_rect.width >= 3840)
					video_large = true;

			} else {
				if (plane->src_rect.width >= 3840)
					desktop_large = true;
				if (!plane->dcc.enable)
					dcc_disabled = true;
			}
		}
	}

	/*
	 * Workaround: on DCN10 there is a UMC issue that causes underflow when
	 * playing 4k video on a 4k desktop with the video downscaled and
	 * single-channel memory.
	 */
	if (video_large && desktop_large && video_down_scaled && dcc_disabled &&
			dc->dcn_soc->number_of_channels == 1)
		return DC_FAIL_SURFACE_VALIDATE;

	return DC_OK;
}

static enum dc_status dcn10_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
	enum dc_status result = DC_OK;

	enum surface_pixel_format surf_pix_format = plane_state->format;
	unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);

	enum swizzle_mode_values swizzle = DC_SW_LINEAR;

	if (bpp == 64)
		swizzle = DC_SW_64KB_D;
	else
		swizzle = DC_SW_64KB_S;

	plane_state->tiling_info.gfx9.swizzle = swizzle;
	return result;
}
struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
		struct resource_context *res_ctx,
		const struct resource_pool *pool,
		struct dc_stream_state *stream)
{
	int i;
	int j = -1;
	struct dc_link *link = stream->link;

	for (i = 0; i < pool->stream_enc_count; i++) {
		if (!res_ctx->is_stream_enc_acquired[i] &&
				pool->stream_enc[i]) {
			/* Store first available for MST second display
			 * in daisy chain use case
			 */
			j = i;
			if (pool->stream_enc[i]->id ==
					link->link_enc->preferred_engine)
				return pool->stream_enc[i];
		}
	}

	/*
	 * For CZ and later, we can allow DIG FE and BE to differ for all display types
	 */

	if (j >= 0)
		return pool->stream_enc[j];

	return NULL;
}

static const struct dc_cap_funcs cap_funcs = {
	.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};

static const struct resource_funcs dcn10_res_pool_funcs = {
	.destroy = dcn10_destroy_resource_pool,
	.link_enc_create = dcn10_link_encoder_create,
	.validate_bandwidth = dcn_validate_bandwidth,
	.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
	.validate_plane = dcn10_validate_plane,
	.validate_global = dcn10_validate_global,
	.add_stream_to_ctx = dcn10_add_stream_to_ctx,
	.patch_unknown_plane_state = dcn10_patch_unknown_plane_state,
	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};

static uint32_t read_pipe_fuses(struct dc_context *ctx)
{
	uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
	/* RV1 supports a max of 4 pipes */
	value = value & 0xf;
	return value;
}

static bool dcn10_resource_construct(
	uint8_t num_virtual_links,
	struct dc *dc,
	struct dcn10_resource_pool *pool)
{
	int i;
	int j;
	struct dc_context *ctx = dc->ctx;
	uint32_t pipe_fuses = read_pipe_fuses(ctx);

	ctx->dc_bios->regs = &bios_regs;

	if (ctx->dce_version == DCN_VERSION_1_01)
		pool->base.res_cap = &rv2_res_cap;
	else
		pool->base.res_cap = &res_cap;
	pool->base.funcs = &dcn10_res_pool_funcs;

	/*
	 * TODO: fill in from actual raven resource when we create
	 * more than the virtual encoder
	 */

	/*************************************************
	 *  Resource + asic cap hardcoding               *
	 *************************************************/
	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;

	/* max pipe num for the ASIC before checking pipe fuses */
	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;

	if (dc->ctx->dce_version == DCN_VERSION_1_01)
		pool->base.pipe_count = 3;
	dc->caps.max_video_width = 3840;
	dc->caps.max_downscale_ratio = 200;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.max_cursor_size = 256;
	dc->caps.max_slave_planes = 1;
	dc->caps.is_apu = true;
	dc->caps.post_blend_color_processing = false;
	dc->caps.extended_aux_timeout_support = false;

	/* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
	dc->caps.force_dp_tps4_for_cp2520 = true;

	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
		dc->debug = debug_defaults_drv;
	else
		dc->debug = debug_defaults_diags;

	/*************************************************
	 *  Create resources                             *
	 *************************************************/

	pool->base.clock_sources[DCN10_CLK_SRC_PLL0] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL0,
				&clk_src_regs[0], false);
	pool->base.clock_sources[DCN10_CLK_SRC_PLL1] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL1,
				&clk_src_regs[1], false);
	pool->base.clock_sources[DCN10_CLK_SRC_PLL2] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL2,
				&clk_src_regs[2], false);

	if (dc->ctx->dce_version == DCN_VERSION_1_0) {
		pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
				dcn10_clock_source_create(ctx, ctx->dc_bios,
					CLOCK_SOURCE_COMBO_PHY_PLL3,
					&clk_src_regs[3], false);
	}

	pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;

	if (dc->ctx->dce_version == DCN_VERSION_1_01)
		pool->base.clk_src_count = DCN101_CLK_SRC_TOTAL;

	pool->base.dp_clock_source =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_ID_DP_DTO,
				/* todo: not reuse phy_pll registers */
				&clk_src_regs[0], true);

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] == NULL) {
			dm_error("DC: failed to create clock sources!\n");
			BREAK_TO_DEBUGGER();
			goto fail;
		}
	}

	pool->base.dmcu = dcn10_dmcu_create(ctx,
			&dmcu_regs,
			&dmcu_shift,
			&dmcu_mask);
	if (pool->base.dmcu == NULL) {
		dm_error("DC: failed to create dmcu!\n");
		BREAK_TO_DEBUGGER();
		goto fail;
	}

	pool->base.abm = dce_abm_create(ctx,
			&abm_regs,
			&abm_shift,
			&abm_mask);
	if (pool->base.abm == NULL) {
		dm_error("DC: failed to create abm!\n");
		BREAK_TO_DEBUGGER();
		goto fail;
	}

	dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1);
	memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
	memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));

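	/*
	 * DML works from the dcn1_0_soc/dcn1_0_ip bounding boxes defined at
	 * the top of this file, while the legacy dcn_calcs path keeps its own
	 * copies in dc->dcn_ip/dc->dcn_soc; both are adjusted below for
	 * DCN 1.01, fused-down parts and the detected memory channel count.
	 */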
	if (dc->ctx->dce_version == DCN_VERSION_1_01) {
		struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc;
		struct dcn_ip_params *dcn_ip = dc->dcn_ip;
		struct display_mode_lib *dml = &dc->dml;

		dml->ip.max_num_dpp = 3;
		/* TODO how to handle 23.84? */
		dcn_soc->dram_clock_change_latency = 23;
		dcn_ip->max_num_dpp = 3;
	}
	if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
		dc->dcn_soc->urgent_latency = 3;
		dc->debug.disable_dmcu = true;
		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f;
	}


	dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
	ASSERT(dc->dcn_soc->number_of_channels < 3);
	if (dc->dcn_soc->number_of_channels == 0) /* old sbios bug */
		dc->dcn_soc->number_of_channels = 2;

	if (dc->dcn_soc->number_of_channels == 1) {
		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
		if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
			dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f;
		}
	}

	pool->base.pp_smu = dcn10_pp_smu_create(ctx);

	/*
	 * Right now SMU/PPLIB and DAL all have the AZ D3 force PME notification
	 * implemented, so AZ D3 should work. For issue 197007.
	 */
	if (pool->base.pp_smu != NULL
			&& pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL)
		dc->debug.az_endpoint_mute_only = false;

	if (!dc->debug.disable_pplib_clock_request)
		dcn_bw_update_from_pplib(dc);
	dcn_bw_sync_calcs_and_dml(dc);
	if (!dc->debug.disable_pplib_wm_range) {
		dc->res_pool = &pool->base;
		dcn_bw_notify_pplib_of_wm_ranges(dc);
	}

	{
		struct irq_service_init_data init_data;
		init_data.ctx = dc->ctx;
		pool->base.irqs = dal_irq_service_dcn10_create(&init_data);
		if (!pool->base.irqs)
			goto fail;
	}

	/* index to valid pipe resource */
	j = 0;
	/* mem input -> ipp -> dpp -> opp -> TG */
	for (i = 0; i < pool->base.pipe_count; i++) {
		/* if pipe is disabled, skip instance of HW pipe,
		 * i.e. skip ASIC register instance
		 */
		if ((pipe_fuses & (1 << i)) != 0)
			continue;

		pool->base.hubps[j] = dcn10_hubp_create(ctx, i);
		if (pool->base.hubps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create memory input!\n");
			goto fail;
		}

		pool->base.ipps[j] = dcn10_ipp_create(ctx, i);
		if (pool->base.ipps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create input pixel processor!\n");
			goto fail;
		}

		pool->base.dpps[j] = dcn10_dpp_create(ctx, i);
		if (pool->base.dpps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create dpp!\n");
			goto fail;
		}

		pool->base.opps[j] = dcn10_opp_create(ctx, i);
		if (pool->base.opps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create output pixel processor!\n");
			goto fail;
		}

		pool->base.timing_generators[j] = dcn10_timing_generator_create(
				ctx, i);
		if (pool->base.timing_generators[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create tg!\n");
			goto fail;
		}
		/* check next valid pipe */
		j++;
	}

	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		pool->base.engines[i] = dcn10_aux_engine_create(ctx, i);
		if (pool->base.engines[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create aux engine!!\n");
			goto fail;
		}
		pool->base.hw_i2cs[i] = dcn10_i2c_hw_create(ctx, i);
		if (pool->base.hw_i2cs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create hw i2c!!\n");
			goto fail;
		}
		pool->base.sw_i2cs[i] = NULL;
	}

	/* valid pipe num */
	pool->base.pipe_count = j;
	pool->base.timing_generator_count = j;

	/* The DML lib hardcodes this to 4; if ASIC pipes are fused,
	 * the value may be lower.
	 */
	dc->dml.ip.max_num_dpp = pool->base.pipe_count;
	dc->dcn_ip->max_num_dpp = pool->base.pipe_count;

	pool->base.mpc = dcn10_mpc_create(ctx);
	if (pool->base.mpc == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mpc!\n");
		goto fail;
	}

	pool->base.hubbub = dcn10_hubbub_create(ctx);
	if (pool->base.hubbub == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create hubbub!\n");
		goto fail;
	}

	if (!resource_construct(num_virtual_links, dc, &pool->base,
			(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
			&res_create_funcs : &res_create_maximus_funcs)))
		goto fail;

	dcn10_hw_sequencer_construct(dc);
	dc->caps.max_planes = pool->base.pipe_count;

	for (i = 0; i < dc->caps.max_planes; ++i)
		dc->caps.planes[i] = plane_cap;

	dc->cap_funcs = cap_funcs;

	return true;

fail:

	dcn10_resource_destruct(pool);

	return false;
}

struct resource_pool *dcn10_create_resource_pool(
		const struct dc_init_data *init_data,
		struct dc *dc)
{
	struct dcn10_resource_pool *pool =
		kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	if (dcn10_resource_construct(init_data->num_virtual_links, dc, pool))
		return &pool->base;

	kfree(pool);
	BREAK_TO_DEBUGGER();
	return NULL;
}