/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/i2c.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>

#include "dm_services.h"
#include "amdgpu.h"
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_mst_types.h"

#include "dm_helpers.h"

struct monitor_patch_info {
	unsigned int manufacturer_id;
	unsigned int product_id;
	void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param);
	unsigned int patch_param;
};

static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param);

/* EDID quirk table: panels identified by manufacturer/product ID that need
 * a fixed maximum DSC target bpp limit applied.
 */
static const struct monitor_patch_info monitor_patch_table[] = {
	{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15},
	{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15},
};

static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param)
{
	if (edid_caps)
		edid_caps->panel_patch.max_dsc_target_bpp_limit = param;
}

static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++)
		if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id)
			&& (edid_caps->product_id == monitor_patch_table[i].product_id)) {
			monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param);
			ret++;
		}

	return ret;
}

/* dm_helpers_parse_edid_caps
 *
 * Parse edid caps
 *
 * @ctx:	[in] DC context
 * @edid:	[in] pointer to edid
 * @edid_caps:	[out] pointer to edid caps
 * @return
 *	EDID parse status
 */
enum dc_edid_status dm_helpers_parse_edid_caps(
		struct dc_context *ctx,
		const struct dc_edid *edid,
		struct dc_edid_caps *edid_caps)
{
	struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
	struct cea_sad *sads;
	int sad_count = -1;
	int sadb_count = -1;
	int i = 0;
	int j = 0;
	uint8_t *sadb = NULL;

	enum dc_edid_status result = EDID_OK;

	if (!edid_caps || !edid)
		return EDID_BAD_INPUT;

	if (!drm_edid_is_valid(edid_buf))
		result = EDID_BAD_CHECKSUM;

	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
			((uint16_t) edid_buf->mfg_id[1])<<8;
	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
			((uint16_t) edid_buf->prod_code[1])<<8;
	edid_caps->serial_number = edid_buf->serial;
	edid_caps->manufacture_week = edid_buf->mfg_week;
	edid_caps->manufacture_year = edid_buf->mfg_year;

	/* One of the four detailed_timings stores the monitor name. It's
	 * stored in an array of length 13.
	 */
	for (i = 0; i < 4; i++) {
		if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
			while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
				if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
					break;

				edid_caps->display_name[j] =
					edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
				j++;
			}
		}
	}

	edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
			(struct edid *) edid->raw_edid);

	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
	if (sad_count <= 0)
		return result;

	edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
		struct cea_sad *sad = &sads[i];

		edid_caps->audio_modes[i].format_code = sad->format;
		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
		edid_caps->audio_modes[i].sample_rate = sad->freq;
		edid_caps->audio_modes[i].sample_size = sad->byte2;
	}

	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);

	if (sadb_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
		sadb_count = 0;
	}

	if (sadb_count)
		edid_caps->speaker_flags = sadb[0];
	else
		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;

	kfree(sads);
	kfree(sadb);

	amdgpu_dm_patch_edid_caps(edid_caps);

	return result;
}
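
/*
 * Copy the active VC payload allocations tracked by the DRM MST topology
 * manager into DC's stream allocation table, so DC programs the same
 * slot layout that was negotiated over sideband.
 */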
static void get_payload_table(
		struct amdgpu_dm_connector *aconnector,
		struct dp_mst_stream_allocation_table *proposed_table)
{
	int i;
	struct drm_dp_mst_topology_mgr *mst_mgr =
			&aconnector->mst_port->mst_mgr;

	mutex_lock(&mst_mgr->payload_lock);

	proposed_table->stream_count = 0;

	/* number of active streams */
	for (i = 0; i < mst_mgr->max_payloads; i++) {
		if (mst_mgr->payloads[i].num_slots == 0)
			break; /* end of vcp_id table */

		ASSERT(mst_mgr->payloads[i].payload_state !=
				DP_PAYLOAD_DELETE_LOCAL);

		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
			mst_mgr->payloads[i].payload_state ==
					DP_PAYLOAD_REMOTE) {

			struct dp_mst_stream_allocation *sa =
					&proposed_table->stream_allocations[
						proposed_table->stream_count];

			sa->slot_count = mst_mgr->payloads[i].num_slots;
			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
			proposed_table->stream_count++;
		}
	}

	mutex_unlock(&mst_mgr->payload_lock);
}

void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Writes payload allocation table to the immediate downstream device.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		struct dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	bool ret;
	u8 link_coding_cap = DP_8b_10b_ENCODING;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	/* Accessing the connector state is required for vcpi_slots allocation
	 * and directly relies on behaviour in commit check
	 * that blocks before commit, guaranteeing that the state
	 * is not going to be swapped while still in use in commit tail
	 */

	if (!aconnector || !aconnector->mst_port)
		return false;

	dm_conn_state = to_dm_connector_state(aconnector->base.state);

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	mst_port = aconnector->port;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
#endif

	if (enable) {
		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
					       dm_conn_state->pbn,
					       dm_conn_state->vcpi_slots);
		if (!ret)
			return false;
	} else {
		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
	}

	/* It's OK for this to fail */
	drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0 : 1);

	/* mst_mgr->payloads holds the VC payloads that were sent to the MST
	 * branch device via DPCD or AUX sideband messages. Slots 1-63 are
	 * allocated sequentially per stream, and the AMD ASIC stream slot
	 * allocation must follow the same sequence, so copy the DRM MST
	 * allocation to dc.
	 */
	get_payload_table(aconnector, proposed_table);

	return true;
}

/*
 * poll pending down reply
 */
void dm_helpers_dp_mst_poll_pending_down_reply(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Clear payload allocation table before enabling the MST DP link.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Polls for ACT (allocation change trigger) handled and sends
 * ALLOCATE_PAYLOAD message.
 */
enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	int ret;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_port)
		return ACT_FAILED;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return ACT_FAILED;

	ret = drm_dp_check_act_status(mst_mgr);

	if (ret)
		return ACT_FAILED;

	return ACT_SUCCESS;
}
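
/*
 * Second half of payload programming: lets the DRM MST manager send the
 * ALLOCATE_PAYLOAD sideband messages for the configured streams and, on
 * disable, releases the VCPI that was allocated for this stream.
 */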
bool dm_helpers_dp_mst_send_payload_allocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_port)
		return false;

	mst_port = aconnector->port;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	/* It's OK for this to fail */
	drm_dp_update_payload_part2(mst_mgr);

	if (!enable)
		drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);

	return true;
}

void dm_dtn_log_begin(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}
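
/*
 * Appends a formatted string to the DTN log buffer, growing the buffer as
 * needed; without a log context the output is redirected to dmesg.
 */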
__printf(3, 4)
void dm_dtn_log_append_v(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx,
	const char *msg, ...)
{
	va_list args;
	size_t total;
	int n;

	if (!log_ctx) {
		/* No context, redirect to dmesg. */
		struct va_format vaf;

		vaf.fmt = msg;
		vaf.va = &args;

		va_start(args, msg);
		pr_info("%pV", &vaf);
		va_end(args);

		return;
	}

	/* Measure the output. */
	va_start(args, msg);
	n = vsnprintf(NULL, 0, msg, args);
	va_end(args);

	if (n <= 0)
		return;

	/* Reallocate the string buffer as needed. */
	total = log_ctx->pos + n + 1;

	if (total > log_ctx->size) {
		char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL);

		if (buf) {
			memcpy(buf, log_ctx->buf, log_ctx->pos);
			kvfree(log_ctx->buf);

			log_ctx->buf = buf;
			log_ctx->size = total;
		}
	}

	if (!log_ctx->buf)
		return;

	/* Write the formatted string to the log buffer. */
	va_start(args, msg);
	n = vscnprintf(
		log_ctx->buf + log_ctx->pos,
		log_ctx->size - log_ctx->pos,
		msg,
		args);
	va_end(args);

	if (n > 0)
		log_ctx->pos += n;
}

void dm_dtn_log_end(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

bool dm_helpers_dp_mst_start_top_mgr(
		struct dc_context *ctx,
		const struct dc_link *link,
		bool boot)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	if (boot) {
		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
			 aconnector, aconnector->base.base.id);
		return true;
	}

	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
}

void dm_helpers_dp_mst_stop_top_mgr(
		struct dc_context *ctx,
		struct dc_link *link)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	uint8_t i;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return;
	}

	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	if (aconnector->mst_mgr.mst_state) {
		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);

		for (i = 0; i < MAX_SINKS_PER_LINK; i++) {
			if (link->remote_sinks[i] == NULL)
				continue;

			if (link->remote_sinks[i]->sink_signal ==
			    SIGNAL_TYPE_DISPLAY_PORT_MST) {
				dc_link_remove_remote_sink(link, link->remote_sinks[i]);

				if (aconnector->dc_sink) {
					dc_sink_release(aconnector->dc_sink);
					aconnector->dc_sink = NULL;
					aconnector->dc_link->cur_link_settings.lane_count = 0;
				}
			}
		}
	}
}

bool dm_helpers_dp_read_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DC_LOG_DC("Failed to find connector for link!\n");
		return false;
	}

	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
				data, size) > 0;
}

bool dm_helpers_dp_write_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		const uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
				 address, (uint8_t *)data, size) > 0;
}
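
/*
 * Translate a DC i2c_command into an array of struct i2c_msg and submit it
 * through the connector's I2C adapter in a single i2c_transfer() call.
 */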
bool dm_helpers_submit_i2c(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct i2c_command *cmd)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_msg *msgs;
	int i = 0;
	int num = cmd->number_of_payloads;
	bool result;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);

	if (!msgs)
		return false;

	for (i = 0; i < num; i++) {
		msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
		msgs[i].addr = cmd->payloads[i].address;
		msgs[i].len = cmd->payloads[i].length;
		msgs[i].buf = cmd->payloads[i].data;
	}

	result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;

	kfree(msgs);

	return result;
}

bool dm_helpers_dp_write_dsc_enable(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	uint8_t enable_dsc = enable ? 1 : 0;
	struct amdgpu_dm_connector *aconnector;
	ssize_t ret = 0;

	if (!stream)
		return false;

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector->dsc_aux)
			return false;

		ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
	}

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT) {
		ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
		DC_LOG_DC("Send DSC %s to SST display\n", enable_dsc ? "enable" : "disable");
	}

	return (ret > 0);
}

bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
	bool dp_sink_present;
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		BUG_ON("Failed to find connector for link!");
		return true;
	}

	mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
	dp_sink_present = dc_link_is_dp_sink_present(link);
	mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
	return dp_sink_present;
}
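
/*
 * Reads the sink EDID over DP AUX or I2C depending on the link, retrying on
 * checksum failures, and reparses the EDID caps into the DC sink.
 */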
enum dc_edid_status dm_helpers_read_local_edid(
		struct dc_context *ctx,
		struct dc_link *link,
		struct dc_sink *sink)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct i2c_adapter *ddc;
	int retry = 3;
	enum dc_edid_status edid_status;
	struct edid *edid;

	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	/* Some dongles read the EDID incorrectly the first time, so verify
	 * the checksum and retry until a correct EDID is read.
	 */
	do {
		edid = drm_get_edid(&aconnector->base, ddc);

		/* DP Compliance Test 4.2.2.6 */
		if (link->aux_mode && connector->edid_corrupt)
			drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);

		if (!edid && connector->edid_corrupt) {
			connector->edid_corrupt = false;
			return EDID_BAD_CHECKSUM;
		}

		if (!edid)
			return EDID_NO_RESPONSE;

		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);

		/* We don't need the original edid anymore */
		kfree(edid);

		/* connector->display_info is parsed from the EDID and saved
		 * into drm_connector->display_info by the call stack below:
		 * drm_parse_ycbcr420_deep_color_info
		 * drm_parse_hdmi_forum_vsdb
		 * drm_parse_cea_ext
		 * drm_add_display_info
		 * drm_connector_update_edid_property
		 *
		 * drm_connector->display_info will be used by amdgpu_dm funcs,
		 * like fill_stream_properties_from_drm_display_mode
		 */
		amdgpu_dm_update_connector_after_detect(aconnector);

		edid_status = dm_helpers_parse_edid_caps(
						ctx,
						&sink->dc_edid,
						&sink->edid_caps);

	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);

	if (edid_status != EDID_OK)
		DRM_ERROR("EDID err: %d, on connector: %s",
				edid_status,
				aconnector->base.name);

	/* DP Compliance Test 4.2.2.3 */
	if (link->aux_mode)
		drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);

	return edid_status;
}

int dm_helper_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx,
			link->link_index, (void *)payload,
			(void *)operation_result);
}

int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
		const struct dc_link *link,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx,
			link->link_index, (void *)payload,
			(void *)operation_result);
}

void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* TODO: something */
}

void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us)
{
	// TODO:
	//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
}
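
/*
 * Allocates a kernel BO in GTT or VRAM for DC, returns its CPU pointer and
 * GPU address, and tracks the allocation on adev->dm.da_list so it can be
 * released later by dm_helpers_free_gpu_mem().
 */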
void *dm_helpers_allocate_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dal_allocation *da;
	u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
		AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
	int ret;

	da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
	if (!da)
		return NULL;

	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				      domain, &da->bo,
				      &da->gpu_addr, &da->cpu_ptr);

	*addr = da->gpu_addr;

	if (ret) {
		kfree(da);
		return NULL;
	}

	/* add da to list in dm */
	list_add(&da->list, &adev->dm.da_list);

	return da->cpu_ptr;
}

void dm_helpers_free_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		void *pvMem)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dal_allocation *da;

	/* walk the da list in DM */
	list_for_each_entry(da, &adev->dm.da_list, list) {
		if (pvMem == da->cpu_ptr) {
			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
			list_del(&da->list);
			kfree(da);
			break;
		}
	}
}

bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
{
	enum dc_irq_source irq_source;
	bool ret;

	irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;

	ret = dc_interrupt_set(ctx->dc, irq_source, enable);

	DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
			 enable ? "en" : "dis", ret);
	return ret;
}

void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
{
	/* TODO: virtual DPCD */
	struct dc_link *link = stream->link;
	union down_spread_ctrl old_downspread;
	union down_spread_ctrl new_downspread;

	if (link->aux_access_disabled)
		return;

	if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
			&old_downspread.raw,
			sizeof(old_downspread)))
		return;

	new_downspread.raw = old_downspread.raw;
	new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
		(stream->ignore_msa_timing_param) ? 1 : 0;

	if (new_downspread.raw != old_downspread.raw)
		dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
				&new_downspread.raw,
				sizeof(new_downspread));
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
	// FPGA programming for this clock in the diags framework needs to go
	// through the dm layer, therefore leave a dummy interface here
}

void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
	/* TODO: add periodic detection implementation */
}
#endif