// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Chris Zhong <zyw@rock-chips.com>
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <sound/hdmi-codec.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>

#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"

#define connector_to_dp(c) \
		container_of(c, struct cdn_dp_device, connector)

#define encoder_to_dp(c) \
		container_of(c, struct cdn_dp_device, encoder)

#define GRF_SOC_CON9		0x6224
#define DP_SEL_VOP_LIT		BIT(12)
#define GRF_SOC_CON26		0x6268
#define DPTX_HPD_SEL		(3 << 12)
#define DPTX_HPD_DEL		(2 << 12)
#define DPTX_HPD_SEL_MASK	(3 << 28)

#define CDN_FW_TIMEOUT_MS	(64 * 1000)
#define CDN_DPCD_TIMEOUT_MS	5000
#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"

struct cdn_dp_data {
	u8 max_phy;
};

static struct cdn_dp_data rk3399_cdn_dp = {
	.max_phy = 2,
};

static const struct of_device_id cdn_dp_dt_ids[] = {
	{ .compatible = "rockchip,rk3399-cdn-dp",
		.data = (void *)&rk3399_cdn_dp },
	{}
};

MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);

static int cdn_dp_grf_write(struct cdn_dp_device *dp,
			    unsigned int reg, unsigned int val)
{
	int ret;

	ret = clk_prepare_enable(dp->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
		return ret;
	}

	ret = regmap_write(dp->grf, reg, val);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
		clk_disable_unprepare(dp->grf_clk);
		return ret;
	}

	clk_disable_unprepare(dp->grf_clk);

	return 0;
}

static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long rate;

	ret = clk_prepare_enable(dp->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
		goto err_pclk;
	}

	ret = clk_prepare_enable(dp->core_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
		goto err_core_clk;
	}

	ret = pm_runtime_get_sync(dp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
		goto err_pm_runtime_get;
	}

	reset_control_assert(dp->core_rst);
	reset_control_assert(dp->dptx_rst);
	reset_control_assert(dp->apb_rst);
	reset_control_deassert(dp->core_rst);
	reset_control_deassert(dp->dptx_rst);
	reset_control_deassert(dp->apb_rst);

	rate = clk_get_rate(dp->core_clk);
	if (!rate) {
		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
		ret = -EINVAL;
		goto err_set_rate;
	}

	cdn_dp_set_fw_clk(dp, rate);
	cdn_dp_clock_reset(dp);

	return 0;

err_set_rate:
	pm_runtime_put(dp->dev);
err_pm_runtime_get:
	clk_disable_unprepare(dp->core_clk);
err_core_clk:
	clk_disable_unprepare(dp->pclk);
err_pclk:
	return ret;
}

static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
{
	pm_runtime_put_sync(dp->dev);
	clk_disable_unprepare(dp->pclk);
	clk_disable_unprepare(dp->core_clk);
}

static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
{
	struct extcon_dev *edev = port->extcon;
	union extcon_property_value property;
	int dptx;
	u8 lanes;

	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
	if (dptx > 0) {
		extcon_get_property(edev, EXTCON_DISP_DP,
				    EXTCON_PROP_USB_SS, &property);
		if (property.intval)
			lanes = 2;
		else
			lanes = 4;
	} else {
		lanes = 0;
	}

	return lanes;
}

static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
{
	int ret;
	u8 value;

	*sink_count = 0;
	ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
	if (ret)
		return ret;

	*sink_count = DP_GET_SINK_COUNT(value);
	return 0;
}

static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
{
	struct cdn_dp_port *port;
	int i, lanes;

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes)
			return port;
	}
	return NULL;
}

static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
	struct cdn_dp_port *port;
	u8 sink_count = 0;

	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
		return false;
	}

	port = dp->port[dp->active_port];

	/*
	 * Attempt to read sink count, retry in case the sink may not be ready.
	 *
	 * Sinks are *supposed* to come up within 1ms from an off state,
	 * but some docks need more time to power up.
	 */
	while (time_before(jiffies, timeout)) {
		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
			return false;

		if (!cdn_dp_get_sink_count(dp, &sink_count))
			return sink_count ? true : false;

		usleep_range(5000, 10000);
	}

	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
	return false;
}

static enum drm_connector_status
cdn_dp_connector_detect(struct drm_connector *connector, bool force)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	enum drm_connector_status status = connector_status_disconnected;

	mutex_lock(&dp->lock);
	if (dp->connected)
		status = connector_status_connected;
	mutex_unlock(&dp->lock);

	return status;
}

static void cdn_dp_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
	.detect = cdn_dp_connector_detect,
	.destroy = cdn_dp_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct edid *edid;
	int ret = 0;

	mutex_lock(&dp->lock);
	edid = dp->edid;
	if (edid) {
		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
				  edid->width_cm, edid->height_cm);

		dp->sink_has_audio = drm_detect_monitor_audio(edid);
		ret = drm_add_edid_modes(connector, edid);
		if (ret)
			drm_connector_update_edid_property(connector, edid);
	}
	mutex_unlock(&dp->lock);

	return ret;
}

static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
				       struct drm_display_mode *mode)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct drm_display_info *display_info = &dp->connector.display_info;
	u32 requested, actual, rate, sink_max, source_max = 0;
	u8 lanes, bpc;

	/* If DP is disconnected, every mode is invalid */
	if (!dp->connected)
		return MODE_BAD;

	switch (display_info->bpc) {
	case 10:
		bpc = 10;
		break;
	case 6:
		bpc = 6;
		break;
	default:
		bpc = 8;
		break;
	}

	requested = mode->clock * bpc * 3 / 1000;

	source_max = dp->lanes;
	sink_max = drm_dp_max_lane_count(dp->dpcd);
	lanes = min(source_max, sink_max);

	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
	sink_max = drm_dp_max_link_rate(dp->dpcd);
	rate = min(source_max, sink_max);

	actual = rate * lanes / 100;

	/* efficiency is about 0.8 */
	actual = actual * 8 / 10;

	if (requested > actual) {
		DRM_DEV_DEBUG_KMS(dp->dev,
				  "requested=%d, actual=%d, clock=%d\n",
				  requested, actual, mode->clock);
		return MODE_CLOCK_HIGH;
	}

	return MODE_OK;
}

static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
	.get_modes = cdn_dp_connector_get_modes,
	.mode_valid = cdn_dp_connector_mode_valid,
};

static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
	int ret;
	const u32 *iram_data, *dram_data;
	const struct firmware *fw = dp->fw;
	const struct cdn_firmware_header *hdr;

	hdr = (struct cdn_firmware_header *)fw->data;
	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
		return -EINVAL;
	}

	iram_data = (const u32 *)(fw->data + hdr->header_size);
	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);

	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
				   dram_data, hdr->dram_size);
	if (ret)
		return ret;

	ret = cdn_dp_set_firmware_active(dp, true);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
		return ret;
	}

	return cdn_dp_event_config(dp);
}

static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
	int ret;

	if (!cdn_dp_check_sink_connection(dp))
		return -ENODEV;

	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
		return ret;
	}

	kfree(dp->edid);
	dp->edid = drm_do_get_edid(&dp->connector,
				   cdn_dp_get_edid_block, dp);
	return 0;
}

static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
{
	union extcon_property_value property;
	int ret;

	if (!port->phy_enabled) {
		ret = phy_power_on(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
				      ret);
			goto err_phy;
		}
		port->phy_enabled = true;
	}

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
		goto err_power_on;
	}

	ret = cdn_dp_get_hpd_status(dp);
	if (ret <= 0) {
		if (!ret)
			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
		goto err_power_on;
	}

	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "get property failed\n");
		goto err_power_on;
	}

	port->lanes = cdn_dp_get_port_lanes(port);
	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
			      ret);
		goto err_power_on;
	}

	dp->active_port = port->id;
	return 0;

err_power_on:
	if (phy_power_off(port->phy))
		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
	else
		port->phy_enabled = false;

err_phy:
	cdn_dp_grf_write(dp, GRF_SOC_CON26,
			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	return ret;
}

static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
			      struct cdn_dp_port *port)
{
	int ret;

	if (port->phy_enabled) {
		ret = phy_power_off(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
			return ret;
		}
	}

	port->phy_enabled = false;
	port->lanes = 0;
	dp->active_port = -1;
	return 0;
}

static int cdn_dp_disable(struct cdn_dp_device *dp)
{
	int ret, i;

	if (!dp->active)
		return 0;

	for (i = 0; i < dp->ports; i++)
		cdn_dp_disable_phy(dp, dp->port[i]);

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n", ret);
		return ret;
	}

	cdn_dp_set_firmware_active(dp, false);
	cdn_dp_clk_disable(dp);
	dp->active = false;
	dp->link.rate = 0;
	dp->link.num_lanes = 0;
	if (!dp->connected) {
		kfree(dp->edid);
		dp->edid = NULL;
	}

	return 0;
}

static int cdn_dp_enable(struct cdn_dp_device *dp)
{
	int ret, i, lanes;
	struct cdn_dp_port *port;

	port = cdn_dp_connected_port(dp);
	if (!port) {
		DRM_DEV_ERROR(dp->dev,
			      "Can't enable without connection\n");
		return -ENODEV;
	}

	if (dp->active)
		return 0;

	ret = cdn_dp_clk_enable(dp);
	if (ret)
		return ret;

	ret = cdn_dp_firmware_init(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
		goto err_clk_disable;
	}

	/* only enable the port that is connected to a downstream device */
	for (i = port->id; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes) {
			ret = cdn_dp_enable_phy(dp, port);
			if (ret)
				continue;

			ret = cdn_dp_get_sink_capability(dp);
			if (ret) {
				cdn_dp_disable_phy(dp, port);
			} else {
				dp->active = true;
				dp->lanes = port->lanes;
				return 0;
			}
		}
	}

err_clk_disable:
	cdn_dp_clk_disable(dp);
	return ret;
}

static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	struct drm_display_info *display_info = &dp->connector.display_info;
	struct video_info *video = &dp->video_info;

	switch (display_info->bpc) {
	case 10:
		video->color_depth = 10;
		break;
	case 6:
		video->color_depth = 6;
		break;
	default:
		video->color_depth = 8;
		break;
	}

	video->color_fmt = PXL_RGB;
	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);

	memcpy(&dp->mode, adjusted, sizeof(*mode));
}

static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);

	if (!port || !dp->link.rate || !dp->link.num_lanes)
		return false;

	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
			     DP_LINK_STATUS_SIZE)) {
		DRM_ERROR("Failed to get link status\n");
		return false;
	}

	/* if link training is requested we should perform it always */
	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}

static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret, val;

	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
		return;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
			  (ret) ? "LIT" : "BIG");
	if (ret)
		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
	else
		val = DP_SEL_VOP_LIT << 16;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
	if (ret)
		return;

	mutex_lock(&dp->lock);

	ret = cdn_dp_enable(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n", ret);
		goto out;
	}
	if (!cdn_dp_check_link_status(dp)) {
		ret = cdn_dp_train_link(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
			goto out;
		}
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_config_video(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&dp->lock);
}

static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret;

	mutex_lock(&dp->lock);
	if (dp->active) {
		ret = cdn_dp_disable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
				      ret);
		}
	}
	mutex_unlock(&dp->lock);

	/*
	 * In the following 2 cases, we need to run the event_work to
	 * re-enable the DP:
	 * 1. If more than one port is connected and one device is removed
	 *    from a port, the DP is disabled here; run the event_work to
	 *    re-open the DP for the other port.
	 * 2. If re-training or re-config failed, the DP is disabled here;
	 *    run the event_work to re-connect it.
	 */
	if (!dp->connected && cdn_dp_connected_port(dp))
		schedule_work(&dp->event_work);
}

static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;

	return 0;
}

static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
	.mode_set = cdn_dp_encoder_mode_set,
	.enable = cdn_dp_encoder_enable,
	.disable = cdn_dp_encoder_disable,
	.atomic_check = cdn_dp_encoder_atomic_check,
};

static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dp->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(dp->regs)) {
		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
		return PTR_ERR(dp->regs);
	}

	dp->core_clk = devm_clk_get(dev, "core-clk");
	if (IS_ERR(dp->core_clk)) {
		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
		return PTR_ERR(dp->core_clk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "cannot get pclk\n");
		return PTR_ERR(dp->pclk);
	}

	dp->spdif_clk = devm_clk_get(dev, "spdif");
	if (IS_ERR(dp->spdif_clk)) {
		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
		return PTR_ERR(dp->spdif_clk);
	}

	dp->grf_clk = devm_clk_get(dev, "grf");
	if (IS_ERR(dp->grf_clk)) {
		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
		return PTR_ERR(dp->grf_clk);
	}

	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
	if (IS_ERR(dp->spdif_rst)) {
		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
		return PTR_ERR(dp->spdif_rst);
	}

	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
	if (IS_ERR(dp->dptx_rst)) {
		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
		return PTR_ERR(dp->dptx_rst);
	}

	dp->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(dp->core_rst)) {
		DRM_DEV_ERROR(dev, "no core reset control found\n");
		return PTR_ERR(dp->core_rst);
	}

	dp->apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(dp->apb_rst)) {
		DRM_DEV_ERROR(dev, "no apb reset control found\n");
		return PTR_ERR(dp->apb_rst);
	}

	return 0;
}

static int cdn_dp_audio_hw_params(struct device *dev, void *data,
				  struct hdmi_codec_daifmt *daifmt,
				  struct hdmi_codec_params *params)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct audio_info audio = {
		.sample_width = params->sample_width,
		.sample_rate = params->sample_rate,
		.channels = params->channels,
	};
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	switch (daifmt->fmt) {
	case HDMI_I2S:
		audio.format = AFMT_I2S;
		break;
	case HDMI_SPDIF:
		audio.format = AFMT_SPDIF;
		break;
	default:
		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
		ret = -EINVAL;
		goto out;
	}

	ret = cdn_dp_audio_config(dp, &audio);
	if (!ret)
		dp->audio_info = audio;

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static void cdn_dp_audio_shutdown(struct device *dev, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active)
		goto out;

	ret = cdn_dp_audio_stop(dp, &dp->audio_info);
	if (!ret)
		dp->audio_info.format = AFMT_UNUSED;
out:
	mutex_unlock(&dp->lock);
}

static int cdn_dp_audio_digital_mute(struct device *dev, void *data,
				     bool enable)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	ret = cdn_dp_audio_mute(dp, enable);

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static int cdn_dp_audio_get_eld(struct device *dev, void *data,
				u8 *buf, size_t len)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));

	return 0;
}

static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = cdn_dp_audio_hw_params,
	.audio_shutdown = cdn_dp_audio_shutdown,
	.digital_mute = cdn_dp_audio_digital_mute,
	.get_eld = cdn_dp_audio_get_eld,
};

static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
				   struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 1,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
	};

	dp->audio_pdev = platform_device_register_data(
			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
			 &codec_data, sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dp->audio_pdev);
}

static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
	unsigned long sleep = 1000;

	WARN_ON(!mutex_is_locked(&dp->lock));

	if (dp->fw_loaded)
		return 0;

	/* Drop the lock before getting the firmware to avoid blocking boot */
	mutex_unlock(&dp->lock);

	while (time_before(jiffies, timeout)) {
		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
		if (ret == -ENOENT) {
			msleep(sleep);
			sleep *= 2;
			continue;
		} else if (ret) {
			DRM_DEV_ERROR(dp->dev,
				      "failed to request firmware: %d\n", ret);
			goto out;
		}

		dp->fw_loaded = true;
		ret = 0;
		goto out;
	}

	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
	ret = -ETIMEDOUT;
out:
	mutex_lock(&dp->lock);
	return ret;
}

static void cdn_dp_pd_event_work(struct work_struct *work)
{
	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
						event_work);
	struct drm_connector *connector = &dp->connector;
	enum drm_connector_status old_status;

	int ret;

	mutex_lock(&dp->lock);

	if (dp->suspended)
		goto out;

	ret = cdn_dp_request_firmware(dp);
	if (ret)
		goto out;

	dp->connected = true;

	/* Not connected, notify userspace to disable the block */
	if (!cdn_dp_connected_port(dp)) {
		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
		dp->connected = false;

	/* Connected but not enabled, enable the block */
	} else if (!dp->active) {
		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
		ret = cdn_dp_enable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
			dp->connected = false;
		}

	/* Enabled and connected to a dongle without a sink, notify userspace */
	} else if (!cdn_dp_check_sink_connection(dp)) {
		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
		dp->connected = false;

	/* Enabled and connected with a sink, re-train if requested */
	} else if (!cdn_dp_check_link_status(dp)) {
		unsigned int rate = dp->link.rate;
		unsigned int lanes = dp->link.num_lanes;
		struct drm_display_mode *mode = &dp->mode;

		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
		ret = cdn_dp_train_link(dp);
		if (ret) {
			dp->connected = false;
			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
			goto out;
		}

		/* If training result is changed, update the video config */
		if (mode->clock &&
		    (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
			ret = cdn_dp_config_video(dp);
			if (ret) {
				dp->connected = false;
				DRM_DEV_ERROR(dp->dev,
					      "Failed to config video %d\n",
					      ret);
			}
		}
	}

out:
	mutex_unlock(&dp->lock);

	old_status = connector->status;
	connector->status = connector->funcs->detect(connector, false);
	if (old_status != connector->status)
		drm_kms_helper_hotplug_event(dp->drm_dev);
}

static int cdn_dp_pd_event(struct notifier_block *nb,
			   unsigned long event, void *priv)
{
	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
						event_nb);
	struct cdn_dp_device *dp = port->dp;

	/*
	 * It would be nice to be able to just do the work inline right here.
	 * However, we need to make a bunch of calls that might sleep in order
	 * to turn on the block/phy, so use a worker instead.
	 */
	schedule_work(&dp->event_work);

	return NOTIFY_DONE;
}

static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct cdn_dp_port *port;
	struct drm_device *drm_dev = data;
	int ret, i;

	ret = cdn_dp_parse_dt(dp);
	if (ret < 0)
		return ret;

	dp->drm_dev = drm_dev;
	dp->connected = false;
	dp->active = false;
	dp->active_port = -1;
	dp->fw_loaded = false;

	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);

	encoder = &dp->encoder;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);

	connector = &dp->connector;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->dpms = DRM_MODE_DPMS_OFF;

	ret = drm_connector_init(drm_dev, connector,
				 &cdn_dp_atomic_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("failed to initialize connector with drm\n");
		goto err_free_encoder;
	}

	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("failed to attach connector and encoder\n");
		goto err_free_connector;
	}

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];

		port->event_nb.notifier_call = cdn_dp_pd_event;
		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
						    EXTCON_DISP_DP,
						    &port->event_nb);
		if (ret) {
			DRM_DEV_ERROR(dev,
				      "register EXTCON_DISP_DP notifier err\n");
			goto err_free_connector;
		}
	}

	pm_runtime_enable(dev);

	schedule_work(&dp->event_work);

	return 0;

err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}

static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder = &dp->encoder;
	struct drm_connector *connector = &dp->connector;

	cancel_work_sync(&dp->event_work);
	cdn_dp_encoder_disable(encoder);
	encoder->funcs->destroy(encoder);
	connector->funcs->destroy(connector);

	pm_runtime_disable(dev);
	if (dp->fw_loaded)
		release_firmware(dp->fw);
	kfree(dp->edid);
	dp->edid = NULL;
}

static const struct component_ops cdn_dp_component_ops = {
	.bind = cdn_dp_bind,
	.unbind = cdn_dp_unbind,
};

int cdn_dp_suspend(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&dp->lock);
	if (dp->active)
		ret = cdn_dp_disable(dp);
	dp->suspended = true;
	mutex_unlock(&dp->lock);

	return ret;
}

int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	dp->suspended = false;
	if (dp->fw_loaded)
		schedule_work(&dp->event_work);
	mutex_unlock(&dp->lock);

	return 0;
}

static int cdn_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct cdn_dp_data *dp_data;
	struct cdn_dp_port *port;
	struct cdn_dp_device *dp;
	struct extcon_dev *extcon;
	struct phy *phy;
	int i;

	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;
	dp->dev = dev;

	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
	dp_data = (struct cdn_dp_data *)match->data;

	for (i = 0; i < dp_data->max_phy; i++) {
		extcon = extcon_get_edev_by_phandle(dev, i);
		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);

		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
		    PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(extcon) || IS_ERR(phy))
			continue;

		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		port->extcon = extcon;
		port->phy = phy;
		port->dp = dp;
		port->id = i;
		dp->port[dp->ports++] = port;
	}

	if (!dp->ports) {
		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
		return -EINVAL;
	}

	mutex_init(&dp->lock);
	dev_set_drvdata(dev, dp);

	cdn_dp_audio_codec_init(dp, dev);

	return component_add(dev, &cdn_dp_component_ops);
}

static int cdn_dp_remove(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	platform_device_unregister(dp->audio_pdev);
	cdn_dp_suspend(dp->dev);
	component_del(&pdev->dev, &cdn_dp_component_ops);

	return 0;
}

static void cdn_dp_shutdown(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	cdn_dp_suspend(dp->dev);
}

static const struct dev_pm_ops cdn_dp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
				cdn_dp_resume)
};

struct platform_driver cdn_dp_driver = {
	.probe = cdn_dp_probe,
	.remove = cdn_dp_remove,
	.shutdown = cdn_dp_shutdown,
	.driver = {
		.name = "cdn-dp",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(cdn_dp_dt_ids),
		.pm = &cdn_dp_pm_ops,
	},
};