1 /* 2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd 3 * Author: Chris Zhong <zyw@rock-chips.com> 4 * 5 * This software is licensed under the terms of the GNU General Public 6 * License version 2, as published by the Free Software Foundation, and 7 * may be copied, distributed, and modified under those terms. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 */ 14 15 #include <drm/drmP.h> 16 #include <drm/drm_atomic_helper.h> 17 #include <drm/drm_crtc_helper.h> 18 #include <drm/drm_dp_helper.h> 19 #include <drm/drm_edid.h> 20 #include <drm/drm_of.h> 21 22 #include <linux/clk.h> 23 #include <linux/component.h> 24 #include <linux/extcon.h> 25 #include <linux/firmware.h> 26 #include <linux/regmap.h> 27 #include <linux/reset.h> 28 #include <linux/mfd/syscon.h> 29 #include <linux/phy/phy.h> 30 31 #include <sound/hdmi-codec.h> 32 33 #include "cdn-dp-core.h" 34 #include "cdn-dp-reg.h" 35 #include "rockchip_drm_vop.h" 36 37 #define connector_to_dp(c) \ 38 container_of(c, struct cdn_dp_device, connector) 39 40 #define encoder_to_dp(c) \ 41 container_of(c, struct cdn_dp_device, encoder) 42 43 #define GRF_SOC_CON9 0x6224 44 #define DP_SEL_VOP_LIT BIT(12) 45 #define GRF_SOC_CON26 0x6268 46 #define UPHY_SEL_BIT 3 47 #define UPHY_SEL_MASK BIT(19) 48 #define DPTX_HPD_SEL (3 << 12) 49 #define DPTX_HPD_DEL (2 << 12) 50 #define DPTX_HPD_SEL_MASK (3 << 28) 51 52 #define CDN_FW_TIMEOUT_MS (64 * 1000) 53 #define CDN_DPCD_TIMEOUT_MS 5000 54 #define CDN_DP_FIRMWARE "rockchip/dptx.bin" 55 56 struct cdn_dp_data { 57 u8 max_phy; 58 }; 59 60 struct cdn_dp_data rk3399_cdn_dp = { 61 .max_phy = 2, 62 }; 63 64 static const struct of_device_id cdn_dp_dt_ids[] = { 65 { .compatible = "rockchip,rk3399-cdn-dp", 66 .data = (void *)&rk3399_cdn_dp }, 67 {} 68 }; 69 70 MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids); 71 72 static int cdn_dp_grf_write(struct cdn_dp_device *dp, 73 unsigned int reg, unsigned int val) 74 { 75 int ret; 76 77 ret = clk_prepare_enable(dp->grf_clk); 78 if (ret) { 79 DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n"); 80 return ret; 81 } 82 83 ret = regmap_write(dp->grf, reg, val); 84 if (ret) { 85 DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret); 86 return ret; 87 } 88 89 clk_disable_unprepare(dp->grf_clk); 90 91 return 0; 92 } 93 94 static int cdn_dp_clk_enable(struct cdn_dp_device *dp) 95 { 96 int ret; 97 unsigned long rate; 98 99 ret = clk_prepare_enable(dp->pclk); 100 if (ret < 0) { 101 DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret); 102 goto err_pclk; 103 } 104 105 ret = clk_prepare_enable(dp->core_clk); 106 if (ret < 0) { 107 DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret); 108 goto err_core_clk; 109 } 110 111 ret = pm_runtime_get_sync(dp->dev); 112 if (ret < 0) { 113 DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret); 114 goto err_pm_runtime_get; 115 } 116 117 reset_control_assert(dp->core_rst); 118 reset_control_assert(dp->dptx_rst); 119 reset_control_assert(dp->apb_rst); 120 reset_control_deassert(dp->core_rst); 121 reset_control_deassert(dp->dptx_rst); 122 reset_control_deassert(dp->apb_rst); 123 124 rate = clk_get_rate(dp->core_clk); 125 if (!rate) { 126 DRM_DEV_ERROR(dp->dev, "get clk rate failed\n"); 127 ret = -EINVAL; 128 goto err_set_rate; 129 } 130 131 cdn_dp_set_fw_clk(dp, rate); 132 cdn_dp_clock_reset(dp); 133 134 return 0; 135 136 err_set_rate: 137 pm_runtime_put(dp->dev); 138 err_pm_runtime_get: 139 clk_disable_unprepare(dp->core_clk); 140 err_core_clk: 141 clk_disable_unprepare(dp->pclk); 142 err_pclk: 143 return ret; 144 } 145 146 static void cdn_dp_clk_disable(struct cdn_dp_device *dp) 147 { 148 pm_runtime_put_sync(dp->dev); 149 clk_disable_unprepare(dp->pclk); 150 clk_disable_unprepare(dp->core_clk); 151 } 152 153 static int cdn_dp_get_port_lanes(struct cdn_dp_port *port) 154 { 155 struct extcon_dev *edev = port->extcon; 156 union extcon_property_value property; 157 int dptx; 158 u8 lanes; 159 160 dptx = extcon_get_state(edev, EXTCON_DISP_DP); 161 if (dptx > 0) { 162 extcon_get_property(edev, EXTCON_DISP_DP, 163 EXTCON_PROP_USB_SS, &property); 164 if (property.intval) 165 lanes = 2; 166 else 167 lanes = 4; 168 } else { 169 lanes = 0; 170 } 171 172 return lanes; 173 } 174 175 static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count) 176 { 177 int ret; 178 u8 value; 179 180 *sink_count = 0; 181 ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1); 182 if (ret) 183 return ret; 184 185 *sink_count = DP_GET_SINK_COUNT(value); 186 return 0; 187 } 188 189 static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp) 190 { 191 struct cdn_dp_port *port; 192 int i, lanes; 193 194 for (i = 0; i < dp->ports; i++) { 195 port = dp->port[i]; 196 lanes = cdn_dp_get_port_lanes(port); 197 if (lanes) 198 return port; 199 } 200 return NULL; 201 } 202 203 static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp) 204 { 205 unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS); 206 struct cdn_dp_port *port; 207 u8 sink_count = 0; 208 209 if (dp->active_port < 0 || dp->active_port >= dp->ports) { 210 DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n"); 211 return false; 212 } 213 214 port = dp->port[dp->active_port]; 215 216 /* 217 * Attempt to read sink count, retry in case the sink may not be ready. 218 * 219 * Sinks are *supposed* to come up within 1ms from an off state, but 220 * some docks need more time to power up. 221 */ 222 while (time_before(jiffies, timeout)) { 223 if (!extcon_get_state(port->extcon, EXTCON_DISP_DP)) 224 return false; 225 226 if (!cdn_dp_get_sink_count(dp, &sink_count)) 227 return sink_count ? true : false; 228 229 usleep_range(5000, 10000); 230 } 231 232 DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n"); 233 return false; 234 } 235 236 static enum drm_connector_status 237 cdn_dp_connector_detect(struct drm_connector *connector, bool force) 238 { 239 struct cdn_dp_device *dp = connector_to_dp(connector); 240 enum drm_connector_status status = connector_status_disconnected; 241 242 mutex_lock(&dp->lock); 243 if (dp->connected) 244 status = connector_status_connected; 245 mutex_unlock(&dp->lock); 246 247 return status; 248 } 249 250 static void cdn_dp_connector_destroy(struct drm_connector *connector) 251 { 252 drm_connector_unregister(connector); 253 drm_connector_cleanup(connector); 254 } 255 256 static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = { 257 .detect = cdn_dp_connector_detect, 258 .destroy = cdn_dp_connector_destroy, 259 .fill_modes = drm_helper_probe_single_connector_modes, 260 .reset = drm_atomic_helper_connector_reset, 261 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 262 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 263 }; 264 265 static int cdn_dp_connector_get_modes(struct drm_connector *connector) 266 { 267 struct cdn_dp_device *dp = connector_to_dp(connector); 268 struct edid *edid; 269 int ret = 0; 270 271 mutex_lock(&dp->lock); 272 edid = dp->edid; 273 if (edid) { 274 DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n", 275 edid->width_cm, edid->height_cm); 276 277 dp->sink_has_audio = drm_detect_monitor_audio(edid); 278 ret = drm_add_edid_modes(connector, edid); 279 if (ret) { 280 drm_mode_connector_update_edid_property(connector, 281 edid); 282 drm_edid_to_eld(connector, edid); 283 } 284 } 285 mutex_unlock(&dp->lock); 286 287 return ret; 288 } 289 290 static int cdn_dp_connector_mode_valid(struct drm_connector *connector, 291 struct drm_display_mode *mode) 292 { 293 struct cdn_dp_device *dp = connector_to_dp(connector); 294 struct drm_display_info *display_info = &dp->connector.display_info; 295 u32 requested, actual, rate, sink_max, source_max = 0; 296 u8 lanes, bpc; 297 298 /* If DP is disconnected, every mode is invalid */ 299 if (!dp->connected) 300 return MODE_BAD; 301 302 switch (display_info->bpc) { 303 case 10: 304 bpc = 10; 305 break; 306 case 6: 307 bpc = 6; 308 break; 309 default: 310 bpc = 8; 311 break; 312 } 313 314 requested = mode->clock * bpc * 3 / 1000; 315 316 source_max = dp->lanes; 317 sink_max = drm_dp_max_lane_count(dp->dpcd); 318 lanes = min(source_max, sink_max); 319 320 source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE); 321 sink_max = drm_dp_max_link_rate(dp->dpcd); 322 rate = min(source_max, sink_max); 323 324 actual = rate * lanes / 100; 325 326 /* efficiency is about 0.8 */ 327 actual = actual * 8 / 10; 328 329 if (requested > actual) { 330 DRM_DEV_DEBUG_KMS(dp->dev, 331 "requested=%d, actual=%d, clock=%d\n", 332 requested, actual, mode->clock); 333 return MODE_CLOCK_HIGH; 334 } 335 336 return MODE_OK; 337 } 338 339 static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = { 340 .get_modes = cdn_dp_connector_get_modes, 341 .mode_valid = cdn_dp_connector_mode_valid, 342 }; 343 344 static int cdn_dp_firmware_init(struct cdn_dp_device *dp) 345 { 346 int ret; 347 const u32 *iram_data, *dram_data; 348 const struct firmware *fw = dp->fw; 349 const struct cdn_firmware_header *hdr; 350 351 hdr = (struct cdn_firmware_header *)fw->data; 352 if (fw->size != le32_to_cpu(hdr->size_bytes)) { 353 DRM_DEV_ERROR(dp->dev, "firmware is invalid\n"); 354 return -EINVAL; 355 } 356 357 iram_data = (const u32 *)(fw->data + hdr->header_size); 358 dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size); 359 360 ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size, 361 dram_data, hdr->dram_size); 362 if (ret) 363 return ret; 364 365 ret = cdn_dp_set_firmware_active(dp, true); 366 if (ret) { 367 DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret); 368 return ret; 369 } 370 371 return cdn_dp_event_config(dp); 372 } 373 374 static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp) 375 { 376 int ret; 377 378 if (!cdn_dp_check_sink_connection(dp)) 379 return -ENODEV; 380 381 ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd, 382 DP_RECEIVER_CAP_SIZE); 383 if (ret) { 384 DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret); 385 return ret; 386 } 387 388 kfree(dp->edid); 389 dp->edid = drm_do_get_edid(&dp->connector, 390 cdn_dp_get_edid_block, dp); 391 return 0; 392 } 393 394 static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port) 395 { 396 union extcon_property_value property; 397 int ret; 398 399 ret = cdn_dp_grf_write(dp, GRF_SOC_CON26, 400 (port->id << UPHY_SEL_BIT) | UPHY_SEL_MASK); 401 if (ret) 402 return ret; 403 404 if (!port->phy_enabled) { 405 ret = phy_power_on(port->phy); 406 if (ret) { 407 DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n", 408 ret); 409 goto err_phy; 410 } 411 port->phy_enabled = true; 412 } 413 414 ret = cdn_dp_grf_write(dp, GRF_SOC_CON26, 415 DPTX_HPD_SEL_MASK | DPTX_HPD_SEL); 416 if (ret) { 417 DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret); 418 goto err_power_on; 419 } 420 421 ret = cdn_dp_get_hpd_status(dp); 422 if (ret <= 0) { 423 if (!ret) 424 DRM_DEV_ERROR(dp->dev, "hpd does not exist\n"); 425 goto err_power_on; 426 } 427 428 ret = extcon_get_property(port->extcon, EXTCON_DISP_DP, 429 EXTCON_PROP_USB_TYPEC_POLARITY, &property); 430 if (ret) { 431 DRM_DEV_ERROR(dp->dev, "get property failed\n"); 432 goto err_power_on; 433 } 434 435 port->lanes = cdn_dp_get_port_lanes(port); 436 ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval); 437 if (ret) { 438 DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n", 439 ret); 440 goto err_power_on; 441 } 442 443 dp->active_port = port->id; 444 return 0; 445 446 err_power_on: 447 if (phy_power_off(port->phy)) 448 DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret); 449 else 450 port->phy_enabled = false; 451 452 err_phy: 453 cdn_dp_grf_write(dp, GRF_SOC_CON26, 454 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL); 455 return ret; 456 } 457 458 static int cdn_dp_disable_phy(struct cdn_dp_device *dp, 459 struct cdn_dp_port *port) 460 { 461 int ret; 462 463 if (port->phy_enabled) { 464 ret = phy_power_off(port->phy); 465 if (ret) { 466 DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret); 467 return ret; 468 } 469 } 470 471 port->phy_enabled = false; 472 port->lanes = 0; 473 dp->active_port = -1; 474 return 0; 475 } 476 477 static int cdn_dp_disable(struct cdn_dp_device *dp) 478 { 479 int ret, i; 480 481 if (!dp->active) 482 return 0; 483 484 for (i = 0; i < dp->ports; i++) 485 cdn_dp_disable_phy(dp, dp->port[i]); 486 487 ret = cdn_dp_grf_write(dp, GRF_SOC_CON26, 488 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL); 489 if (ret) { 490 DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n", 491 ret); 492 return ret; 493 } 494 495 cdn_dp_set_firmware_active(dp, false); 496 cdn_dp_clk_disable(dp); 497 dp->active = false; 498 dp->link.rate = 0; 499 dp->link.num_lanes = 0; 500 if (!dp->connected) { 501 kfree(dp->edid); 502 dp->edid = NULL; 503 } 504 505 return 0; 506 } 507 508 static int cdn_dp_enable(struct cdn_dp_device *dp) 509 { 510 int ret, i, lanes; 511 struct cdn_dp_port *port; 512 513 port = cdn_dp_connected_port(dp); 514 if (!port) { 515 DRM_DEV_ERROR(dp->dev, 516 "Can't enable without connection\n"); 517 return -ENODEV; 518 } 519 520 if (dp->active) 521 return 0; 522 523 ret = cdn_dp_clk_enable(dp); 524 if (ret) 525 return ret; 526 527 ret = cdn_dp_firmware_init(dp); 528 if (ret) { 529 DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret); 530 goto err_clk_disable; 531 } 532 533 /* only enable the port that connected with downstream device */ 534 for (i = port->id; i < dp->ports; i++) { 535 port = dp->port[i]; 536 lanes = cdn_dp_get_port_lanes(port); 537 if (lanes) { 538 ret = cdn_dp_enable_phy(dp, port); 539 if (ret) 540 continue; 541 542 ret = cdn_dp_get_sink_capability(dp); 543 if (ret) { 544 cdn_dp_disable_phy(dp, port); 545 } else { 546 dp->active = true; 547 dp->lanes = port->lanes; 548 return 0; 549 } 550 } 551 } 552 553 err_clk_disable: 554 cdn_dp_clk_disable(dp); 555 return ret; 556 } 557 558 static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder, 559 struct drm_display_mode *mode, 560 struct drm_display_mode *adjusted) 561 { 562 struct cdn_dp_device *dp = encoder_to_dp(encoder); 563 struct drm_display_info *display_info = &dp->connector.display_info; 564 struct video_info *video = &dp->video_info; 565 566 switch (display_info->bpc) { 567 case 10: 568 video->color_depth = 10; 569 break; 570 case 6: 571 video->color_depth = 6; 572 break; 573 default: 574 video->color_depth = 8; 575 break; 576 } 577 578 video->color_fmt = PXL_RGB; 579 video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); 580 video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); 581 582 memcpy(&dp->mode, adjusted, sizeof(*mode)); 583 } 584 585 static bool cdn_dp_check_link_status(struct cdn_dp_device *dp) 586 { 587 u8 link_status[DP_LINK_STATUS_SIZE]; 588 struct cdn_dp_port *port = cdn_dp_connected_port(dp); 589 u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd); 590 591 if (!port || !dp->link.rate || !dp->link.num_lanes) 592 return false; 593 594 if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status, 595 DP_LINK_STATUS_SIZE)) { 596 DRM_ERROR("Failed to get link status\n"); 597 return false; 598 } 599 600 /* if link training is requested we should perform it always */ 601 return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes)); 602 } 603 604 static void cdn_dp_encoder_enable(struct drm_encoder *encoder) 605 { 606 struct cdn_dp_device *dp = encoder_to_dp(encoder); 607 int ret, val; 608 609 ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); 610 if (ret < 0) { 611 DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret); 612 return; 613 } 614 615 DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n", 616 (ret) ? "LIT" : "BIG"); 617 if (ret) 618 val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16); 619 else 620 val = DP_SEL_VOP_LIT << 16; 621 622 ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val); 623 if (ret) 624 return; 625 626 mutex_lock(&dp->lock); 627 628 ret = cdn_dp_enable(dp); 629 if (ret) { 630 DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n", 631 ret); 632 goto out; 633 } 634 if (!cdn_dp_check_link_status(dp)) { 635 ret = cdn_dp_train_link(dp); 636 if (ret) { 637 DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret); 638 goto out; 639 } 640 } 641 642 ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE); 643 if (ret) { 644 DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret); 645 goto out; 646 } 647 648 ret = cdn_dp_config_video(dp); 649 if (ret) { 650 DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret); 651 goto out; 652 } 653 654 ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID); 655 if (ret) { 656 DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret); 657 goto out; 658 } 659 out: 660 mutex_unlock(&dp->lock); 661 } 662 663 static void cdn_dp_encoder_disable(struct drm_encoder *encoder) 664 { 665 struct cdn_dp_device *dp = encoder_to_dp(encoder); 666 int ret; 667 668 mutex_lock(&dp->lock); 669 if (dp->active) { 670 ret = cdn_dp_disable(dp); 671 if (ret) { 672 DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n", 673 ret); 674 } 675 } 676 mutex_unlock(&dp->lock); 677 678 /* 679 * In the following 2 cases, we need to run the event_work to re-enable 680 * the DP: 681 * 1. If there is not just one port device is connected, and remove one 682 * device from a port, the DP will be disabled here, at this case, 683 * run the event_work to re-open DP for the other port. 684 * 2. If re-training or re-config failed, the DP will be disabled here. 685 * run the event_work to re-connect it. 686 */ 687 if (!dp->connected && cdn_dp_connected_port(dp)) 688 schedule_work(&dp->event_work); 689 } 690 691 static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder, 692 struct drm_crtc_state *crtc_state, 693 struct drm_connector_state *conn_state) 694 { 695 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); 696 697 s->output_mode = ROCKCHIP_OUT_MODE_AAAA; 698 s->output_type = DRM_MODE_CONNECTOR_DisplayPort; 699 700 return 0; 701 } 702 703 static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = { 704 .mode_set = cdn_dp_encoder_mode_set, 705 .enable = cdn_dp_encoder_enable, 706 .disable = cdn_dp_encoder_disable, 707 .atomic_check = cdn_dp_encoder_atomic_check, 708 }; 709 710 static const struct drm_encoder_funcs cdn_dp_encoder_funcs = { 711 .destroy = drm_encoder_cleanup, 712 }; 713 714 static int cdn_dp_parse_dt(struct cdn_dp_device *dp) 715 { 716 struct device *dev = dp->dev; 717 struct device_node *np = dev->of_node; 718 struct platform_device *pdev = to_platform_device(dev); 719 struct resource *res; 720 721 dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); 722 if (IS_ERR(dp->grf)) { 723 DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n"); 724 return PTR_ERR(dp->grf); 725 } 726 727 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 728 dp->regs = devm_ioremap_resource(dev, res); 729 if (IS_ERR(dp->regs)) { 730 DRM_DEV_ERROR(dev, "ioremap reg failed\n"); 731 return PTR_ERR(dp->regs); 732 } 733 734 dp->core_clk = devm_clk_get(dev, "core-clk"); 735 if (IS_ERR(dp->core_clk)) { 736 DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n"); 737 return PTR_ERR(dp->core_clk); 738 } 739 740 dp->pclk = devm_clk_get(dev, "pclk"); 741 if (IS_ERR(dp->pclk)) { 742 DRM_DEV_ERROR(dev, "cannot get pclk\n"); 743 return PTR_ERR(dp->pclk); 744 } 745 746 dp->spdif_clk = devm_clk_get(dev, "spdif"); 747 if (IS_ERR(dp->spdif_clk)) { 748 DRM_DEV_ERROR(dev, "cannot get spdif_clk\n"); 749 return PTR_ERR(dp->spdif_clk); 750 } 751 752 dp->grf_clk = devm_clk_get(dev, "grf"); 753 if (IS_ERR(dp->grf_clk)) { 754 DRM_DEV_ERROR(dev, "cannot get grf clk\n"); 755 return PTR_ERR(dp->grf_clk); 756 } 757 758 dp->spdif_rst = devm_reset_control_get(dev, "spdif"); 759 if (IS_ERR(dp->spdif_rst)) { 760 DRM_DEV_ERROR(dev, "no spdif reset control found\n"); 761 return PTR_ERR(dp->spdif_rst); 762 } 763 764 dp->dptx_rst = devm_reset_control_get(dev, "dptx"); 765 if (IS_ERR(dp->dptx_rst)) { 766 DRM_DEV_ERROR(dev, "no uphy reset control found\n"); 767 return PTR_ERR(dp->dptx_rst); 768 } 769 770 dp->core_rst = devm_reset_control_get(dev, "core"); 771 if (IS_ERR(dp->core_rst)) { 772 DRM_DEV_ERROR(dev, "no core reset control found\n"); 773 return PTR_ERR(dp->core_rst); 774 } 775 776 dp->apb_rst = devm_reset_control_get(dev, "apb"); 777 if (IS_ERR(dp->apb_rst)) { 778 DRM_DEV_ERROR(dev, "no apb reset control found\n"); 779 return PTR_ERR(dp->apb_rst); 780 } 781 782 return 0; 783 } 784 785 static int cdn_dp_audio_hw_params(struct device *dev, void *data, 786 struct hdmi_codec_daifmt *daifmt, 787 struct hdmi_codec_params *params) 788 { 789 struct cdn_dp_device *dp = dev_get_drvdata(dev); 790 struct audio_info audio = { 791 .sample_width = params->sample_width, 792 .sample_rate = params->sample_rate, 793 .channels = params->channels, 794 }; 795 int ret; 796 797 mutex_lock(&dp->lock); 798 if (!dp->active) { 799 ret = -ENODEV; 800 goto out; 801 } 802 803 switch (daifmt->fmt) { 804 case HDMI_I2S: 805 audio.format = AFMT_I2S; 806 break; 807 case HDMI_SPDIF: 808 audio.format = AFMT_SPDIF; 809 break; 810 default: 811 DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt); 812 ret = -EINVAL; 813 goto out; 814 } 815 816 ret = cdn_dp_audio_config(dp, &audio); 817 if (!ret) 818 dp->audio_info = audio; 819 820 out: 821 mutex_unlock(&dp->lock); 822 return ret; 823 } 824 825 static void cdn_dp_audio_shutdown(struct device *dev, void *data) 826 { 827 struct cdn_dp_device *dp = dev_get_drvdata(dev); 828 int ret; 829 830 mutex_lock(&dp->lock); 831 if (!dp->active) 832 goto out; 833 834 ret = cdn_dp_audio_stop(dp, &dp->audio_info); 835 if (!ret) 836 dp->audio_info.format = AFMT_UNUSED; 837 out: 838 mutex_unlock(&dp->lock); 839 } 840 841 static int cdn_dp_audio_digital_mute(struct device *dev, void *data, 842 bool enable) 843 { 844 struct cdn_dp_device *dp = dev_get_drvdata(dev); 845 int ret; 846 847 mutex_lock(&dp->lock); 848 if (!dp->active) { 849 ret = -ENODEV; 850 goto out; 851 } 852 853 ret = cdn_dp_audio_mute(dp, enable); 854 855 out: 856 mutex_unlock(&dp->lock); 857 return ret; 858 } 859 860 static int cdn_dp_audio_get_eld(struct device *dev, void *data, 861 u8 *buf, size_t len) 862 { 863 struct cdn_dp_device *dp = dev_get_drvdata(dev); 864 865 memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len)); 866 867 return 0; 868 } 869 870 static const struct hdmi_codec_ops audio_codec_ops = { 871 .hw_params = cdn_dp_audio_hw_params, 872 .audio_shutdown = cdn_dp_audio_shutdown, 873 .digital_mute = cdn_dp_audio_digital_mute, 874 .get_eld = cdn_dp_audio_get_eld, 875 }; 876 877 static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp, 878 struct device *dev) 879 { 880 struct hdmi_codec_pdata codec_data = { 881 .i2s = 1, 882 .spdif = 1, 883 .ops = &audio_codec_ops, 884 .max_i2s_channels = 8, 885 }; 886 887 dp->audio_pdev = platform_device_register_data( 888 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO, 889 &codec_data, sizeof(codec_data)); 890 891 return PTR_ERR_OR_ZERO(dp->audio_pdev); 892 } 893 894 static int cdn_dp_request_firmware(struct cdn_dp_device *dp) 895 { 896 int ret; 897 unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS); 898 unsigned long sleep = 1000; 899 900 WARN_ON(!mutex_is_locked(&dp->lock)); 901 902 if (dp->fw_loaded) 903 return 0; 904 905 /* Drop the lock before getting the firmware to avoid blocking boot */ 906 mutex_unlock(&dp->lock); 907 908 while (time_before(jiffies, timeout)) { 909 ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev); 910 if (ret == -ENOENT) { 911 msleep(sleep); 912 sleep *= 2; 913 continue; 914 } else if (ret) { 915 DRM_DEV_ERROR(dp->dev, 916 "failed to request firmware: %d\n", ret); 917 goto out; 918 } 919 920 dp->fw_loaded = true; 921 ret = 0; 922 goto out; 923 } 924 925 DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n"); 926 ret = -ETIMEDOUT; 927 out: 928 mutex_lock(&dp->lock); 929 return ret; 930 } 931 932 static void cdn_dp_pd_event_work(struct work_struct *work) 933 { 934 struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device, 935 event_work); 936 struct drm_connector *connector = &dp->connector; 937 enum drm_connector_status old_status; 938 939 int ret; 940 941 mutex_lock(&dp->lock); 942 943 if (dp->suspended) 944 goto out; 945 946 ret = cdn_dp_request_firmware(dp); 947 if (ret) 948 goto out; 949 950 dp->connected = true; 951 952 /* Not connected, notify userspace to disable the block */ 953 if (!cdn_dp_connected_port(dp)) { 954 DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n"); 955 dp->connected = false; 956 957 /* Connected but not enabled, enable the block */ 958 } else if (!dp->active) { 959 DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n"); 960 ret = cdn_dp_enable(dp); 961 if (ret) { 962 DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret); 963 dp->connected = false; 964 } 965 966 /* Enabled and connected to a dongle without a sink, notify userspace */ 967 } else if (!cdn_dp_check_sink_connection(dp)) { 968 DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n"); 969 dp->connected = false; 970 971 /* Enabled and connected with a sink, re-train if requested */ 972 } else if (!cdn_dp_check_link_status(dp)) { 973 unsigned int rate = dp->link.rate; 974 unsigned int lanes = dp->link.num_lanes; 975 struct drm_display_mode *mode = &dp->mode; 976 977 DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n"); 978 ret = cdn_dp_train_link(dp); 979 if (ret) { 980 dp->connected = false; 981 DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret); 982 goto out; 983 } 984 985 /* If training result is changed, update the video config */ 986 if (mode->clock && 987 (rate != dp->link.rate || lanes != dp->link.num_lanes)) { 988 ret = cdn_dp_config_video(dp); 989 if (ret) { 990 dp->connected = false; 991 DRM_DEV_ERROR(dp->dev, 992 "Failed to config video %d\n", 993 ret); 994 } 995 } 996 } 997 998 out: 999 mutex_unlock(&dp->lock); 1000 1001 old_status = connector->status; 1002 connector->status = connector->funcs->detect(connector, false); 1003 if (old_status != connector->status) 1004 drm_kms_helper_hotplug_event(dp->drm_dev); 1005 } 1006 1007 static int cdn_dp_pd_event(struct notifier_block *nb, 1008 unsigned long event, void *priv) 1009 { 1010 struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port, 1011 event_nb); 1012 struct cdn_dp_device *dp = port->dp; 1013 1014 /* 1015 * It would be nice to be able to just do the work inline right here. 1016 * However, we need to make a bunch of calls that might sleep in order 1017 * to turn on the block/phy, so use a worker instead. 1018 */ 1019 schedule_work(&dp->event_work); 1020 1021 return NOTIFY_DONE; 1022 } 1023 1024 static int cdn_dp_bind(struct device *dev, struct device *master, void *data) 1025 { 1026 struct cdn_dp_device *dp = dev_get_drvdata(dev); 1027 struct drm_encoder *encoder; 1028 struct drm_connector *connector; 1029 struct cdn_dp_port *port; 1030 struct drm_device *drm_dev = data; 1031 int ret, i; 1032 1033 ret = cdn_dp_parse_dt(dp); 1034 if (ret < 0) 1035 return ret; 1036 1037 dp->drm_dev = drm_dev; 1038 dp->connected = false; 1039 dp->active = false; 1040 dp->active_port = -1; 1041 dp->fw_loaded = false; 1042 1043 INIT_WORK(&dp->event_work, cdn_dp_pd_event_work); 1044 1045 encoder = &dp->encoder; 1046 1047 encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev, 1048 dev->of_node); 1049 DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); 1050 1051 ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs, 1052 DRM_MODE_ENCODER_TMDS, NULL); 1053 if (ret) { 1054 DRM_ERROR("failed to initialize encoder with drm\n"); 1055 return ret; 1056 } 1057 1058 drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs); 1059 1060 connector = &dp->connector; 1061 connector->polled = DRM_CONNECTOR_POLL_HPD; 1062 connector->dpms = DRM_MODE_DPMS_OFF; 1063 1064 ret = drm_connector_init(drm_dev, connector, 1065 &cdn_dp_atomic_connector_funcs, 1066 DRM_MODE_CONNECTOR_DisplayPort); 1067 if (ret) { 1068 DRM_ERROR("failed to initialize connector with drm\n"); 1069 goto err_free_encoder; 1070 } 1071 1072 drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs); 1073 1074 ret = drm_mode_connector_attach_encoder(connector, encoder); 1075 if (ret) { 1076 DRM_ERROR("failed to attach connector and encoder\n"); 1077 goto err_free_connector; 1078 } 1079 1080 for (i = 0; i < dp->ports; i++) { 1081 port = dp->port[i]; 1082 1083 port->event_nb.notifier_call = cdn_dp_pd_event; 1084 ret = devm_extcon_register_notifier(dp->dev, port->extcon, 1085 EXTCON_DISP_DP, 1086 &port->event_nb); 1087 if (ret) { 1088 DRM_DEV_ERROR(dev, 1089 "register EXTCON_DISP_DP notifier err\n"); 1090 goto err_free_connector; 1091 } 1092 } 1093 1094 pm_runtime_enable(dev); 1095 1096 schedule_work(&dp->event_work); 1097 1098 return 0; 1099 1100 err_free_connector: 1101 drm_connector_cleanup(connector); 1102 err_free_encoder: 1103 drm_encoder_cleanup(encoder); 1104 return ret; 1105 } 1106 1107 static void cdn_dp_unbind(struct device *dev, struct device *master, void *data) 1108 { 1109 struct cdn_dp_device *dp = dev_get_drvdata(dev); 1110 struct drm_encoder *encoder = &dp->encoder; 1111 struct drm_connector *connector = &dp->connector; 1112 1113 cancel_work_sync(&dp->event_work); 1114 cdn_dp_encoder_disable(encoder); 1115 encoder->funcs->destroy(encoder); 1116 connector->funcs->destroy(connector); 1117 1118 pm_runtime_disable(dev); 1119 if (dp->fw_loaded) 1120 release_firmware(dp->fw); 1121 kfree(dp->edid); 1122 dp->edid = NULL; 1123 } 1124 1125 static const struct component_ops cdn_dp_component_ops = { 1126 .bind = cdn_dp_bind, 1127 .unbind = cdn_dp_unbind, 1128 }; 1129 1130 int cdn_dp_suspend(struct device *dev) 1131 { 1132 struct cdn_dp_device *dp = dev_get_drvdata(dev); 1133 int ret = 0; 1134 1135 mutex_lock(&dp->lock); 1136 if (dp->active) 1137 ret = cdn_dp_disable(dp); 1138 dp->suspended = true; 1139 mutex_unlock(&dp->lock); 1140 1141 return ret; 1142 } 1143 1144 int cdn_dp_resume(struct device *dev) 1145 { 1146 struct cdn_dp_device *dp = dev_get_drvdata(dev); 1147 1148 mutex_lock(&dp->lock); 1149 dp->suspended = false; 1150 if (dp->fw_loaded) 1151 schedule_work(&dp->event_work); 1152 mutex_unlock(&dp->lock); 1153 1154 return 0; 1155 } 1156 1157 static int cdn_dp_probe(struct platform_device *pdev) 1158 { 1159 struct device *dev = &pdev->dev; 1160 const struct of_device_id *match; 1161 struct cdn_dp_data *dp_data; 1162 struct cdn_dp_port *port; 1163 struct cdn_dp_device *dp; 1164 struct extcon_dev *extcon; 1165 struct phy *phy; 1166 int i; 1167 1168 dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); 1169 if (!dp) 1170 return -ENOMEM; 1171 dp->dev = dev; 1172 1173 match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node); 1174 dp_data = (struct cdn_dp_data *)match->data; 1175 1176 for (i = 0; i < dp_data->max_phy; i++) { 1177 extcon = extcon_get_edev_by_phandle(dev, i); 1178 phy = devm_of_phy_get_by_index(dev, dev->of_node, i); 1179 1180 if (PTR_ERR(extcon) == -EPROBE_DEFER || 1181 PTR_ERR(phy) == -EPROBE_DEFER) 1182 return -EPROBE_DEFER; 1183 1184 if (IS_ERR(extcon) || IS_ERR(phy)) 1185 continue; 1186 1187 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); 1188 if (!port) 1189 return -ENOMEM; 1190 1191 port->extcon = extcon; 1192 port->phy = phy; 1193 port->dp = dp; 1194 port->id = i; 1195 dp->port[dp->ports++] = port; 1196 } 1197 1198 if (!dp->ports) { 1199 DRM_DEV_ERROR(dev, "missing extcon or phy\n"); 1200 return -EINVAL; 1201 } 1202 1203 mutex_init(&dp->lock); 1204 dev_set_drvdata(dev, dp); 1205 1206 cdn_dp_audio_codec_init(dp, dev); 1207 1208 return component_add(dev, &cdn_dp_component_ops); 1209 } 1210 1211 static int cdn_dp_remove(struct platform_device *pdev) 1212 { 1213 struct cdn_dp_device *dp = platform_get_drvdata(pdev); 1214 1215 platform_device_unregister(dp->audio_pdev); 1216 cdn_dp_suspend(dp->dev); 1217 component_del(&pdev->dev, &cdn_dp_component_ops); 1218 1219 return 0; 1220 } 1221 1222 static void cdn_dp_shutdown(struct platform_device *pdev) 1223 { 1224 struct cdn_dp_device *dp = platform_get_drvdata(pdev); 1225 1226 cdn_dp_suspend(dp->dev); 1227 } 1228 1229 static const struct dev_pm_ops cdn_dp_pm_ops = { 1230 SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend, 1231 cdn_dp_resume) 1232 }; 1233 1234 struct platform_driver cdn_dp_driver = { 1235 .probe = cdn_dp_probe, 1236 .remove = cdn_dp_remove, 1237 .shutdown = cdn_dp_shutdown, 1238 .driver = { 1239 .name = "cdn-dp", 1240 .owner = THIS_MODULE, 1241 .of_match_table = of_match_ptr(cdn_dp_dt_ids), 1242 .pm = &cdn_dp_pm_ops, 1243 }, 1244 }; 1245