// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence MHDP8546 DP bridge driver.
 *
 * Copyright (C) 2020 Cadence Design Systems, Inc.
 *
 * Authors: Quentin Schulz <quentin.schulz@free-electrons.com>
 *          Swapnil Jakhade <sjakhade@cadence.com>
 *          Yuti Amonkar <yamonkar@cadence.com>
 *          Tomi Valkeinen <tomi.valkeinen@ti.com>
 *          Jyri Sarha <jsarha@ti.com>
 *
 * TODO:
 *     - Implement optimized mailbox communication using mailbox interrupts
 *     - Add support for power management
 *     - Add support for features like audio, MST and fast link training
 *     - Implement request_fw_cancel to handle HW_STATE
 *     - Fix asynchronous loading of firmware implementation
 *     - Add DRM helper function for cdns_mhdp_lower_link_rate
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include <asm/unaligned.h>

#include "cdns-mhdp8546-core.h"
#include "cdns-mhdp8546-hdcp.h"
#include "cdns-mhdp8546-j721e.h"

/*
 * Read a single byte from the firmware mailbox RX FIFO.
 * Polls CDNS_MAILBOX_EMPTY until the FIFO holds data. The caller must
 * hold mbox_mutex (WARN_ON enforces this).
 * Returns the byte value (0-255) or a negative error code on poll timeout.
 */
static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
{
	int ret, empty;

	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));

	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
				 empty, !empty, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
}

/*
 * Write a single byte to the firmware mailbox TX FIFO.
 * Polls CDNS_MAILBOX_FULL until the FIFO has room. The caller must
 * hold mbox_mutex.
 * Returns 0 on success or a negative error code on poll timeout.
 */
static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
{
	int ret, full;

	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));

	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
				 full, !full, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);

	return 0;
}

/*
 * Read and validate a 4-byte mailbox response header:
 * byte 0 = opcode, byte 1 = module id, bytes 2-3 = big-endian payload size.
 * On a mismatch against the expected opcode/module/size the stale payload
 * is drained from the FIFO and -EINVAL is returned, so the mailbox is left
 * in a clean state for the next transaction.
 */
static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
					 u8 module_id, u8 opcode,
					 u16 req_size)
{
	u32 mbox_size, i;
	u8 header[4];
	int ret;

	/* read the header of the message */
	for (i = 0; i < sizeof(header); i++) {
		ret = cdns_mhdp_mailbox_read(mhdp);
		if (ret < 0)
			return ret;

		header[i] = ret;
	}

	mbox_size = get_unaligned_be16(header + 2);

	if (opcode != header[0] || module_id != header[1] ||
	    req_size != mbox_size) {
		/*
		 * If the message in mailbox is not what we want, we need to
		 * clear the mailbox by reading its contents.
		 */
		for (i = 0; i < mbox_size; i++)
			if (cdns_mhdp_mailbox_read(mhdp) < 0)
				break;

		return -EINVAL;
	}

	return 0;
}

/*
 * Read buff_size payload bytes from the mailbox into buff.
 * Returns 0 on success or the first negative error from the byte reads.
 */
static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
				       u8 *buff, u16 buff_size)
{
	u32 i;
	int ret;

	for (i = 0; i < buff_size; i++) {
		ret = cdns_mhdp_mailbox_read(mhdp);
		if (ret < 0)
			return ret;

		buff[i] = ret;
	}

	return 0;
}

/*
 * Send a complete mailbox message: the 4-byte header (opcode, module id,
 * big-endian 16-bit size) followed by "size" payload bytes from "message".
 * Returns 0 on success or the first negative error from the byte writes.
 */
static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
				  u8 opcode, u16 size, u8 *message)
{
	u8 header[4];
	int ret, i;

	header[0] = opcode;
	header[1] = module_id;
	put_unaligned_be16(size, header + 2);

	for (i = 0; i < sizeof(header); i++) {
		ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < size; i++) {
		ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Read a 32-bit controller register via the firmware mailbox
 * (GENERAL_REGISTER_READ). The 8-byte response echoes the requested
 * address in its first 4 bytes; a mismatch is rejected with -EINVAL.
 * On any failure *value is zeroed and an error is logged.
 */
static
int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
{
	u8 msg[4], resp[8];
	int ret;

	put_unaligned_be32(addr, msg);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
				     GENERAL_REGISTER_READ,
				     sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
					    GENERAL_REGISTER_READ,
					    sizeof(resp));
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
	if (ret)
		goto out;

	/* Returned address value should be the same as requested */
	if (memcmp(msg, resp, sizeof(msg))) {
		ret = -EINVAL;
		goto out;
	}

	*value = get_unaligned_be32(resp + 4);

out:
	mutex_unlock(&mhdp->mbox_mutex);
	if (ret) {
		dev_err(mhdp->dev, "Failed to read register\n");
		*value = 0;
	}

	return ret;
}

static
int
cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val) 211 { 212 u8 msg[6]; 213 int ret; 214 215 put_unaligned_be16(addr, msg); 216 put_unaligned_be32(val, msg + 2); 217 218 mutex_lock(&mhdp->mbox_mutex); 219 220 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, 221 DPTX_WRITE_REGISTER, sizeof(msg), msg); 222 223 mutex_unlock(&mhdp->mbox_mutex); 224 225 return ret; 226 } 227 228 static 229 int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr, 230 u8 start_bit, u8 bits_no, u32 val) 231 { 232 u8 field[8]; 233 int ret; 234 235 put_unaligned_be16(addr, field); 236 field[2] = start_bit; 237 field[3] = bits_no; 238 put_unaligned_be32(val, field + 4); 239 240 mutex_lock(&mhdp->mbox_mutex); 241 242 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, 243 DPTX_WRITE_FIELD, sizeof(field), field); 244 245 mutex_unlock(&mhdp->mbox_mutex); 246 247 return ret; 248 } 249 250 static 251 int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp, 252 u32 addr, u8 *data, u16 len) 253 { 254 u8 msg[5], reg[5]; 255 int ret; 256 257 put_unaligned_be16(len, msg); 258 put_unaligned_be24(addr, msg + 2); 259 260 mutex_lock(&mhdp->mbox_mutex); 261 262 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, 263 DPTX_READ_DPCD, sizeof(msg), msg); 264 if (ret) 265 goto out; 266 267 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, 268 DPTX_READ_DPCD, 269 sizeof(reg) + len); 270 if (ret) 271 goto out; 272 273 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg)); 274 if (ret) 275 goto out; 276 277 ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len); 278 279 out: 280 mutex_unlock(&mhdp->mbox_mutex); 281 282 return ret; 283 } 284 285 static 286 int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value) 287 { 288 u8 msg[6], reg[5]; 289 int ret; 290 291 put_unaligned_be16(1, msg); 292 put_unaligned_be24(addr, msg + 2); 293 msg[5] = value; 294 295 mutex_lock(&mhdp->mbox_mutex); 296 297 ret = cdns_mhdp_mailbox_send(mhdp, 
MB_MODULE_ID_DP_TX, 298 DPTX_WRITE_DPCD, sizeof(msg), msg); 299 if (ret) 300 goto out; 301 302 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, 303 DPTX_WRITE_DPCD, sizeof(reg)); 304 if (ret) 305 goto out; 306 307 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg)); 308 if (ret) 309 goto out; 310 311 if (addr != get_unaligned_be24(reg + 2)) 312 ret = -EINVAL; 313 314 out: 315 mutex_unlock(&mhdp->mbox_mutex); 316 317 if (ret) 318 dev_err(mhdp->dev, "dpcd write failed: %d\n", ret); 319 return ret; 320 } 321 322 static 323 int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable) 324 { 325 u8 msg[5]; 326 int ret, i; 327 328 msg[0] = GENERAL_MAIN_CONTROL; 329 msg[1] = MB_MODULE_ID_GENERAL; 330 msg[2] = 0; 331 msg[3] = 1; 332 msg[4] = enable ? FW_ACTIVE : FW_STANDBY; 333 334 mutex_lock(&mhdp->mbox_mutex); 335 336 for (i = 0; i < sizeof(msg); i++) { 337 ret = cdns_mhdp_mailbox_write(mhdp, msg[i]); 338 if (ret) 339 goto out; 340 } 341 342 /* read the firmware state */ 343 ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg)); 344 if (ret) 345 goto out; 346 347 ret = 0; 348 349 out: 350 mutex_unlock(&mhdp->mbox_mutex); 351 352 if (ret < 0) 353 dev_err(mhdp->dev, "set firmware active failed\n"); 354 return ret; 355 } 356 357 static 358 int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp) 359 { 360 u8 status; 361 int ret; 362 363 mutex_lock(&mhdp->mbox_mutex); 364 365 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, 366 DPTX_HPD_STATE, 0, NULL); 367 if (ret) 368 goto err_get_hpd; 369 370 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, 371 DPTX_HPD_STATE, 372 sizeof(status)); 373 if (ret) 374 goto err_get_hpd; 375 376 ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status)); 377 if (ret) 378 goto err_get_hpd; 379 380 mutex_unlock(&mhdp->mbox_mutex); 381 382 dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__, 383 status ? 
"" : "un"); 384 385 return status; 386 387 err_get_hpd: 388 mutex_unlock(&mhdp->mbox_mutex); 389 390 return ret; 391 } 392 393 static 394 int cdns_mhdp_get_edid_block(void *data, u8 *edid, 395 unsigned int block, size_t length) 396 { 397 struct cdns_mhdp_device *mhdp = data; 398 u8 msg[2], reg[2], i; 399 int ret; 400 401 mutex_lock(&mhdp->mbox_mutex); 402 403 for (i = 0; i < 4; i++) { 404 msg[0] = block / 2; 405 msg[1] = block % 2; 406 407 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, 408 DPTX_GET_EDID, sizeof(msg), msg); 409 if (ret) 410 continue; 411 412 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, 413 DPTX_GET_EDID, 414 sizeof(reg) + length); 415 if (ret) 416 continue; 417 418 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg)); 419 if (ret) 420 continue; 421 422 ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length); 423 if (ret) 424 continue; 425 426 if (reg[0] == length && reg[1] == block / 2) 427 break; 428 } 429 430 mutex_unlock(&mhdp->mbox_mutex); 431 432 if (ret) 433 dev_err(mhdp->dev, "get block[%d] edid failed: %d\n", 434 block, ret); 435 436 return ret; 437 } 438 439 static 440 int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp) 441 { 442 u8 event = 0; 443 int ret; 444 445 mutex_lock(&mhdp->mbox_mutex); 446 447 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, 448 DPTX_READ_EVENT, 0, NULL); 449 if (ret) 450 goto out; 451 452 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, 453 DPTX_READ_EVENT, sizeof(event)); 454 if (ret < 0) 455 goto out; 456 457 ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event)); 458 out: 459 mutex_unlock(&mhdp->mbox_mutex); 460 461 if (ret < 0) 462 return ret; 463 464 dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__, 465 (event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "", 466 (event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "", 467 (event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "", 468 (event & DPTX_READ_EVENT_HPD_STATE) ? 
"HPD_STATE " : ""); 469 470 return event; 471 } 472 473 static 474 int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes, 475 unsigned int udelay, const u8 *lanes_data, 476 u8 link_status[DP_LINK_STATUS_SIZE]) 477 { 478 u8 payload[7]; 479 u8 hdr[5]; /* For DPCD read response header */ 480 u32 addr; 481 int ret; 482 483 if (nlanes != 4 && nlanes != 2 && nlanes != 1) { 484 dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes); 485 ret = -EINVAL; 486 goto out; 487 } 488 489 payload[0] = nlanes; 490 put_unaligned_be16(udelay, payload + 1); 491 memcpy(payload + 3, lanes_data, nlanes); 492 493 mutex_lock(&mhdp->mbox_mutex); 494 495 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, 496 DPTX_ADJUST_LT, 497 sizeof(payload), payload); 498 if (ret) 499 goto out; 500 501 /* Yes, read the DPCD read command response */ 502 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, 503 DPTX_READ_DPCD, 504 sizeof(hdr) + DP_LINK_STATUS_SIZE); 505 if (ret) 506 goto out; 507 508 ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr)); 509 if (ret) 510 goto out; 511 512 addr = get_unaligned_be24(hdr + 2); 513 if (addr != DP_LANE0_1_STATUS) 514 goto out; 515 516 ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status, 517 DP_LINK_STATUS_SIZE); 518 519 out: 520 mutex_unlock(&mhdp->mbox_mutex); 521 522 if (ret) 523 dev_err(mhdp->dev, "Failed to adjust Link Training.\n"); 524 525 return ret; 526 } 527 528 /** 529 * cdns_mhdp_link_power_up() - power up a DisplayPort link 530 * @aux: DisplayPort AUX channel 531 * @link: pointer to a structure containing the link configuration 532 * 533 * Returns 0 on success or a negative error code on failure. 
534 */ 535 static 536 int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link) 537 { 538 u8 value; 539 int err; 540 541 /* DP_SET_POWER register is only available on DPCD v1.1 and later */ 542 if (link->revision < 0x11) 543 return 0; 544 545 err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); 546 if (err < 0) 547 return err; 548 549 value &= ~DP_SET_POWER_MASK; 550 value |= DP_SET_POWER_D0; 551 552 err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); 553 if (err < 0) 554 return err; 555 556 /* 557 * According to the DP 1.1 specification, a "Sink Device must exit the 558 * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink 559 * Control Field" (register 0x600). 560 */ 561 usleep_range(1000, 2000); 562 563 return 0; 564 } 565 566 /** 567 * cdns_mhdp_link_power_down() - power down a DisplayPort link 568 * @aux: DisplayPort AUX channel 569 * @link: pointer to a structure containing the link configuration 570 * 571 * Returns 0 on success or a negative error code on failure. 572 */ 573 static 574 int cdns_mhdp_link_power_down(struct drm_dp_aux *aux, 575 struct cdns_mhdp_link *link) 576 { 577 u8 value; 578 int err; 579 580 /* DP_SET_POWER register is only available on DPCD v1.1 and later */ 581 if (link->revision < 0x11) 582 return 0; 583 584 err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); 585 if (err < 0) 586 return err; 587 588 value &= ~DP_SET_POWER_MASK; 589 value |= DP_SET_POWER_D3; 590 591 err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); 592 if (err < 0) 593 return err; 594 595 return 0; 596 } 597 598 /** 599 * cdns_mhdp_link_configure() - configure a DisplayPort link 600 * @aux: DisplayPort AUX channel 601 * @link: pointer to a structure containing the link configuration 602 * 603 * Returns 0 on success or a negative error code on failure. 
 */
static
int cdns_mhdp_link_configure(struct drm_dp_aux *aux,
			     struct cdns_mhdp_link *link)
{
	u8 values[2];
	int err;

	/* Program DP_LINK_BW_SET and DP_LANE_COUNT_SET in one AUX write */
	values[0] = drm_dp_link_rate_to_bw_code(link->rate);
	values[1] = link->num_lanes;

	if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
		values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
	if (err < 0)
		return err;

	return 0;
}

/* Highest link rate supported by both host and sink. */
static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp)
{
	return min(mhdp->host.link_rate, mhdp->sink.link_rate);
}

/* Highest lane count supported by both host and sink. */
static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp)
{
	return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt);
}

/* Highest EQ training pattern (TPS) common to host and sink. */
static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp)
{
	return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp);
}

static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
{
	/* Check if SSC is supported by both sides */
	return mhdp->host.ssc && mhdp->sink.ssc;
}

/* Map the cached plugged flag to a DRM connector status. */
static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp)
{
	dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged);

	if (mhdp->plugged)
		return connector_status_connected;
	else
		return connector_status_disconnected;
}

/*
 * Read the firmware/library version registers and log the decoded
 * version. Returns 0 if the version is recognized, -ENODEV otherwise.
 */
static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp)
{
	u32 major_num, minor_num, revision;
	u32 fw_ver, lib_ver;

	fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8)
	       | readl(mhdp->regs + CDNS_VER_L);

	lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8)
		| readl(mhdp->regs + CDNS_LIB_L_ADDR);

	if (lib_ver < 33984) {
		/*
		 * Older FW versions with major number 1, used to store FW
		 * version information by storing repository revision number
		 * in registers. This is for identifying these FW versions.
		 */
		major_num = 1;
		minor_num = 2;
		if (fw_ver == 26098) {
			revision = 15;
		} else if (lib_ver == 0 && fw_ver == 0) {
			revision = 17;
		} else {
			dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
				fw_ver, lib_ver);
			return -ENODEV;
		}
	} else {
		/* To identify newer FW versions with major number 2 onwards. */
		major_num = fw_ver / 10000;
		minor_num = (fw_ver / 100) % 100;
		revision = (fw_ver % 10000) % 100;
	}

	dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num,
		revision);
	return 0;
}

/*
 * Load the firmware image into the controller IMEM, boot the embedded
 * uCPU, wait for its keep-alive heartbeat, verify the FW version and
 * activate it. On success the hardware state becomes MHDP_HW_READY and
 * SW-event interrupts are unmasked if the bridge is already attached.
 */
static int cdns_mhdp_fw_activate(const struct firmware *fw,
				 struct cdns_mhdp_device *mhdp)
{
	unsigned int reg;
	int ret;

	/* Release uCPU reset and stall it. */
	writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);

	memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);

	/* Leave debug mode, release stall */
	writel(0, mhdp->regs + CDNS_APB_CTRL);

	/*
	 * Wait for the KEEP_ALIVE "message" on the first 8 bits.
	 * Updated each sched "tick" (~2ms)
	 */
	ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
				 reg & CDNS_KEEP_ALIVE_MASK, 500,
				 CDNS_KEEP_ALIVE_TIMEOUT);
	if (ret) {
		dev_err(mhdp->dev,
			"device didn't give any life sign: reg %d\n", reg);
		return ret;
	}

	ret = cdns_mhdp_check_fw_version(mhdp);
	if (ret)
		return ret;

	/* Init events to 0 as it's not cleared by FW at boot but on read */
	readl(mhdp->regs + CDNS_SW_EVENT0);
	readl(mhdp->regs + CDNS_SW_EVENT1);
	readl(mhdp->regs + CDNS_SW_EVENT2);
	readl(mhdp->regs + CDNS_SW_EVENT3);

	/* Activate uCPU */
	ret = cdns_mhdp_set_firmware_active(mhdp, true);
	if (ret)
		return ret;

	spin_lock(&mhdp->start_lock);

	mhdp->hw_state = MHDP_HW_READY;

	/*
	 * Here we must keep the lock while enabling the interrupts
	 * since it would otherwise be possible that interrupt enable
	 * code is executed after the bridge is detached. The similar
	 * situation is not possible in attach()/detach() callbacks
	 * since the hw_state changes from MHDP_HW_READY to
	 * MHDP_HW_STOPPED happens only due to driver removal when
	 * bridge should already be detached.
	 */
	if (mhdp->bridge_attached)
		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
		       mhdp->regs + CDNS_APB_INT_MASK);

	spin_unlock(&mhdp->start_lock);

	wake_up(&mhdp->fw_load_wq);
	dev_dbg(mhdp->dev, "DP FW activated\n");

	return 0;
}

/*
 * request_firmware_nowait() completion callback: activate the firmware
 * and, if the bridge is attached, report the (possibly changed) HPD
 * state to DRM.
 */
static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
{
	struct cdns_mhdp_device *mhdp = context;
	bool bridge_attached;
	int ret;

	dev_dbg(mhdp->dev, "firmware callback\n");

	if (!fw || !fw->data) {
		dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
		return;
	}

	ret = cdns_mhdp_fw_activate(fw, mhdp);

	release_firmware(fw);

	if (ret)
		return;

	/*
	 * XXX how to make sure the bridge is still attached when
	 * calling drm_kms_helper_hotplug_event() after releasing
	 * the lock? We should not hold the spin lock when
	 * calling drm_kms_helper_hotplug_event() since it may
	 * cause a dead lock. FB-dev console calls detect from the
	 * same thread just down the call stack started here.
	 */
	spin_lock(&mhdp->start_lock);
	bridge_attached = mhdp->bridge_attached;
	spin_unlock(&mhdp->start_lock);
	if (bridge_attached) {
		if (mhdp->connector.dev)
			drm_kms_helper_hotplug_event(mhdp->bridge.dev);
		else
			drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
	}
}

/* Kick off the asynchronous firmware load; cdns_mhdp_fw_cb completes it. */
static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
{
	int ret;

	ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
				      GFP_KERNEL, mhdp, cdns_mhdp_fw_cb);
	if (ret) {
		dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
			FW_NAME, ret);
		return ret;
	}

	return 0;
}

/*
 * DRM AUX transfer hook. Only native DPCD reads/writes are supported;
 * I2C-over-AUX requests are rejected with -EOPNOTSUPP. Writes are issued
 * one byte at a time through the firmware mailbox.
 */
static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
	int ret;

	if (msg->request != DP_AUX_NATIVE_WRITE &&
	    msg->request != DP_AUX_NATIVE_READ)
		return -EOPNOTSUPP;

	if (msg->request == DP_AUX_NATIVE_WRITE) {
		const u8 *buf = msg->buffer;
		unsigned int i;

		for (i = 0; i < msg->size; ++i) {
			ret = cdns_mhdp_dpcd_write(mhdp,
						   msg->address + i, buf[i]);
			if (!ret)
				continue;

			dev_err(mhdp->dev,
				"Failed to write DPCD addr %u\n",
				msg->address + i);

			return ret;
		}
	} else {
		ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
					  msg->buffer, msg->size);
		if (ret) {
			dev_err(mhdp->dev,
				"Failed to read DPCD addr %u\n",
				msg->address);

			return ret;
		}
	}

	return msg->size;
}

/*
 * Prepare link training: disable any active training pattern on the
 * sink, reset the PHY configuration, program lane count / enhanced
 * framing, configure the PHY rate and lane settings, then start TPS1.
 */
static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
{
	union phy_configure_opts phy_cfg;
	u32 reg32;
	int ret;

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	/* Reset PHY configuration */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
			    mhdp->sink.enhanced & mhdp->host.enhanced);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
			    CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));

	cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link);
	phy_cfg.dp.link_rate = mhdp->link.rate / 100;
	phy_cfg.dp.lanes = mhdp->link.num_lanes;

	memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage));
	memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre));

	phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
	phy_cfg.dp.set_lanes = true;
	phy_cfg.dp.set_rate = true;
	phy_cfg.dp.set_voltages = true;
	ret = phy_configure(mhdp->phy, &phy_cfg);
	if (ret) {
		dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
			__func__, ret);
		return ret;
	}

	/* Enable TPS1 on the PHY side with scrambling bypassed */
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
			    CDNS_PHY_COMMON_CONFIG |
			    CDNS_PHY_TRAINING_EN |
			    CDNS_PHY_TRAINING_TYPE(1) |
			    CDNS_PHY_SCRAMBLER_BYPASS);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);

	return 0;
}

/*
 * Translate the sink's per-lane voltage-swing / pre-emphasis adjustment
 * requests (from link_status) into host-clamped drive settings, filling
 * both the firmware lanes_data bytes and the PHY configuration.
 */
static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
				       u8 link_status[DP_LINK_STATUS_SIZE],
				       u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
				       union phy_configure_opts *phy_cfg)
{
	u8 adjust, max_pre_emph, max_volt_swing;
	u8 set_volt, set_pre;
	unsigned int i;

	max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
			   << DP_TRAIN_PRE_EMPHASIS_SHIFT;
	max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		/* Check if Voltage swing and pre-emphasis are within limits */
		adjust = drm_dp_get_adjust_request_voltage(link_status, i);
		set_volt = min(adjust, max_volt_swing);

		adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
		set_pre = min(adjust, max_pre_emph)
			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT;

		/*
		 * Voltage swing level and pre-emphasis level combination is
		 * not allowed: leaving pre-emphasis as-is, and adjusting
		 * voltage swing.
		 */
		if (set_volt + set_pre > 3)
			set_volt = 3 - set_pre;

		phy_cfg->dp.voltage[i] = set_volt;
		lanes_data[i] = set_volt;

		if (set_volt == max_volt_swing)
			lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;

		phy_cfg->dp.pre[i] = set_pre;
		lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);

		if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
			lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
	}
}

/*
 * Overwrite the voltage-swing adjustment request field for one lane in
 * a cached DP_LANE0_1_STATUS..ADJUST_REQUEST block.
 */
static
void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
					  unsigned int lane, u8 volt)
{
	unsigned int s = ((lane & 1) ?
			  DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
			  DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
	unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);

	link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
	link_status[idx] |= volt << s;
}

/*
 * Overwrite the pre-emphasis adjustment request field for one lane in
 * a cached DP_LANE0_1_STATUS..ADJUST_REQUEST block.
 */
static
void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
					       unsigned int lane, u8 pre_emphasis)
{
	unsigned int s = ((lane & 1) ?
			  DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
			  DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
	unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);

	link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
	link_status[idx] |= pre_emphasis << s;
}

/*
 * Sanitize the sink's EQ-phase adjustment requests in place: keep
 * volt+pre within the legal sum of 3 and apply any host-forced
 * voltage-swing / pre-emphasis overrides.
 */
static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
					  u8 link_status[DP_LINK_STATUS_SIZE])
{
	u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
	u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
	unsigned int i;
	u8 volt, pre;

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		volt = drm_dp_get_adjust_request_voltage(link_status, i);
		pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
		if (volt + pre > 3)
			cdns_mhdp_set_adjust_request_voltage(link_status, i,
							     3 - pre);
		if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
			cdns_mhdp_set_adjust_request_voltage(link_status, i,
							     max_volt);
		if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
			cdns_mhdp_set_adjust_request_pre_emphasis(link_status,
								  i, max_pre);
	}
}

/*
 * Debug helper: print link-training status as "v0/v1/v2/v3" style
 * voltage-swing and pre-emphasis strings, truncated to num_lanes.
 * Assumes num_lanes >= 1 (otherwise the '\0' write would underflow).
 */
static void cdns_mhdp_print_lt_status(const char *prefix,
				      struct cdns_mhdp_device *mhdp,
				      union phy_configure_opts *phy_cfg)
{
	char vs[8] = "0/0/0/0";
	char pe[8] = "0/0/0/0";
	unsigned int i;

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		vs[i * 2] = '0' + phy_cfg->dp.voltage[i];
		pe[i * 2] = '0' + phy_cfg->dp.pre[i];
	}

	vs[i * 2 - 1] = '\0';
	pe[i * 2 - 1] = '\0';

	dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
		prefix,
		mhdp->link.num_lanes, mhdp->link.rate / 100,
		vs, pe);
}

/*
 * Channel-equalization phase of link training using pattern eq_tps
 * (TPS2/3/4 as negotiated). Up to 5 adjustment iterations; returns true
 * when drm_dp_channel_eq_ok() reports success, false on any failure
 * (including loss of clock recovery).
 */
static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
					       u8 eq_tps,
					       unsigned int training_interval)
{
	u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
	u8 link_status[DP_LINK_STATUS_SIZE];
	union phy_configure_opts phy_cfg;
	u32 reg32;
	int ret;
	bool r;

	dev_dbg(mhdp->dev, "Starting EQ phase\n");

	/* Enable link training TPS[eq_tps] in PHY */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
		CDNS_PHY_TRAINING_TYPE(eq_tps);
	if (eq_tps != 4)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
			   CDNS_DP_TRAINING_PATTERN_4);

	/* NOTE(review): return value ignored; a failed AUX read would leave
	 * link_status stale/uninitialized here — consider checking it. */
	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);

	do {
		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
					   &phy_cfg);
		phy_cfg.dp.lanes = mhdp->link.num_lanes;
		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
		phy_cfg.dp.set_lanes = false;
		phy_cfg.dp.set_rate = false;
		phy_cfg.dp.set_voltages = true;
		ret = phy_configure(mhdp->phy, &phy_cfg);
		if (ret) {
			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
				__func__, ret);
			goto err;
		}

		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
				    training_interval, lanes_data, link_status);

		r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes);
		if (!r)
			goto err;

		if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
			cdns_mhdp_print_lt_status("EQ phase ok", mhdp,
						  &phy_cfg);
			return true;
		}

		fail_counter_short++;

		cdns_mhdp_adjust_requested_eq(mhdp, link_status);
	} while (fail_counter_short < 5);

err:
	cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg);

	return false;
}

/*
 * Rewrite the CR-phase adjustment requests in link_status with the
 * (already host-clamped) values in req_volt/req_pre, honoring any
 * host-forced voltage-swing / pre-emphasis overrides.
 */
static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
					  u8 link_status[DP_LINK_STATUS_SIZE],
					  u8 *req_volt, u8 *req_pre)
{
	const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
	const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
	unsigned int i;

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		u8 val;

		val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
		      max_volt : req_volt[i];
		cdns_mhdp_set_adjust_request_voltage(link_status, i, val);

		val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
		      max_pre : req_pre[i];
		cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
	}
}

/*
 * Evaluate one clock-recovery iteration: reports whether CR is done,
 * whether the sink requested the same drive settings as already applied
 * (before_cr vs after_cr), and whether the maximum allowed swing has
 * been reached. Also returns the clamped per-lane requests via
 * req_volt/req_pre.
 */
static
void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
			   bool *same_before_adjust, bool *max_swing_reached,
			   u8 before_cr[CDNS_DP_MAX_NUM_LANES],
			   u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
			   u8 *req_pre)
{
	const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
	const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
	bool same_pre, same_volt;
	unsigned int i;
	u8 adjust;

	*same_before_adjust = false;
	*max_swing_reached = false;
	*cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		adjust = drm_dp_get_adjust_request_voltage(after_cr, i);
		req_volt[i] = min(adjust, max_volt);

		adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
			 DP_TRAIN_PRE_EMPHASIS_SHIFT;
		req_pre[i] = min(adjust, max_pre);

		same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
			   req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
		same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
			    req_volt[i];
		if (same_pre && same_volt)
			*same_before_adjust = true;

		/* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
		if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
			*max_swing_reached = true;
			return;
		}
	}
}

/*
 * Clock-recovery phase of link training (TPS1). Iterates until CR is
 * achieved, or aborts after 5 consecutive iterations with unchanged
 * requests, 10 total failed iterations, or when maximum swing is hit.
 */
static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
{
	u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
	fail_counter_short = 0, fail_counter_cr_long = 0;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool cr_done;
	union phy_configure_opts phy_cfg;
	int ret;

	dev_dbg(mhdp->dev, "Starting CR phase\n");

	ret = cdns_mhdp_link_training_init(mhdp);
	if (ret)
		goto err;

	/* NOTE(review): return value ignored, as in the EQ phase. */
	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);

	do {
		u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
		u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
		bool same_before_adjust, max_swing_reached;

		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
					   &phy_cfg);
		phy_cfg.dp.lanes = mhdp->link.num_lanes;
		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
		phy_cfg.dp.set_lanes = false;
		phy_cfg.dp.set_rate = false;
		phy_cfg.dp.set_voltages = true;
		ret = phy_configure(mhdp->phy, &phy_cfg);
		if (ret) {
			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
				__func__, ret);
			goto err;
		}

		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
				    lanes_data, link_status);

		cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
				      &max_swing_reached, lanes_data,
				      link_status,
				      requested_adjust_volt_swing,
				      requested_adjust_pre_emphasis);

		if (max_swing_reached) {
			dev_err(mhdp->dev, "CR: max swing reached\n");
			goto err;
		}

		if (cr_done) {
			cdns_mhdp_print_lt_status("CR phase ok", mhdp,
						  &phy_cfg);
			return true;
		}

		/* Not all CR_DONE bits set */
		fail_counter_cr_long++;

		if (same_before_adjust) {
			fail_counter_short++;
			continue;
		}

		fail_counter_short = 0;
		/*
		 * Voltage swing/pre-emphasis adjust requested
		 * during CR phase
		 */
		cdns_mhdp_adjust_requested_cr(mhdp, link_status,
					      requested_adjust_volt_swing,
					      requested_adjust_pre_emphasis);
	} while (fail_counter_short < 5 && fail_counter_cr_long < 10);

err:
	cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg);

	return false;
}

/* Step the link rate down one notch: 8.1 -> 5.4 -> 2.7 -> 1.62 Gbps. */
static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link)
{
	switch (drm_dp_link_rate_to_bw_code(link->rate)) {
	case DP_LINK_BW_2_7:
		link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
		break;
	case DP_LINK_BW_5_4:
		link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
		break;
	case DP_LINK_BW_8_1:
		link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
		break;
	}
}

/*
 * Full link training: run CR then EQ, falling back to lower rates and
 * lane counts on failure per the DP spec. On success, programs the
 * framer with the final lane count and resets the PHY training config;
 * on failure, resets the PHY and disables the sink's training pattern.
 * Returns 0 on success, negative error code otherwise.
 */
static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp,
				   unsigned int training_interval)
{
	u32 reg32;
	const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp);
	int ret;

	while (1) {
		if (!cdns_mhdp_link_training_cr(mhdp)) {
			/* CR failed: first try a lower rate, then fewer lanes */
			if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
			    DP_LINK_BW_1_62) {
				dev_dbg(mhdp->dev,
					"Reducing link rate during CR phase\n");
				cdns_mhdp_lower_link_rate(&mhdp->link);

				continue;
			} else if (mhdp->link.num_lanes > 1) {
				dev_dbg(mhdp->dev,
					"Reducing lanes number during CR phase\n");
				mhdp->link.num_lanes >>= 1;
				mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);

				continue;
			}

			dev_err(mhdp->dev,
				"Link training failed during CR phase\n");
			goto err;
		}

		if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps,
						       training_interval))
			break;

		/* EQ failed: first try fewer lanes, then a lower rate */
		if (mhdp->link.num_lanes > 1) {
			dev_dbg(mhdp->dev,
				"Reducing lanes number during EQ phase\n");
			mhdp->link.num_lanes >>= 1;

			continue;
		} else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
			   DP_LINK_BW_1_62) {
			dev_dbg(mhdp->dev,
				"Reducing link rate during EQ phase\n");
			cdns_mhdp_lower_link_rate(&mhdp->link);
			mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);

			continue;
		}

		dev_err(mhdp->dev, "Link training failed during EQ phase\n");
		goto err;
	}

	dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n",
		mhdp->link.num_lanes, mhdp->link.rate / 100);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   mhdp->host.scrambler ? 0 :
			   DP_LINK_SCRAMBLING_DISABLE);

	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
	if (ret < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			ret);
		return ret;
	}
	reg32 &= ~GENMASK(1, 0);
	reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
	reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
	reg32 |= CDNS_DP_FRAMER_EN;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);

	/* Reset PHY config */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	return 0;
err:
	/* Reset PHY config */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	return -EIO;
}

/*
 * Convert the DPCD TRAINING_AUX_RD_INTERVAL code to microseconds:
 * 0 -> 400us, 1..4 -> 4ms/8ms/12ms/16ms per the DP spec.
 * (Function continues beyond the visible end of this chunk.)
 */
static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp,
					      u32 interval)
{
	if (interval == 0)
		return 400;
	if (interval < 5)
		return 4000 << (interval - 1);
	dev_err(mhdp->dev,
		"wrong training interval returned by DPCD: %d\n", interval);
return 0; 1350 } 1351 1352 static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp) 1353 { 1354 unsigned int link_rate; 1355 1356 /* Get source capabilities based on PHY attributes */ 1357 1358 mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width; 1359 if (!mhdp->host.lanes_cnt) 1360 mhdp->host.lanes_cnt = 4; 1361 1362 link_rate = mhdp->phy->attrs.max_link_rate; 1363 if (!link_rate) 1364 link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1); 1365 else 1366 /* PHY uses Mb/s, DRM uses tens of kb/s. */ 1367 link_rate *= 100; 1368 1369 mhdp->host.link_rate = link_rate; 1370 mhdp->host.volt_swing = CDNS_VOLT_SWING(3); 1371 mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3); 1372 mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) | 1373 CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) | 1374 CDNS_SUPPORT_TPS(4); 1375 mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL; 1376 mhdp->host.fast_link = false; 1377 mhdp->host.enhanced = true; 1378 mhdp->host.scrambler = true; 1379 mhdp->host.ssc = false; 1380 } 1381 1382 static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp, 1383 u8 dpcd[DP_RECEIVER_CAP_SIZE]) 1384 { 1385 mhdp->sink.link_rate = mhdp->link.rate; 1386 mhdp->sink.lanes_cnt = mhdp->link.num_lanes; 1387 mhdp->sink.enhanced = !!(mhdp->link.capabilities & 1388 DP_LINK_CAP_ENHANCED_FRAMING); 1389 1390 /* Set SSC support */ 1391 mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] & 1392 DP_MAX_DOWNSPREAD_0_5); 1393 1394 /* Set TPS support */ 1395 mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2); 1396 if (drm_dp_tps3_supported(dpcd)) 1397 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3); 1398 if (drm_dp_tps4_supported(dpcd)) 1399 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4); 1400 1401 /* Set fast link support */ 1402 mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] & 1403 DP_NO_AUX_HANDSHAKE_LINK_TRAINING); 1404 } 1405 1406 static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp) 1407 { 1408 u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2]; 1409 u32 resp, 
interval, interval_us; 1410 u8 ext_cap_chk = 0; 1411 unsigned int addr; 1412 int err; 1413 1414 WARN_ON(!mutex_is_locked(&mhdp->link_mutex)); 1415 1416 drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL, 1417 &ext_cap_chk); 1418 1419 if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT) 1420 addr = DP_DP13_DPCD_REV; 1421 else 1422 addr = DP_DPCD_REV; 1423 1424 err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE); 1425 if (err < 0) { 1426 dev_err(mhdp->dev, "Failed to read receiver capabilities\n"); 1427 return err; 1428 } 1429 1430 mhdp->link.revision = dpcd[0]; 1431 mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]); 1432 mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK; 1433 1434 if (dpcd[2] & DP_ENHANCED_FRAME_CAP) 1435 mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; 1436 1437 dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n"); 1438 cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link); 1439 1440 cdns_mhdp_fill_sink_caps(mhdp, dpcd); 1441 1442 mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp); 1443 mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp); 1444 1445 /* Disable framer for link training */ 1446 err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp); 1447 if (err < 0) { 1448 dev_err(mhdp->dev, 1449 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n", 1450 err); 1451 return err; 1452 } 1453 1454 resp &= ~CDNS_DP_FRAMER_EN; 1455 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp); 1456 1457 /* Spread AMP if required, enable 8b/10b coding */ 1458 amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? 
DP_SPREAD_AMP_0_5 : 0; 1459 amp[1] = DP_SET_ANSI_8B10B; 1460 drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2); 1461 1462 if (mhdp->host.fast_link & mhdp->sink.fast_link) { 1463 dev_err(mhdp->dev, "fastlink not supported\n"); 1464 return -EOPNOTSUPP; 1465 } 1466 1467 interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK; 1468 interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval); 1469 if (!interval_us || 1470 cdns_mhdp_link_training(mhdp, interval_us)) { 1471 dev_err(mhdp->dev, "Link training failed. Exiting.\n"); 1472 return -EIO; 1473 } 1474 1475 mhdp->link_up = true; 1476 1477 return 0; 1478 } 1479 1480 static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp) 1481 { 1482 WARN_ON(!mutex_is_locked(&mhdp->link_mutex)); 1483 1484 if (mhdp->plugged) 1485 cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link); 1486 1487 mhdp->link_up = false; 1488 } 1489 1490 static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp, 1491 struct drm_connector *connector) 1492 { 1493 if (!mhdp->plugged) 1494 return NULL; 1495 1496 return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp); 1497 } 1498 1499 static int cdns_mhdp_get_modes(struct drm_connector *connector) 1500 { 1501 struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector); 1502 struct edid *edid; 1503 int num_modes; 1504 1505 if (!mhdp->plugged) 1506 return 0; 1507 1508 edid = cdns_mhdp_get_edid(mhdp, connector); 1509 if (!edid) { 1510 dev_err(mhdp->dev, "Failed to read EDID\n"); 1511 return 0; 1512 } 1513 1514 drm_connector_update_edid_property(connector, edid); 1515 num_modes = drm_add_edid_modes(connector, edid); 1516 kfree(edid); 1517 1518 /* 1519 * HACK: Warn about unsupported display formats until we deal 1520 * with them correctly. 
*/
	if (connector->display_info.color_formats &&
	    !(connector->display_info.color_formats &
	      mhdp->display_fmt.color_format))
		dev_warn(mhdp->dev,
			 "%s: No supported color_format found (0x%08x)\n",
			 __func__, connector->display_info.color_formats);

	if (connector->display_info.bpc &&
	    connector->display_info.bpc < mhdp->display_fmt.bpc)
		dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
			 __func__, connector->display_info.bpc,
			 mhdp->display_fmt.bpc);

	return num_modes;
}

/* Connector .detect_ctx: delegate hot-plug detection to the device. */
static int cdns_mhdp_connector_detect(struct drm_connector *conn,
				      struct drm_modeset_acquire_ctx *ctx,
				      bool force)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);

	return cdns_mhdp_detect(mhdp);
}

/*
 * Bits per pixel for the configured display format: bpc alone for
 * Y-only, bpc*3 for RGB/YCbCr444, bpc*2 for YCbCr422 and bpc*3/2 for
 * YCbCr420 (RGB assumed, with a warning, for unknown formats).
 */
static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
{
	u32 bpp;

	if (fmt->y_only)
		return fmt->bpc;

	switch (fmt->color_format) {
	case DRM_COLOR_FORMAT_RGB444:
	case DRM_COLOR_FORMAT_YCRCB444:
		bpp = fmt->bpc * 3;
		break;
	case DRM_COLOR_FORMAT_YCRCB422:
		bpp = fmt->bpc * 2;
		break;
	case DRM_COLOR_FORMAT_YCRCB420:
		bpp = fmt->bpc * 3 / 2;
		break;
	default:
		bpp = fmt->bpc * 3;
		WARN_ON(1);
	}
	return bpp;
}

/*
 * Check that the mode fits in the available link bandwidth for the
 * given lane count and per-lane rate.
 */
static
bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
			    const struct drm_display_mode *mode,
			    unsigned int lanes, unsigned int rate)
{
	u32 max_bw, req_bw, bpp;

	/*
	 * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8
	 * we get the number of kB/s. DisplayPort applies a 8b-10b encoding, the
	 * value thus equals the bandwidth in 10kb/s units, which matches the
	 * units of the rate parameter.
	 */

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
	req_bw = mode->clock * bpp / 8;
	max_bw = lanes * rate;
	if (req_bw > max_bw) {
		dev_dbg(mhdp->dev,
			"Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n",
			mode->name, req_bw, max_bw);

		return false;
	}

	return true;
}

/*
 * Connector .mode_valid: reject modes that exceed the bandwidth of the
 * currently trained link (link_mutex guards link.rate / num_lanes).
 */
static
enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
					  struct drm_display_mode *mode)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);

	mutex_lock(&mhdp->link_mutex);

	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
				    mhdp->link.rate)) {
		mutex_unlock(&mhdp->link_mutex);
		return MODE_CLOCK_HIGH;
	}

	mutex_unlock(&mhdp->link_mutex);
	return MODE_OK;
}

/*
 * Connector .atomic_check for HDCP: force a modeset whenever the
 * content-protection request or the HDCP content type changes, and
 * keep the ENABLED -> DESIRED downgrade semantics on CRTC removal.
 */
static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn,
					    struct drm_atomic_state *state)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
	struct drm_connector_state *old_state, *new_state;
	struct drm_crtc_state *crtc_state;
	u64 old_cp, new_cp;

	if (!mhdp->hdcp_supported)
		return 0;

	old_state = drm_atomic_get_old_connector_state(state, conn);
	new_state = drm_atomic_get_new_connector_state(state, conn);
	old_cp = old_state->content_protection;
	new_cp = new_state->content_protection;

	if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		goto mode_changed;
	}

	if (!new_state->crtc) {
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return 0;
	}

	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
		return 0;

mode_changed:
	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
	crtc_state->mode_changed = true;

	return 0;
}

static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
	.detect_ctx = cdns_mhdp_connector_detect,
	.get_modes = cdns_mhdp_get_modes,
	.mode_valid = cdns_mhdp_mode_valid,
	.atomic_check = cdns_mhdp_connector_atomic_check,
};

static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.reset = drm_atomic_helper_connector_reset,
	.destroy = drm_connector_cleanup,
};

/*
 * Create and register the driver-managed DP connector, attach it to
 * the parent encoder and, when HDCP is supported, expose the content
 * protection property. Returns 0 or a negative error code.
 */
static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
{
	u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
	struct drm_connector *conn = &mhdp->connector;
	struct drm_bridge *bridge = &mhdp->bridge;
	int ret;

	if (!bridge->encoder) {
		dev_err(mhdp->dev, "Parent encoder object not found");
		return -ENODEV;
	}

	conn->polled = DRM_CONNECTOR_POLL_HPD;

	ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
		return ret;
	}

	drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);

	ret = drm_display_info_set_bus_formats(&conn->display_info,
					       &bus_format, 1);
	if (ret)
		return ret;

	ret = drm_connector_attach_encoder(conn, bridge->encoder);
	if (ret) {
		dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
		return ret;
	}

	if (mhdp->hdcp_supported)
		ret = drm_connector_attach_content_protection_property(conn, true);

	return ret;
}

static int
cdns_mhdp_attach(struct drm_bridge *bridge,
		 enum drm_bridge_attach_flags flags)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	bool hw_ready;
	int ret;

	dev_dbg(mhdp->dev, "%s\n", __func__);

	/* Create our own connector unless the DRM core handles it. */
	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
		ret = cdns_mhdp_connector_init(mhdp);
		if (ret)
			return ret;
	}

	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = true;
	hw_ready = mhdp->hw_state == MHDP_HW_READY;

	spin_unlock(&mhdp->start_lock);

	/* Enable SW event interrupts */
	if (hw_ready)
		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
		       mhdp->regs + CDNS_APB_INT_MASK);

	return 0;
}

/*
 * Program the framer / MSA registers for the given mode: pixel format
 * and bit depth, sync polarities, porches, MSA timings, interlace
 * handling, and finally enable the framer video output.
 */
static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
				      const struct drm_display_mode *mode)
{
	unsigned int dp_framer_sp = 0, msa_horizontal_1,
		msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
		misc0 = 0, misc1 = 0, pxl_repr,
		front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
		dp_vertical_1;
	u8 stream_id = mhdp->stream_id;
	u32 bpp, bpc, pxlfmt, framer;
	int ret;

	pxlfmt = mhdp->display_fmt.color_format;
	bpc = mhdp->display_fmt.bpc;

	/*
	 * If YCBCR supported and stream not SD, use ITU709
	 * Need to handle ITU version with YCBCR420 when supported
	 */
	if ((pxlfmt == DRM_COLOR_FORMAT_YCRCB444 ||
	     pxlfmt == DRM_COLOR_FORMAT_YCRCB422) && mode->crtc_vdisplay >= 720)
		misc0 = DP_YCBCR_COEFFICIENTS_ITU709;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	switch (pxlfmt) {
	case DRM_COLOR_FORMAT_RGB444:
		pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_RGB;
		break;
	case DRM_COLOR_FORMAT_YCRCB444:
		pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCRCB422:
		pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCRCB420:
		pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
		break;
	default:
		pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
	}

	switch (bpc) {
	case 6:
		misc0 |= DP_TEST_BIT_DEPTH_6;
		pxl_repr |= CDNS_DP_FRAMER_6_BPC;
		break;
	case 8:
		misc0 |= DP_TEST_BIT_DEPTH_8;
		pxl_repr |= CDNS_DP_FRAMER_8_BPC;
		break;
	case 10:
		misc0 |= DP_TEST_BIT_DEPTH_10;
		pxl_repr |= CDNS_DP_FRAMER_10_BPC;
		break;
	case 12:
		misc0 |= DP_TEST_BIT_DEPTH_12;
		pxl_repr |= CDNS_DP_FRAMER_12_BPC;
		break;
	case 16:
		misc0 |= DP_TEST_BIT_DEPTH_16;
		pxl_repr |= CDNS_DP_FRAMER_16_BPC;
		break;
	}

	bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;

	cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
			    bnd_hsync2vsync);

	hsync2vsync_pol_ctrl = 0;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
			    hsync2vsync_pol_ctrl);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);

	front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
	back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
			    CDNS_DP_FRONT_PORCH(front_porch) |
			    CDNS_DP_BACK_PORCH(back_porch));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
			    mode->crtc_hdisplay * bpp / 8);

	msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
			    CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
			    CDNS_DP_MSAH0_HSYNC_START(msa_h0));

	hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
	msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
			   CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
			    msa_horizontal_1);

	msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
			    CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
			    CDNS_DP_MSAV0_VSYNC_START(msa_v0));

	vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
	msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
			 CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
			    msa_vertical_1);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		misc1 = DP_TEST_INTERLACED;
	if (mhdp->display_fmt.y_only)
		misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
	/* Use VSC SDP for Y420 */
	if (pxlfmt == DRM_COLOR_FORMAT_YCRCB420)
		misc1 = CDNS_DP_TEST_VSC_SDP;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
			    misc0 | (misc1 << 8));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
			    CDNS_DP_H_HSYNC_WIDTH(hsync) |
			    CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
			    CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
			    CDNS_DP_V0_VSTART(msa_v0));

	dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);

	cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
				(mode->flags & DRM_MODE_FLAG_INTERLACE) ?
				CDNS_DP_VB_ID_INTERLACED : 0);

	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
	if (ret < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			ret);
		return;
	}
	framer |= CDNS_DP_FRAMER_EN;
	framer &= ~CDNS_DP_NO_VIDEO_MODE;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
}

/*
 * Enable single-stream transport: compute the transfer-unit valid
 * symbols (vs) and the line threshold from the mode's bandwidth
 * requirement, program the framer TU registers, then configure the
 * video path for the mode.
 */
static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
				 const struct drm_display_mode *mode)
{
	u32 rate, vs, required_bandwidth, available_bandwidth;
	s32 line_thresh1, line_thresh2, line_thresh = 0;
	int pxlclock = mode->crtc_clock;
	u32 tu_size = 64;
	u32 bpp;

	/* Get rate in MSymbols per second per lane */
	rate = mhdp->link.rate / 1000;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	required_bandwidth = pxlclock * bpp / 8;
	available_bandwidth = mhdp->link.num_lanes * rate;

	vs = tu_size * required_bandwidth / available_bandwidth;
	vs /= 1000;

	if (vs == tu_size)
		vs = tu_size - 1;

	line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
	line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
	line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
	line_thresh = (line_thresh >> 5) +
2; 1941 1942 mhdp->stream_id = 0; 1943 1944 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU, 1945 CDNS_DP_FRAMER_TU_VS(vs) | 1946 CDNS_DP_FRAMER_TU_SIZE(tu_size) | 1947 CDNS_DP_FRAMER_TU_CNT_RST_EN); 1948 1949 cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0), 1950 line_thresh & GENMASK(5, 0)); 1951 1952 cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0), 1953 CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ? 1954 0 : tu_size - vs)); 1955 1956 cdns_mhdp_configure_video(mhdp, mode); 1957 } 1958 1959 static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge, 1960 struct drm_bridge_state *bridge_state) 1961 { 1962 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); 1963 struct drm_atomic_state *state = bridge_state->base.state; 1964 struct cdns_mhdp_bridge_state *mhdp_state; 1965 struct drm_crtc_state *crtc_state; 1966 struct drm_connector *connector; 1967 struct drm_connector_state *conn_state; 1968 struct drm_bridge_state *new_state; 1969 const struct drm_display_mode *mode; 1970 u32 resp; 1971 int ret; 1972 1973 dev_dbg(mhdp->dev, "bridge enable\n"); 1974 1975 mutex_lock(&mhdp->link_mutex); 1976 1977 if (mhdp->plugged && !mhdp->link_up) { 1978 ret = cdns_mhdp_link_up(mhdp); 1979 if (ret < 0) 1980 goto out; 1981 } 1982 1983 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable) 1984 mhdp->info->ops->enable(mhdp); 1985 1986 /* Enable VIF clock for stream 0 */ 1987 ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp); 1988 if (ret < 0) { 1989 dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret); 1990 goto out; 1991 } 1992 1993 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR, 1994 resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN); 1995 1996 connector = drm_atomic_get_new_connector_for_encoder(state, 1997 bridge->encoder); 1998 if (WARN_ON(!connector)) 1999 goto out; 2000 2001 conn_state = drm_atomic_get_new_connector_state(state, connector); 2002 if (WARN_ON(!conn_state)) 2003 goto out; 2004 2005 if (mhdp->hdcp_supported && 2006 mhdp->hw_state == MHDP_HW_READY && 2007 
conn_state->content_protection == 2008 DRM_MODE_CONTENT_PROTECTION_DESIRED) { 2009 mutex_unlock(&mhdp->link_mutex); 2010 cdns_mhdp_hdcp_enable(mhdp, conn_state->hdcp_content_type); 2011 mutex_lock(&mhdp->link_mutex); 2012 } 2013 2014 crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); 2015 if (WARN_ON(!crtc_state)) 2016 goto out; 2017 2018 mode = &crtc_state->adjusted_mode; 2019 2020 new_state = drm_atomic_get_new_bridge_state(state, bridge); 2021 if (WARN_ON(!new_state)) 2022 goto out; 2023 2024 if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes, 2025 mhdp->link.rate)) { 2026 ret = -EINVAL; 2027 goto out; 2028 } 2029 2030 cdns_mhdp_sst_enable(mhdp, mode); 2031 2032 mhdp_state = to_cdns_mhdp_bridge_state(new_state); 2033 2034 mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode); 2035 drm_mode_set_name(mhdp_state->current_mode); 2036 2037 dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name); 2038 2039 mhdp->bridge_enabled = true; 2040 2041 out: 2042 mutex_unlock(&mhdp->link_mutex); 2043 if (ret < 0) 2044 schedule_work(&mhdp->modeset_retry_work); 2045 } 2046 2047 static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge, 2048 struct drm_bridge_state *bridge_state) 2049 { 2050 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); 2051 u32 resp; 2052 2053 dev_dbg(mhdp->dev, "%s\n", __func__); 2054 2055 mutex_lock(&mhdp->link_mutex); 2056 2057 if (mhdp->hdcp_supported) 2058 cdns_mhdp_hdcp_disable(mhdp); 2059 2060 mhdp->bridge_enabled = false; 2061 cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp); 2062 resp &= ~CDNS_DP_FRAMER_EN; 2063 resp |= CDNS_DP_NO_VIDEO_MODE; 2064 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp); 2065 2066 cdns_mhdp_link_down(mhdp); 2067 2068 /* Disable VIF clock for stream 0 */ 2069 cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp); 2070 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR, 2071 resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN)); 2072 2073 if (mhdp->info && 
mhdp->info->ops && mhdp->info->ops->disable)
		mhdp->info->ops->disable(mhdp);

	mutex_unlock(&mhdp->link_mutex);
}

/*
 * Bridge .detach: mark the bridge detached and mask all APB
 * interrupts so no HPD work is scheduled without a DRM device.
 */
static void cdns_mhdp_detach(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	dev_dbg(mhdp->dev, "%s\n", __func__);

	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = false;

	spin_unlock(&mhdp->start_lock);

	writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
}

static struct drm_bridge_state *
cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
{
	struct cdns_mhdp_bridge_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);

	return &state->base;
}

static void
cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
				      struct drm_bridge_state *state)
{
	struct cdns_mhdp_bridge_state *cdns_mhdp_state;

	cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);

	/* Release the duplicated mode saved by atomic_enable. */
	if (cdns_mhdp_state->current_mode) {
		drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
		cdns_mhdp_state->current_mode = NULL;
	}

	kfree(cdns_mhdp_state);
}

static struct drm_bridge_state *
cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
{
	struct cdns_mhdp_bridge_state *cdns_mhdp_state;

	cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
	if (!cdns_mhdp_state)
		return NULL;

	__drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);

	return &cdns_mhdp_state->base;
}

/*
 * Bridge .atomic_check: reject modes whose bandwidth requirement
 * exceeds the currently trained link.
 */
static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
				  struct drm_bridge_state *bridge_state,
				  struct drm_crtc_state *crtc_state,
				  struct drm_connector_state *conn_state)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;

	mutex_lock(&mhdp->link_mutex);

	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
				    mhdp->link.rate)) {
		dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
			__func__, mode->name, mhdp->link.num_lanes,
			mhdp->link.rate / 100);
		mutex_unlock(&mhdp->link_mutex);
		return -EINVAL;
	}

	mutex_unlock(&mhdp->link_mutex);
	return 0;
}

static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	return cdns_mhdp_detect(mhdp);
}

static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
					      struct drm_connector *connector)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	return cdns_mhdp_get_edid(mhdp, connector);
}

static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	/* Enable SW event interrupts */
	if (mhdp->bridge_attached)
		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
		       mhdp->regs + CDNS_APB_INT_MASK);
}

static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
}

static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
	.atomic_enable = cdns_mhdp_atomic_enable,
	.atomic_disable = cdns_mhdp_atomic_disable,
	.atomic_check = cdns_mhdp_atomic_check,
	.attach = cdns_mhdp_attach,
	.detach = cdns_mhdp_detach,
	.atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
	.atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
	.atomic_reset = cdns_mhdp_bridge_atomic_reset,
	.detect = cdns_mhdp_bridge_detect,
	.get_edid = cdns_mhdp_bridge_get_edid,
	.hpd_enable = cdns_mhdp_bridge_hpd_enable,
	.hpd_disable = cdns_mhdp_bridge_hpd_disable,
};

/*
 * Read the HPD event and status from the firmware. Returns the plugged
 * state (false also on read failure) and sets *hpd_pulse when the
 * event contained an HPD pulse.
 */
static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
{
	int hpd_event, hpd_status;

	*hpd_pulse = false;

	hpd_event = cdns_mhdp_read_hpd_event(mhdp);

	/* Getting event bits failed, bail out */
	if (hpd_event < 0) {
		dev_warn(mhdp->dev, "%s: read event failed: %d\n",
			 __func__, hpd_event);
		return false;
	}

	hpd_status = cdns_mhdp_get_hpd_status(mhdp);
	if (hpd_status < 0) {
		dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
			 __func__, hpd_status);
		return false;
	}

	if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
		*hpd_pulse = true;

	return !!hpd_status;
}

/*
 * Re-evaluate the link after an HPD event: bring the link down on
 * unplug, re-train when the link status is bad, and re-enable the
 * previously committed mode when the bridge was enabled.
 */
static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
{
	struct cdns_mhdp_bridge_state *cdns_bridge_state;
	struct drm_display_mode *current_mode;
	bool old_plugged = mhdp->plugged;
	struct drm_bridge_state *state;
	u8 status[DP_LINK_STATUS_SIZE];
	bool hpd_pulse;
	int ret = 0;

	mutex_lock(&mhdp->link_mutex);

	mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);

	if (!mhdp->plugged) {
		cdns_mhdp_link_down(mhdp);
		mhdp->link.rate = mhdp->host.link_rate;
		mhdp->link.num_lanes = mhdp->host.lanes_cnt;
		goto out;
	}

	/*
	 * If we get a HPD pulse event and we were and still are connected,
	 * check the link status. If link status is ok, there's nothing to do
	 * as we don't handle DP interrupts. If link status is bad, continue
	 * with full link setup.
	 */
	if (hpd_pulse && old_plugged == mhdp->plugged) {
		ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);

		/*
		 * If everything looks fine, just return, as we don't handle
		 * DP IRQs.
2269 */ 2270 if (ret > 0 && 2271 drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) && 2272 drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes)) 2273 goto out; 2274 2275 /* If link is bad, mark link as down so that we do a new LT */ 2276 mhdp->link_up = false; 2277 } 2278 2279 if (!mhdp->link_up) { 2280 ret = cdns_mhdp_link_up(mhdp); 2281 if (ret < 0) 2282 goto out; 2283 } 2284 2285 if (mhdp->bridge_enabled) { 2286 state = drm_priv_to_bridge_state(mhdp->bridge.base.state); 2287 if (!state) { 2288 ret = -EINVAL; 2289 goto out; 2290 } 2291 2292 cdns_bridge_state = to_cdns_mhdp_bridge_state(state); 2293 if (!cdns_bridge_state) { 2294 ret = -EINVAL; 2295 goto out; 2296 } 2297 2298 current_mode = cdns_bridge_state->current_mode; 2299 if (!current_mode) { 2300 ret = -EINVAL; 2301 goto out; 2302 } 2303 2304 if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes, 2305 mhdp->link.rate)) { 2306 ret = -EINVAL; 2307 goto out; 2308 } 2309 2310 dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, 2311 current_mode->name); 2312 2313 cdns_mhdp_sst_enable(mhdp, current_mode); 2314 } 2315 out: 2316 mutex_unlock(&mhdp->link_mutex); 2317 return ret; 2318 } 2319 2320 static void cdns_mhdp_modeset_retry_fn(struct work_struct *work) 2321 { 2322 struct cdns_mhdp_device *mhdp; 2323 struct drm_connector *conn; 2324 2325 mhdp = container_of(work, typeof(*mhdp), modeset_retry_work); 2326 2327 conn = &mhdp->connector; 2328 2329 /* Grab the locks before changing connector property */ 2330 mutex_lock(&conn->dev->mode_config.mutex); 2331 2332 /* 2333 * Set connector link status to BAD and send a Uevent to notify 2334 * userspace to do a modeset. 
2335 */ 2336 drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD); 2337 mutex_unlock(&conn->dev->mode_config.mutex); 2338 2339 /* Send Hotplug uevent so userspace can reprobe */ 2340 drm_kms_helper_hotplug_event(mhdp->bridge.dev); 2341 } 2342 2343 static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data) 2344 { 2345 struct cdns_mhdp_device *mhdp = data; 2346 u32 apb_stat, sw_ev0; 2347 bool bridge_attached; 2348 2349 apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS); 2350 if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT)) 2351 return IRQ_NONE; 2352 2353 sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0); 2354 2355 /* 2356 * Calling drm_kms_helper_hotplug_event() when not attached 2357 * to drm device causes an oops because the drm_bridge->dev 2358 * is NULL. See cdns_mhdp_fw_cb() comments for details about the 2359 * problems related drm_kms_helper_hotplug_event() call. 2360 */ 2361 spin_lock(&mhdp->start_lock); 2362 bridge_attached = mhdp->bridge_attached; 2363 spin_unlock(&mhdp->start_lock); 2364 2365 if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) { 2366 schedule_work(&mhdp->hpd_work); 2367 } 2368 2369 if (sw_ev0 & ~CDNS_DPTX_HPD) { 2370 mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD); 2371 wake_up(&mhdp->sw_events_wq); 2372 } 2373 2374 return IRQ_HANDLED; 2375 } 2376 2377 u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event) 2378 { 2379 u32 ret; 2380 2381 ret = wait_event_timeout(mhdp->sw_events_wq, 2382 mhdp->sw_events & event, 2383 msecs_to_jiffies(500)); 2384 if (!ret) { 2385 dev_dbg(mhdp->dev, "SW event 0x%x timeout\n", event); 2386 goto sw_event_out; 2387 } 2388 2389 ret = mhdp->sw_events; 2390 mhdp->sw_events &= ~event; 2391 2392 sw_event_out: 2393 return ret; 2394 } 2395 2396 static void cdns_mhdp_hpd_work(struct work_struct *work) 2397 { 2398 struct cdns_mhdp_device *mhdp = container_of(work, 2399 struct cdns_mhdp_device, 2400 hpd_work); 2401 int ret; 2402 2403 ret = cdns_mhdp_update_link_status(mhdp); 2404 if 
(mhdp->connector.dev) { 2405 if (ret < 0) 2406 schedule_work(&mhdp->modeset_retry_work); 2407 else 2408 drm_kms_helper_hotplug_event(mhdp->bridge.dev); 2409 } else { 2410 drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp)); 2411 } 2412 } 2413 2414 static int cdns_mhdp_probe(struct platform_device *pdev) 2415 { 2416 struct device *dev = &pdev->dev; 2417 struct cdns_mhdp_device *mhdp; 2418 unsigned long rate; 2419 struct clk *clk; 2420 int ret; 2421 int irq; 2422 2423 mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL); 2424 if (!mhdp) 2425 return -ENOMEM; 2426 2427 clk = devm_clk_get(dev, NULL); 2428 if (IS_ERR(clk)) { 2429 dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk)); 2430 return PTR_ERR(clk); 2431 } 2432 2433 mhdp->clk = clk; 2434 mhdp->dev = dev; 2435 mutex_init(&mhdp->mbox_mutex); 2436 mutex_init(&mhdp->link_mutex); 2437 spin_lock_init(&mhdp->start_lock); 2438 2439 drm_dp_aux_init(&mhdp->aux); 2440 mhdp->aux.dev = dev; 2441 mhdp->aux.transfer = cdns_mhdp_transfer; 2442 2443 mhdp->regs = devm_platform_ioremap_resource(pdev, 0); 2444 if (IS_ERR(mhdp->regs)) { 2445 dev_err(dev, "Failed to get memory resource\n"); 2446 return PTR_ERR(mhdp->regs); 2447 } 2448 2449 mhdp->sapb_regs = devm_platform_ioremap_resource_byname(pdev, "mhdptx-sapb"); 2450 if (IS_ERR(mhdp->sapb_regs)) { 2451 mhdp->hdcp_supported = false; 2452 dev_warn(dev, 2453 "Failed to get SAPB memory resource, HDCP not supported\n"); 2454 } else { 2455 mhdp->hdcp_supported = true; 2456 } 2457 2458 mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0); 2459 if (IS_ERR(mhdp->phy)) { 2460 dev_err(dev, "no PHY configured\n"); 2461 return PTR_ERR(mhdp->phy); 2462 } 2463 2464 platform_set_drvdata(pdev, mhdp); 2465 2466 mhdp->info = of_device_get_match_data(dev); 2467 2468 clk_prepare_enable(clk); 2469 2470 pm_runtime_enable(dev); 2471 ret = pm_runtime_get_sync(dev); 2472 if (ret < 0) { 2473 dev_err(dev, "pm_runtime_get_sync failed\n"); 2474 pm_runtime_disable(dev); 2475 goto 
clk_disable; 2476 } 2477 2478 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) { 2479 ret = mhdp->info->ops->init(mhdp); 2480 if (ret != 0) { 2481 dev_err(dev, "MHDP platform initialization failed: %d\n", 2482 ret); 2483 goto runtime_put; 2484 } 2485 } 2486 2487 rate = clk_get_rate(clk); 2488 writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L); 2489 writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H); 2490 2491 dev_dbg(dev, "func clk rate %lu Hz\n", rate); 2492 2493 writel(~0, mhdp->regs + CDNS_APB_INT_MASK); 2494 2495 irq = platform_get_irq(pdev, 0); 2496 ret = devm_request_threaded_irq(mhdp->dev, irq, NULL, 2497 cdns_mhdp_irq_handler, IRQF_ONESHOT, 2498 "mhdp8546", mhdp); 2499 if (ret) { 2500 dev_err(dev, "cannot install IRQ %d\n", irq); 2501 ret = -EIO; 2502 goto plat_fini; 2503 } 2504 2505 cdns_mhdp_fill_host_caps(mhdp); 2506 2507 /* Initialize link rate and num of lanes to host values */ 2508 mhdp->link.rate = mhdp->host.link_rate; 2509 mhdp->link.num_lanes = mhdp->host.lanes_cnt; 2510 2511 /* The only currently supported format */ 2512 mhdp->display_fmt.y_only = false; 2513 mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444; 2514 mhdp->display_fmt.bpc = 8; 2515 2516 mhdp->bridge.of_node = pdev->dev.of_node; 2517 mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs; 2518 mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | 2519 DRM_BRIDGE_OP_HPD; 2520 mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; 2521 if (mhdp->info) 2522 mhdp->bridge.timings = mhdp->info->timings; 2523 2524 ret = phy_init(mhdp->phy); 2525 if (ret) { 2526 dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret); 2527 goto plat_fini; 2528 } 2529 2530 /* Initialize the work for modeset in case of link train failure */ 2531 INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn); 2532 INIT_WORK(&mhdp->hpd_work, cdns_mhdp_hpd_work); 2533 2534 init_waitqueue_head(&mhdp->fw_load_wq); 2535 init_waitqueue_head(&mhdp->sw_events_wq); 2536 2537 ret = 
cdns_mhdp_load_firmware(mhdp); 2538 if (ret) 2539 goto phy_exit; 2540 2541 if (mhdp->hdcp_supported) 2542 cdns_mhdp_hdcp_init(mhdp); 2543 2544 drm_bridge_add(&mhdp->bridge); 2545 2546 return 0; 2547 2548 phy_exit: 2549 phy_exit(mhdp->phy); 2550 plat_fini: 2551 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit) 2552 mhdp->info->ops->exit(mhdp); 2553 runtime_put: 2554 pm_runtime_put_sync(dev); 2555 pm_runtime_disable(dev); 2556 clk_disable: 2557 clk_disable_unprepare(mhdp->clk); 2558 2559 return ret; 2560 } 2561 2562 static int cdns_mhdp_remove(struct platform_device *pdev) 2563 { 2564 struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev); 2565 unsigned long timeout = msecs_to_jiffies(100); 2566 bool stop_fw = false; 2567 int ret; 2568 2569 drm_bridge_remove(&mhdp->bridge); 2570 2571 ret = wait_event_timeout(mhdp->fw_load_wq, 2572 mhdp->hw_state == MHDP_HW_READY, 2573 timeout); 2574 if (ret == 0) 2575 dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n", 2576 __func__); 2577 else 2578 stop_fw = true; 2579 2580 spin_lock(&mhdp->start_lock); 2581 mhdp->hw_state = MHDP_HW_STOPPED; 2582 spin_unlock(&mhdp->start_lock); 2583 2584 if (stop_fw) 2585 ret = cdns_mhdp_set_firmware_active(mhdp, false); 2586 2587 phy_exit(mhdp->phy); 2588 2589 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit) 2590 mhdp->info->ops->exit(mhdp); 2591 2592 pm_runtime_put_sync(&pdev->dev); 2593 pm_runtime_disable(&pdev->dev); 2594 2595 cancel_work_sync(&mhdp->modeset_retry_work); 2596 flush_scheduled_work(); 2597 2598 clk_disable_unprepare(mhdp->clk); 2599 2600 return ret; 2601 } 2602 2603 static const struct of_device_id mhdp_ids[] = { 2604 { .compatible = "cdns,mhdp8546", }, 2605 #ifdef CONFIG_DRM_CDNS_MHDP8546_J721E 2606 { .compatible = "ti,j721e-mhdp8546", 2607 .data = &(const struct cdns_mhdp_platform_info) { 2608 .timings = &mhdp_ti_j721e_bridge_timings, 2609 .ops = &mhdp_ti_j721e_ops, 2610 }, 2611 }, 2612 #endif 2613 { /* sentinel */ } 2614 }; 2615 
/* Export the OF match table for module autoloading via modalias. */
MODULE_DEVICE_TABLE(of, mhdp_ids);

/* Platform driver glue: bound by compatible strings in mhdp_ids. */
static struct platform_driver mhdp_driver = {
	.driver	= {
		.name		= "cdns-mhdp8546",
		.of_match_table	= of_match_ptr(mhdp_ids),
	},
	.probe	= cdns_mhdp_probe,
	.remove	= cdns_mhdp_remove,
};
module_platform_driver(mhdp_driver);

/* Request the DP firmware image at module load / udev time. */
MODULE_FIRMWARE(FW_NAME);

MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>");
MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-mhdp8546");