/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_dp_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
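 *
 * A rough sketch of how a driver typically feeds sideband traffic into
 * these helpers from its HPD IRQ handler (illustrative only; error
 * handling and the surrounding driver structure are omitted)::
 *
 *     u8 esi[DP_DPRX_ESI_LEN];
 *     bool handled;
 *
 *     // read the event status indicators from the sink ...
 *     drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, DP_DPRX_ESI_LEN);
 *     // ... and let the topology manager process any sideband messages
 *     drm_dp_mst_hpd_irq(mgr, esi, &handled);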
 */
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
						 struct drm_dp_mst_branch *branch);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

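/*
 * A RAD (Relative ADdress) is stored packed, two 4-bit port numbers per
 * byte; the helper below unpacks one nibble per link hop and prints them
 * with the %*phC hex format specifier, so a branch three links deep shows
 * up as e.g. "012" rather than as raw packed bytes.
 */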
static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & 0xf;
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

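/*
 * Layout of the sideband message header as encoded above and decoded below:
 *
 *   byte 0:            LCT in the high nibble, LCR in the low nibble
 *   next LCT/2 bytes:  packed RAD, one nibble per link hop
 *   next byte:         broadcast (bit 7) | path_msg (bit 6) | msg_len (5:0)
 *   last byte:         somt (bit 7) | eomt (bit 6) | seqno (bit 4) | crc4 (3:0)
 *
 * The 4-bit header CRC is computed with the generator polynomial 0x13
 * (x^4 + x + 1) over every nibble except the CRC nibble itself, while the
 * message body CRC in drm_dp_msg_data_crc4() uses the 8-bit generator 0xd5.
 */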
static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	case DP_QUERY_STREAM_ENC_STATUS: {
		const struct drm_dp_query_stream_enc_status *msg;

		msg = &req->u.enc_status;
		buf[idx] = msg->stream_id;
		idx++;
		memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
		idx += sizeof(msg->client_id);
		buf[idx] = 0;
		buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
		buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
		buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
		buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
		idx++;
	}
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);

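/*
 * For instance (illustrative): an ENUM_PATH_RESOURCES request for port 1
 * encodes to the two-byte body { 0x10, 0x10 } - the request type in the
 * first byte, the port number in the high nibble of the second - with
 * raw->cur_len left at 2.
 */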
/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				/* only free the transactions duplicated so far */
				while (i--) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		req->u.enc_status.stream_id = buf[idx++];
		for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
			req->u.enc_status.client_id[i] = buf[idx++];

		req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
							   buf[idx]);
		req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
								 buf[idx]);
		req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
							      buf[idx]);
		req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
								    buf[idx]);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		P("stream_id=%u client_id=%*ph stream_event=%x "
		  "valid_event=%d stream_behavior=%x valid_behavior=%d\n",
		  req->u.enc_status.stream_id,
		  (int)ARRAY_SIZE(req->u.enc_status.client_id),
		  req->u.enc_status.client_id, req->u.enc_status.stream_event,
		  req->u.enc_status.valid_stream_event,
		  req->u.enc_status.stream_behavior,
		  req->u.enc_status.valid_stream_behavior);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

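/*
 * A decoded request may own kmemdup()'d buffers (remote DPCD/I2C writes and
 * I2C read transactions), so whoever calls drm_dp_decode_sideband_req() is
 * responsible for freeing those - as the dump helper below does once it is
 * done printing.
 */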
static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 hdrlen)
{
	/*
	 * ignore out-of-order messages or messages that are part of a
	 * failed transaction
	 */
	if (!hdr->somt && !msg->have_somt)
		return false;

	/* get length contained in this portion */
	msg->curchunk_idx = 0;
	msg->curchunk_len = hdr->msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (hdr->somt && msg->have_somt)
		return false;

	if (hdr->somt) {
		memcpy(&msg->initial_hdr, hdr,
		       sizeof(struct drm_dp_sideband_msg_hdr));
		msg->have_somt = true;
	}
	if (hdr->eomt)
		msg->have_eomt = true;

	return true;
}

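/*
 * Each received chunk carries a trailing CRC byte: the payload proper is
 * msg->curchunk_len - 1 bytes, which is checked against
 * drm_dp_msg_data_crc4() and stripped before the chunk is appended to the
 * assembled message below.
 */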
/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
					   u8 *replybuf, u8 replybuflen)
{
	u8 crc4;

	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		if (crc4 != msg->chunk[msg->curchunk_len - 1])
			print_hex_dump(KERN_DEBUG, "wrong crc",
				       DUMP_PREFIX_NONE, 16, 1,
				       msg->chunk, msg->curchunk_len, false);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;

		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool
drm_dp_sideband_parse_query_stream_enc_status(
				struct drm_dp_sideband_msg_rx *raw,
				struct drm_dp_sideband_msg_reply_body *repmsg)
{
	struct drm_dp_query_stream_enc_status_ack_reply *reply;

	reply = &repmsg->u.enc_status;

	reply->stream_id = raw->msg[3];

	reply->reply_signed = raw->msg[2] & BIT(0);

	/*
	 * NOTE: It's my impression from reading the spec that the below parsing
	 * is correct. However I noticed while testing with an HDCP 1.4 display
	 * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
	 * would expect both bits to be set. So keep the parsing following the
	 * spec, but beware reality might not match the spec (at least for some
	 * configurations).
	 */
	reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
	reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);

	reply->query_capable_device_present = raw->msg[2] & BIT(5);
	reply->legacy_device_present = raw->msg[2] & BIT(6);
	reply->unauthorizable_device_present = raw->msg[2] & BIT(7);

	reply->auth_completed = !!(raw->msg[1] & BIT(3));
	reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
	reply->repeater_present = !!(raw->msg[1] & BIT(5));
	reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;

	return true;
}

static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(mgr, raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_REMOTE_I2C_WRITE:
		return true; /* since there's nothing to parse */
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	case DP_QUERY_STREAM_ENC_STATUS:
		return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool
drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",
		    idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
							 struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,
				      struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
				     int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
				   int port_num,
				   u8 vcpi, uint16_t pbn,
				   u8 number_sdp_streams,
				   u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				   int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int
build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
			      u8 *q_id)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_QUERY_STREAM_ENC_STATUS;
	req.u.enc_status.stream_id = stream_id;
	memcpy(req.u.enc_status.client_id, q_id,
	       sizeof(req.u.enc_status.client_id));
	req.u.enc_status.stream_event = 0;
	req.u.enc_status.valid_stream_event = false;
	req.u.enc_status.stream_behavior = 0;
	req.u.enc_status.valid_stream_behavior = false;

	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

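/*
 * Payload ids and VCPIs are handed out from two small bitmasks below. The
 * first free payload bit found is assumed to be >= 1 (bit 0 is reserved at
 * manager init time), which is why the proposed-VCPI slot is index ret - 1
 * and the VCPI handed out is the free bit position + 1.
 */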
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		drm_dbg_kms(mgr->dev, "out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		drm_dbg_kms(mgr->dev, "out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	drm_dbg_kms(mgr->dev, "putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	unsigned long wait_timeout = msecs_to_jiffies(4000);
	unsigned long wait_expires = jiffies + wait_timeout;
	int ret;

	for (;;) {
		/*
		 * If the driver provides a way for this, change to
		 * poll-waiting for the MST reply interrupt if we didn't receive
		 * it for 50 msec. This would cater for cases where the HPD
		 * pulse signal got lost somewhere, even though the sink raised
		 * the corresponding MST interrupt correctly. One example is the
		 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
		 * filters out short pulses with a duration less than ~540 usec.
		 *
		 * The poll period is 50 msec to avoid missing an interrupt
		 * after the sink has cleared it (after a 110 msec timeout
		 * since it raised the interrupt).
		 */
		ret = wait_event_timeout(mgr->tx_waitq,
					 check_txmsg_state(mgr, txmsg),
					 mgr->cbs->poll_hpd_irq ?
						msecs_to_jiffies(50) :
						wait_timeout);

		if (ret || !mgr->cbs->poll_hpd_irq ||
		    time_after(jiffies, wait_expires))
			break;

		mgr->cbs->poll_hpd_irq(mgr);
	}

	mutex_lock(&mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		drm_dbg_kms(mgr->dev, "timed out msg send %p %d %d\n",
			    txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
			list_del(&txmsg->next);
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	drm_dp_mst_kick_tx(mgr);
	return ret;
}

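/*
 * Note that a freshly allocated branch device starts out with both of its
 * krefs (topology and malloc) initialized to 1: one topology reference and
 * one malloc reference, per the refcounting scheme documented further down.
 */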
1321 * 1322 * The poll period is 50 msec to avoid missing an interrupt 1323 * after the sink has cleared it (after a 110msec timeout 1324 * since it raised the interrupt). 1325 */ 1326 ret = wait_event_timeout(mgr->tx_waitq, 1327 check_txmsg_state(mgr, txmsg), 1328 mgr->cbs->poll_hpd_irq ? 1329 msecs_to_jiffies(50) : 1330 wait_timeout); 1331 1332 if (ret || !mgr->cbs->poll_hpd_irq || 1333 time_after(jiffies, wait_expires)) 1334 break; 1335 1336 mgr->cbs->poll_hpd_irq(mgr); 1337 } 1338 1339 mutex_lock(&mgr->qlock); 1340 if (ret > 0) { 1341 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { 1342 ret = -EIO; 1343 goto out; 1344 } 1345 } else { 1346 drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n", 1347 txmsg, txmsg->state, txmsg->seqno); 1348 1349 /* dump some state */ 1350 ret = -EIO; 1351 1352 /* remove from q */ 1353 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED || 1354 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || 1355 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) 1356 list_del(&txmsg->next); 1357 } 1358 out: 1359 if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { 1360 struct drm_printer p = drm_debug_printer(DBG_PREFIX); 1361 1362 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); 1363 } 1364 mutex_unlock(&mgr->qlock); 1365 1366 drm_dp_mst_kick_tx(mgr); 1367 return ret; 1368 } 1369 1370 static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad) 1371 { 1372 struct drm_dp_mst_branch *mstb; 1373 1374 mstb = kzalloc(sizeof(*mstb), GFP_KERNEL); 1375 if (!mstb) 1376 return NULL; 1377 1378 mstb->lct = lct; 1379 if (lct > 1) 1380 memcpy(mstb->rad, rad, lct / 2); 1381 INIT_LIST_HEAD(&mstb->ports); 1382 kref_init(&mstb->topology_kref); 1383 kref_init(&mstb->malloc_kref); 1384 return mstb; 1385 } 1386 1387 static void drm_dp_free_mst_branch_device(struct kref *kref) 1388 { 1389 struct drm_dp_mst_branch *mstb = 1390 container_of(kref, struct drm_dp_mst_branch, malloc_kref); 1391 1392 if (mstb->port_parent) 1393 drm_dp_mst_put_port_malloc(mstb->port_parent); 1394 1395 kfree(mstb); 1396 } 1397 1398 /** 1399 * DOC: Branch device and port refcounting 1400 * 1401 * Topology refcount overview 1402 * ~~~~~~~~~~~~~~~~~~~~~~~~~~ 1403 * 1404 * The refcounting schemes for &struct drm_dp_mst_branch and &struct 1405 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have 1406 * two different kinds of refcounts: topology refcounts, and malloc refcounts. 1407 * 1408 * Topology refcounts are not exposed to drivers, and are handled internally 1409 * by the DP MST helpers. The helpers use them in order to prevent the 1410 * in-memory topology state from being changed in the middle of critical 1411 * operations like changing the internal state of payload allocations. This 1412 * means each branch and port will be considered to be connected to the rest 1413 * of the topology until its topology refcount reaches zero. Additionally, 1414 * for ports this means that their associated &struct drm_connector will stay 1415 * registered with userspace until the port's refcount reaches 0. 1416 * 1417 * Malloc refcount overview 1418 * ~~~~~~~~~~~~~~~~~~~~~~~~ 1419 * 1420 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct 1421 * drm_dp_mst_branch allocated even after all of its topology references have 1422 * been dropped, so that the driver or MST helpers can safely access each 1423 * branch's last known state before it was disconnected from the topology. 
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends
 *    at the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
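 *
 * A minimal sketch of the driver-side pattern this enables (illustrative
 * only; where the put happens depends on the driver's connector code)::
 *
 *     // in the driver's &drm_dp_mst_topology_cbs.add_connector hook
 *     drm_dp_mst_get_port_malloc(port);
 *
 *     // ... the port may be removed from the topology at any point ...
 *
 *     // in the driver's connector destroy path
 *     drm_dp_mst_put_port_malloc(port);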
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);

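/*
 * Everything from here to the matching #else is a debugging aid: with
 * CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS enabled, every topology ref get/put
 * records a stackdepot backtrace, and the accumulated history is dumped
 * once a port's or branch device's topology refcount drops to zero.
 */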
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)

#define STACK_DEPTH 8

static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
		    struct drm_dp_mst_topology_ref_history *history,
		    enum drm_dp_mst_topology_ref_type type)
{
	struct drm_dp_mst_topology_ref_entry *entry = NULL;
	depot_stack_handle_t backtrace;
	ulong stack_entries[STACK_DEPTH];
	uint n;
	int i;

	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
	if (!backtrace)
		return;

	/* Try to find an existing entry for this backtrace */
	for (i = 0; i < history->len; i++) {
		if (history->entries[i].backtrace == backtrace) {
			entry = &history->entries[i];
			break;
		}
	}

	/* Otherwise add one */
	if (!entry) {
		struct drm_dp_mst_topology_ref_entry *new;
		int new_len = history->len + 1;

		new = krealloc(history->entries, sizeof(*new) * new_len,
			       GFP_KERNEL);
		if (!new)
			return;

		entry = &new[history->len];
		history->len = new_len;
		history->entries = new;

		entry->backtrace = backtrace;
		entry->type = type;
		entry->count = 0;
	}
	entry->count++;
	entry->ts_nsec = ktime_get_ns();
}

static int
topology_ref_history_cmp(const void *a, const void *b)
{
	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;

	if (entry_a->ts_nsec > entry_b->ts_nsec)
		return 1;
	else if (entry_a->ts_nsec < entry_b->ts_nsec)
		return -1;
	else
		return 0;
}

static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
		return "get";
	else
		return "put";
}

static void
__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
			    void *ptr, const char *type_str)
{
	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int i;

	if (!buf)
		return;

	if (!history->len)
		goto out;

	/* First, sort the list so that it goes from oldest to newest
	 * reference entry
	 */
	sort(history->entries, history->len, sizeof(*history->entries),
	     topology_ref_history_cmp, NULL);

	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
		   type_str, ptr);

	for (i = 0; i < history->len; i++) {
		const struct drm_dp_mst_topology_ref_entry *entry =
			&history->entries[i];
		u64 ts_nsec = entry->ts_nsec;
		u32 rem_nsec = do_div(ts_nsec, 1000000000);

		stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4);

		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
			   entry->count,
			   topology_ref_type_to_str(entry->type),
			   ts_nsec, rem_nsec / 1000, buf);
	}

	/* Now free the history, since this is the only time we expose it */
	kfree(history->entries);
out:
	kfree(buf);
}

static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
				    "MSTB");
}

static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
	__dump_topology_ref_history(&port->topology_ref_history, port,
				    "Port");
}

static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}

static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}

static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->topology_ref_history_lock);
}

static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	drm_dp_mst_dump_mstb_topology_history(mstb);

	INIT_LIST_HEAD(&mstb->destroy_next);

	/*
	 * This can get called under mgr->mutex, so we need to perform the
	 * actual destruction of the mstb in another worker
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
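 *
 * A typical calling pattern looks like (illustrative)::
 *
 *     if (drm_dp_mst_topology_try_get_mstb(mstb)) {
 *             // mstb is guaranteed to stay allocated in here
 *             drm_dp_mst_topology_put_mstb(mstb);
 *     }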
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret;

	topology_ref_history_lock(mstb->mgr);
	ret = kref_get_unless_zero(&mstb->topology_kref);
	if (ret) {
		drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(mstb->mgr);

	return ret;
}

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));

	topology_ref_history_unlock(mstb->mgr);
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1);
	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(mstb->mgr);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	drm_dp_mst_dump_port_topology_history(port);

	/* There's nothing that needs locking to destroy an input port yet */
	if (port->input) {
		drm_dp_mst_put_port_malloc(port);
		return;
	}

	kfree(port->cached_edid);

	/*
	 * we can't destroy the connector here, as we might be holding the
	 * mode_config.mutex from an EDID retrieval
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&port->next, &mgr->destroy_port_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret;

	topology_ref_history_lock(port->mgr);
	ret = kref_get_unless_zero(&port->topology_kref);
	if (ret) {
		drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(port->mgr);
	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
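 *
 * Illustrative sketch (mirroring this file's own usage when a newly
 * created port is linked into its parent's port list, where the caller
 * is known to already hold a topology reference):
 *
 *	mutex_lock(&mgr->lock);
 *	drm_dp_mst_topology_get_port(port);
 *	list_add(&port->next, &mstb->ports);
 *	mstb->num_ports++;
 *	mutex_unlock(&mgr->lock);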
1922 * 1923 * See also: 1924 * drm_dp_mst_topology_try_get_port() 1925 * drm_dp_mst_topology_put_port() 1926 */ 1927 static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) 1928 { 1929 topology_ref_history_lock(port->mgr); 1930 1931 WARN_ON(kref_read(&port->topology_kref) == 0); 1932 kref_get(&port->topology_kref); 1933 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref)); 1934 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); 1935 1936 topology_ref_history_unlock(port->mgr); 1937 } 1938 1939 /** 1940 * drm_dp_mst_topology_put_port() - release a topology reference to a port 1941 * @port: The &struct drm_dp_mst_port to release the topology reference from 1942 * 1943 * Releases a topology reference from @port by decrementing 1944 * &drm_dp_mst_port.topology_kref. 1945 * 1946 * See also: 1947 * drm_dp_mst_topology_try_get_port() 1948 * drm_dp_mst_topology_get_port() 1949 */ 1950 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) 1951 { 1952 topology_ref_history_lock(port->mgr); 1953 1954 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1); 1955 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT); 1956 1957 topology_ref_history_unlock(port->mgr); 1958 kref_put(&port->topology_kref, drm_dp_destroy_port); 1959 } 1960 1961 static struct drm_dp_mst_branch * 1962 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb, 1963 struct drm_dp_mst_branch *to_find) 1964 { 1965 struct drm_dp_mst_port *port; 1966 struct drm_dp_mst_branch *rmstb; 1967 1968 if (to_find == mstb) 1969 return mstb; 1970 1971 list_for_each_entry(port, &mstb->ports, next) { 1972 if (port->mstb) { 1973 rmstb = drm_dp_mst_topology_get_mstb_validated_locked( 1974 port->mstb, to_find); 1975 if (rmstb) 1976 return rmstb; 1977 } 1978 } 1979 return NULL; 1980 } 1981 1982 static struct drm_dp_mst_branch * 1983 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr, 1984 struct drm_dp_mst_branch *mstb) 1985 { 1986 struct drm_dp_mst_branch *rmstb = NULL; 1987 1988 mutex_lock(&mgr->lock); 1989 if (mgr->mst_primary) { 1990 rmstb = drm_dp_mst_topology_get_mstb_validated_locked( 1991 mgr->mst_primary, mstb); 1992 1993 if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb)) 1994 rmstb = NULL; 1995 } 1996 mutex_unlock(&mgr->lock); 1997 return rmstb; 1998 } 1999 2000 static struct drm_dp_mst_port * 2001 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb, 2002 struct drm_dp_mst_port *to_find) 2003 { 2004 struct drm_dp_mst_port *port, *mport; 2005 2006 list_for_each_entry(port, &mstb->ports, next) { 2007 if (port == to_find) 2008 return port; 2009 2010 if (port->mstb) { 2011 mport = drm_dp_mst_topology_get_port_validated_locked( 2012 port->mstb, to_find); 2013 if (mport) 2014 return mport; 2015 } 2016 } 2017 return NULL; 2018 } 2019 2020 static struct drm_dp_mst_port * 2021 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr, 2022 struct drm_dp_mst_port *port) 2023 { 2024 struct drm_dp_mst_port *rport = NULL; 2025 2026 mutex_lock(&mgr->lock); 2027 if (mgr->mst_primary) { 2028 rport = drm_dp_mst_topology_get_port_validated_locked( 2029 mgr->mst_primary, port); 2030 2031 if (rport && !drm_dp_mst_topology_try_get_port(rport)) 2032 rport = NULL; 2033 } 2034 mutex_unlock(&mgr->lock); 2035 return rport; 2036 } 2037 2038 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num) 2039 { 2040 struct drm_dp_mst_port *port; 2041 
int ret; 2042 2043 list_for_each_entry(port, &mstb->ports, next) { 2044 if (port->port_num == port_num) { 2045 ret = drm_dp_mst_topology_try_get_port(port); 2046 return ret ? port : NULL; 2047 } 2048 } 2049 2050 return NULL; 2051 } 2052 2053 /* 2054 * calculate a new RAD for this MST branch device 2055 * if parent has an LCT of 2 then it has 1 nibble of RAD, 2056 * if parent has an LCT of 3 then it has 2 nibbles of RAD, 2057 */ 2058 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, 2059 u8 *rad) 2060 { 2061 int parent_lct = port->parent->lct; 2062 int shift = 4; 2063 int idx = (parent_lct - 1) / 2; 2064 2065 if (parent_lct > 1) { 2066 memcpy(rad, port->parent->rad, idx + 1); 2067 shift = (parent_lct % 2) ? 4 : 0; 2068 } else 2069 rad[0] = 0; 2070 2071 rad[idx] |= port->port_num << shift; 2072 return parent_lct + 1; 2073 } 2074 2075 static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs) 2076 { 2077 switch (pdt) { 2078 case DP_PEER_DEVICE_DP_LEGACY_CONV: 2079 case DP_PEER_DEVICE_SST_SINK: 2080 return true; 2081 case DP_PEER_DEVICE_MST_BRANCHING: 2082 /* For sst branch device */ 2083 if (!mcs) 2084 return true; 2085 2086 return false; 2087 } 2088 return true; 2089 } 2090 2091 static int 2092 drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt, 2093 bool new_mcs) 2094 { 2095 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 2096 struct drm_dp_mst_branch *mstb; 2097 u8 rad[8], lct; 2098 int ret = 0; 2099 2100 if (port->pdt == new_pdt && port->mcs == new_mcs) 2101 return 0; 2102 2103 /* Teardown the old pdt, if there is one */ 2104 if (port->pdt != DP_PEER_DEVICE_NONE) { 2105 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { 2106 /* 2107 * If the new PDT would also have an i2c bus, 2108 * don't bother with reregistering it 2109 */ 2110 if (new_pdt != DP_PEER_DEVICE_NONE && 2111 drm_dp_mst_is_end_device(new_pdt, new_mcs)) { 2112 port->pdt = new_pdt; 2113 port->mcs = new_mcs; 2114 return 0; 2115 } 2116 2117 /* remove i2c over sideband */ 2118 drm_dp_mst_unregister_i2c_bus(port); 2119 } else { 2120 mutex_lock(&mgr->lock); 2121 drm_dp_mst_topology_put_mstb(port->mstb); 2122 port->mstb = NULL; 2123 mutex_unlock(&mgr->lock); 2124 } 2125 } 2126 2127 port->pdt = new_pdt; 2128 port->mcs = new_mcs; 2129 2130 if (port->pdt != DP_PEER_DEVICE_NONE) { 2131 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { 2132 /* add i2c over sideband */ 2133 ret = drm_dp_mst_register_i2c_bus(port); 2134 } else { 2135 lct = drm_dp_calculate_rad(port, rad); 2136 mstb = drm_dp_add_mst_branch_device(lct, rad); 2137 if (!mstb) { 2138 ret = -ENOMEM; 2139 drm_err(mgr->dev, "Failed to create MSTB for port %p", port); 2140 goto out; 2141 } 2142 2143 mutex_lock(&mgr->lock); 2144 port->mstb = mstb; 2145 mstb->mgr = port->mgr; 2146 mstb->port_parent = port; 2147 2148 /* 2149 * Make sure this port's memory allocation stays 2150 * around until its child MSTB releases it 2151 */ 2152 drm_dp_mst_get_port_malloc(port); 2153 mutex_unlock(&mgr->lock); 2154 2155 /* And make sure we send a link address for this */ 2156 ret = 1; 2157 } 2158 } 2159 2160 out: 2161 if (ret < 0) 2162 port->pdt = DP_PEER_DEVICE_NONE; 2163 return ret; 2164 } 2165 2166 /** 2167 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband 2168 * @aux: Fake sideband AUX CH 2169 * @offset: address of the (first) register to read 2170 * @buffer: buffer to store the register values 2171 * @size: number of bytes in @buffer 2172 * 2173 * Performs the same functionality for remote devices via 2174 * sideband messaging as 
drm_dp_dpcd_read() does for local 2175 * devices via actual AUX CH. 2176 * 2177 * Return: Number of bytes read, or negative error code on failure. 2178 */ 2179 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, 2180 unsigned int offset, void *buffer, size_t size) 2181 { 2182 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, 2183 aux); 2184 2185 return drm_dp_send_dpcd_read(port->mgr, port, 2186 offset, size, buffer); 2187 } 2188 2189 /** 2190 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband 2191 * @aux: Fake sideband AUX CH 2192 * @offset: address of the (first) register to write 2193 * @buffer: buffer containing the values to write 2194 * @size: number of bytes in @buffer 2195 * 2196 * Performs the same functionality for remote devices via 2197 * sideband messaging as drm_dp_dpcd_write() does for local 2198 * devices via actual AUX CH. 2199 * 2200 * Return: number of bytes written on success, negative error code on failure. 2201 */ 2202 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, 2203 unsigned int offset, void *buffer, size_t size) 2204 { 2205 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, 2206 aux); 2207 2208 return drm_dp_send_dpcd_write(port->mgr, port, 2209 offset, size, buffer); 2210 } 2211 2212 static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid) 2213 { 2214 int ret = 0; 2215 2216 memcpy(mstb->guid, guid, 16); 2217 2218 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { 2219 if (mstb->port_parent) { 2220 ret = drm_dp_send_dpcd_write(mstb->mgr, 2221 mstb->port_parent, 2222 DP_GUID, 16, mstb->guid); 2223 } else { 2224 ret = drm_dp_dpcd_write(mstb->mgr->aux, 2225 DP_GUID, mstb->guid, 16); 2226 } 2227 } 2228 2229 if (ret < 16 && ret > 0) 2230 return -EPROTO; 2231 2232 return ret == 16 ? 0 : ret; 2233 } 2234 2235 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, 2236 int pnum, 2237 char *proppath, 2238 size_t proppath_size) 2239 { 2240 int i; 2241 char temp[8]; 2242 2243 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); 2244 for (i = 0; i < (mstb->lct - 1); i++) { 2245 int shift = (i % 2) ? 0 : 4; 2246 int port_num = (mstb->rad[i / 2] >> shift) & 0xf; 2247 2248 snprintf(temp, sizeof(temp), "-%d", port_num); 2249 strlcat(proppath, temp, proppath_size); 2250 } 2251 snprintf(temp, sizeof(temp), "-%d", pnum); 2252 strlcat(proppath, temp, proppath_size); 2253 } 2254 2255 /** 2256 * drm_dp_mst_connector_late_register() - Late MST connector registration 2257 * @connector: The MST connector 2258 * @port: The MST port for this connector 2259 * 2260 * Helper to register the remote aux device for this MST port. Drivers should 2261 * call this from their mst connector's late_register hook to enable MST aux 2262 * devices. 2263 * 2264 * Return: 0 on success, negative error code on failure. 
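 *
 * A minimal sketch of a driver's &drm_connector_funcs.late_register hook
 * (my_mst_connector and to_my_mst_connector() are hypothetical driver
 * constructs, shown only for illustration):
 *
 *	static int my_connector_late_register(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *mst_conn = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector,
 *							  mst_conn->port);
 *	}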
2265 */ 2266 int drm_dp_mst_connector_late_register(struct drm_connector *connector, 2267 struct drm_dp_mst_port *port) 2268 { 2269 drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n", 2270 port->aux.name, connector->kdev->kobj.name); 2271 2272 port->aux.dev = connector->kdev; 2273 return drm_dp_aux_register_devnode(&port->aux); 2274 } 2275 EXPORT_SYMBOL(drm_dp_mst_connector_late_register); 2276 2277 /** 2278 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration 2279 * @connector: The MST connector 2280 * @port: The MST port for this connector 2281 * 2282 * Helper to unregister the remote aux device for this MST port, registered by 2283 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst 2284 * connector's early_unregister hook. 2285 */ 2286 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, 2287 struct drm_dp_mst_port *port) 2288 { 2289 drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n", 2290 port->aux.name, connector->kdev->kobj.name); 2291 drm_dp_aux_unregister_devnode(&port->aux); 2292 } 2293 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister); 2294 2295 static void 2296 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, 2297 struct drm_dp_mst_port *port) 2298 { 2299 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 2300 char proppath[255]; 2301 int ret; 2302 2303 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); 2304 port->connector = mgr->cbs->add_connector(mgr, port, proppath); 2305 if (!port->connector) { 2306 ret = -ENOMEM; 2307 goto error; 2308 } 2309 2310 if (port->pdt != DP_PEER_DEVICE_NONE && 2311 drm_dp_mst_is_end_device(port->pdt, port->mcs) && 2312 port->port_num >= DP_MST_LOGICAL_PORT_0) 2313 port->cached_edid = drm_get_edid(port->connector, 2314 &port->aux.ddc); 2315 2316 drm_connector_register(port->connector); 2317 return; 2318 2319 error: 2320 drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret); 2321 } 2322 2323 /* 2324 * Drop a topology reference, and unlink the port from the in-memory topology 2325 * layout 2326 */ 2327 static void 2328 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr, 2329 struct drm_dp_mst_port *port) 2330 { 2331 mutex_lock(&mgr->lock); 2332 port->parent->num_ports--; 2333 list_del(&port->next); 2334 mutex_unlock(&mgr->lock); 2335 drm_dp_mst_topology_put_port(port); 2336 } 2337 2338 static struct drm_dp_mst_port * 2339 drm_dp_mst_add_port(struct drm_device *dev, 2340 struct drm_dp_mst_topology_mgr *mgr, 2341 struct drm_dp_mst_branch *mstb, u8 port_number) 2342 { 2343 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL); 2344 2345 if (!port) 2346 return NULL; 2347 2348 kref_init(&port->topology_kref); 2349 kref_init(&port->malloc_kref); 2350 port->parent = mstb; 2351 port->port_num = port_number; 2352 port->mgr = mgr; 2353 port->aux.name = "DPMST"; 2354 port->aux.dev = dev->dev; 2355 port->aux.is_remote = true; 2356 2357 /* initialize the MST downstream port's AUX crc work queue */ 2358 port->aux.drm_dev = dev; 2359 drm_dp_remote_aux_init(&port->aux); 2360 2361 /* 2362 * Make sure the memory allocation for our parent branch stays 2363 * around until our own memory allocation is released 2364 */ 2365 drm_dp_mst_get_mstb_malloc(mstb); 2366 2367 return port; 2368 } 2369 2370 static int 2371 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, 2372 struct drm_device *dev, 2373 struct drm_dp_link_addr_reply_port *port_msg) 2374 { 2375 struct 
drm_dp_mst_topology_mgr *mgr = mstb->mgr; 2376 struct drm_dp_mst_port *port; 2377 int old_ddps = 0, ret; 2378 u8 new_pdt = DP_PEER_DEVICE_NONE; 2379 bool new_mcs = 0; 2380 bool created = false, send_link_addr = false, changed = false; 2381 2382 port = drm_dp_get_port(mstb, port_msg->port_number); 2383 if (!port) { 2384 port = drm_dp_mst_add_port(dev, mgr, mstb, 2385 port_msg->port_number); 2386 if (!port) 2387 return -ENOMEM; 2388 created = true; 2389 changed = true; 2390 } else if (!port->input && port_msg->input_port && port->connector) { 2391 /* Since port->connector can't be changed here, we create a 2392 * new port if input_port changes from 0 to 1 2393 */ 2394 drm_dp_mst_topology_unlink_port(mgr, port); 2395 drm_dp_mst_topology_put_port(port); 2396 port = drm_dp_mst_add_port(dev, mgr, mstb, 2397 port_msg->port_number); 2398 if (!port) 2399 return -ENOMEM; 2400 changed = true; 2401 created = true; 2402 } else if (port->input && !port_msg->input_port) { 2403 changed = true; 2404 } else if (port->connector) { 2405 /* We're updating a port that's exposed to userspace, so do it 2406 * under lock 2407 */ 2408 drm_modeset_lock(&mgr->base.lock, NULL); 2409 2410 old_ddps = port->ddps; 2411 changed = port->ddps != port_msg->ddps || 2412 (port->ddps && 2413 (port->ldps != port_msg->legacy_device_plug_status || 2414 port->dpcd_rev != port_msg->dpcd_revision || 2415 port->mcs != port_msg->mcs || 2416 port->pdt != port_msg->peer_device_type || 2417 port->num_sdp_stream_sinks != 2418 port_msg->num_sdp_stream_sinks)); 2419 } 2420 2421 port->input = port_msg->input_port; 2422 if (!port->input) 2423 new_pdt = port_msg->peer_device_type; 2424 new_mcs = port_msg->mcs; 2425 port->ddps = port_msg->ddps; 2426 port->ldps = port_msg->legacy_device_plug_status; 2427 port->dpcd_rev = port_msg->dpcd_revision; 2428 port->num_sdp_streams = port_msg->num_sdp_streams; 2429 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; 2430 2431 /* manage mstb port lists with mgr lock - take a reference 2432 for this list */ 2433 if (created) { 2434 mutex_lock(&mgr->lock); 2435 drm_dp_mst_topology_get_port(port); 2436 list_add(&port->next, &mstb->ports); 2437 mstb->num_ports++; 2438 mutex_unlock(&mgr->lock); 2439 } 2440 2441 /* 2442 * Reprobe PBN caps on both hotplug, and when re-probing the link 2443 * for our parent mstb 2444 */ 2445 if (old_ddps != port->ddps || !created) { 2446 if (port->ddps && !port->input) { 2447 ret = drm_dp_send_enum_path_resources(mgr, mstb, 2448 port); 2449 if (ret == 1) 2450 changed = true; 2451 } else { 2452 port->full_pbn = 0; 2453 } 2454 } 2455 2456 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs); 2457 if (ret == 1) { 2458 send_link_addr = true; 2459 } else if (ret < 0) { 2460 drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret); 2461 goto fail; 2462 } 2463 2464 /* 2465 * If this port wasn't just created, then we're reprobing because 2466 * we're coming out of suspend. 
In this case, always resend the link
	 * address if there's an MSTB on this port
	 */
	if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
	    port->mcs)
		send_link_addr = true;

	if (port->connector)
		drm_modeset_unlock(&mgr->base.lock);
	else if (!port->input)
		drm_dp_mst_port_add_connector(mstb, port);

	if (send_link_addr && port->mstb) {
		ret = drm_dp_send_link_address(mgr, port->mstb);
		if (ret == 1) /* MSTB below us changed */
			changed = true;
		else if (ret < 0)
			goto fail_put;
	}

	/* put reference to this port */
	drm_dp_mst_topology_put_port(port);
	return changed;

fail:
	drm_dp_mst_topology_unlink_port(mgr, port);
	if (port->connector)
		drm_modeset_unlock(&mgr->base.lock);
fail_put:
	drm_dp_mst_topology_put_port(port);
	return ret;
}

static void
drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
			    struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	struct drm_dp_mst_port *port;
	int old_ddps, ret;
	u8 new_pdt;
	bool new_mcs;
	bool dowork = false, create_connector = false;

	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	if (port->connector) {
		if (!port->input && conn_stat->input_port) {
			/*
			 * We can't remove a connector from an already exposed
			 * port, so just throw the port out and make sure we
			 * reprobe the link address of its parent MSTB
			 */
			drm_dp_mst_topology_unlink_port(mgr, port);
			mstb->link_address_sent = false;
			dowork = true;
			goto out;
		}

		/* Locking is only needed if the port's exposed to userspace */
		drm_modeset_lock(&mgr->base.lock, NULL);
	} else if (port->input && !conn_stat->input_port) {
		create_connector = true;
		/* Reprobe link address so we get num_sdp_streams */
		mstb->link_address_sent = false;
		dowork = true;
	}

	old_ddps = port->ddps;
	port->input = conn_stat->input_port;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps && !port->input)
			drm_dp_send_enum_path_resources(mgr, mstb, port);
		else
			port->full_pbn = 0;
	}

	new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
	new_mcs = conn_stat->message_capability_status;
	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
	if (ret == 1) {
		dowork = true;
	} else if (ret < 0) {
		drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret);
		dowork = false;
	}

	if (port->connector)
		drm_modeset_unlock(&mgr->base.lock);
	else if (create_connector)
		drm_dp_mst_port_add_connector(mstb, port);

out:
	drm_dp_mst_topology_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							       u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i, ret;
	/* find the port by iterating down */

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	if (!mstb)
		goto out;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ?
0 : 4; 2585 int port_num = (rad[i / 2] >> shift) & 0xf; 2586 2587 list_for_each_entry(port, &mstb->ports, next) { 2588 if (port->port_num == port_num) { 2589 mstb = port->mstb; 2590 if (!mstb) { 2591 drm_err(mgr->dev, 2592 "failed to lookup MSTB with lct %d, rad %02x\n", 2593 lct, rad[0]); 2594 goto out; 2595 } 2596 2597 break; 2598 } 2599 } 2600 } 2601 ret = drm_dp_mst_topology_try_get_mstb(mstb); 2602 if (!ret) 2603 mstb = NULL; 2604 out: 2605 mutex_unlock(&mgr->lock); 2606 return mstb; 2607 } 2608 2609 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( 2610 struct drm_dp_mst_branch *mstb, 2611 const uint8_t *guid) 2612 { 2613 struct drm_dp_mst_branch *found_mstb; 2614 struct drm_dp_mst_port *port; 2615 2616 if (memcmp(mstb->guid, guid, 16) == 0) 2617 return mstb; 2618 2619 2620 list_for_each_entry(port, &mstb->ports, next) { 2621 if (!port->mstb) 2622 continue; 2623 2624 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); 2625 2626 if (found_mstb) 2627 return found_mstb; 2628 } 2629 2630 return NULL; 2631 } 2632 2633 static struct drm_dp_mst_branch * 2634 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, 2635 const uint8_t *guid) 2636 { 2637 struct drm_dp_mst_branch *mstb; 2638 int ret; 2639 2640 /* find the port by iterating down */ 2641 mutex_lock(&mgr->lock); 2642 2643 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); 2644 if (mstb) { 2645 ret = drm_dp_mst_topology_try_get_mstb(mstb); 2646 if (!ret) 2647 mstb = NULL; 2648 } 2649 2650 mutex_unlock(&mgr->lock); 2651 return mstb; 2652 } 2653 2654 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 2655 struct drm_dp_mst_branch *mstb) 2656 { 2657 struct drm_dp_mst_port *port; 2658 int ret; 2659 bool changed = false; 2660 2661 if (!mstb->link_address_sent) { 2662 ret = drm_dp_send_link_address(mgr, mstb); 2663 if (ret == 1) 2664 changed = true; 2665 else if (ret < 0) 2666 return ret; 2667 } 2668 2669 list_for_each_entry(port, &mstb->ports, next) { 2670 if (port->input || !port->ddps || !port->mstb) 2671 continue; 2672 2673 ret = drm_dp_check_and_send_link_address(mgr, port->mstb); 2674 if (ret == 1) 2675 changed = true; 2676 else if (ret < 0) 2677 return ret; 2678 } 2679 2680 return changed; 2681 } 2682 2683 static void drm_dp_mst_link_probe_work(struct work_struct *work) 2684 { 2685 struct drm_dp_mst_topology_mgr *mgr = 2686 container_of(work, struct drm_dp_mst_topology_mgr, work); 2687 struct drm_device *dev = mgr->dev; 2688 struct drm_dp_mst_branch *mstb; 2689 int ret; 2690 bool clear_payload_id_table; 2691 2692 mutex_lock(&mgr->probe_lock); 2693 2694 mutex_lock(&mgr->lock); 2695 clear_payload_id_table = !mgr->payload_id_table_cleared; 2696 mgr->payload_id_table_cleared = true; 2697 2698 mstb = mgr->mst_primary; 2699 if (mstb) { 2700 ret = drm_dp_mst_topology_try_get_mstb(mstb); 2701 if (!ret) 2702 mstb = NULL; 2703 } 2704 mutex_unlock(&mgr->lock); 2705 if (!mstb) { 2706 mutex_unlock(&mgr->probe_lock); 2707 return; 2708 } 2709 2710 /* 2711 * Certain branch devices seem to incorrectly report an available_pbn 2712 * of 0 on downstream sinks, even after clearing the 2713 * DP_PAYLOAD_ALLOCATE_* registers in 2714 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C 2715 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make 2716 * things work again. 
2717 */ 2718 if (clear_payload_id_table) { 2719 drm_dbg_kms(dev, "Clearing payload ID table\n"); 2720 drm_dp_send_clear_payload_id_table(mgr, mstb); 2721 } 2722 2723 ret = drm_dp_check_and_send_link_address(mgr, mstb); 2724 drm_dp_mst_topology_put_mstb(mstb); 2725 2726 mutex_unlock(&mgr->probe_lock); 2727 if (ret > 0) 2728 drm_kms_helper_hotplug_event(dev); 2729 } 2730 2731 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 2732 u8 *guid) 2733 { 2734 u64 salt; 2735 2736 if (memchr_inv(guid, 0, 16)) 2737 return true; 2738 2739 salt = get_jiffies_64(); 2740 2741 memcpy(&guid[0], &salt, sizeof(u64)); 2742 memcpy(&guid[8], &salt, sizeof(u64)); 2743 2744 return false; 2745 } 2746 2747 static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, 2748 u8 port_num, u32 offset, u8 num_bytes) 2749 { 2750 struct drm_dp_sideband_msg_req_body req; 2751 2752 req.req_type = DP_REMOTE_DPCD_READ; 2753 req.u.dpcd_read.port_number = port_num; 2754 req.u.dpcd_read.dpcd_address = offset; 2755 req.u.dpcd_read.num_bytes = num_bytes; 2756 drm_dp_encode_sideband_req(&req, msg); 2757 } 2758 2759 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr, 2760 bool up, u8 *msg, int len) 2761 { 2762 int ret; 2763 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE; 2764 int tosend, total, offset; 2765 int retries = 0; 2766 2767 retry: 2768 total = len; 2769 offset = 0; 2770 do { 2771 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); 2772 2773 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, 2774 &msg[offset], 2775 tosend); 2776 if (ret != tosend) { 2777 if (ret == -EIO && retries < 5) { 2778 retries++; 2779 goto retry; 2780 } 2781 drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret); 2782 2783 return -EIO; 2784 } 2785 offset += tosend; 2786 total -= tosend; 2787 } while (total > 0); 2788 return 0; 2789 } 2790 2791 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, 2792 struct drm_dp_sideband_msg_tx *txmsg) 2793 { 2794 struct drm_dp_mst_branch *mstb = txmsg->dst; 2795 u8 req_type; 2796 2797 req_type = txmsg->msg[0] & 0x7f; 2798 if (req_type == DP_CONNECTION_STATUS_NOTIFY || 2799 req_type == DP_RESOURCE_STATUS_NOTIFY || 2800 req_type == DP_CLEAR_PAYLOAD_ID_TABLE) 2801 hdr->broadcast = 1; 2802 else 2803 hdr->broadcast = 0; 2804 hdr->path_msg = txmsg->path_msg; 2805 if (hdr->broadcast) { 2806 hdr->lct = 1; 2807 hdr->lcr = 6; 2808 } else { 2809 hdr->lct = mstb->lct; 2810 hdr->lcr = mstb->lct - 1; 2811 } 2812 2813 memcpy(hdr->rad, mstb->rad, hdr->lct / 2); 2814 2815 return 0; 2816 } 2817 /* 2818 * process a single block of the next message in the sideband queue 2819 */ 2820 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, 2821 struct drm_dp_sideband_msg_tx *txmsg, 2822 bool up) 2823 { 2824 u8 chunk[48]; 2825 struct drm_dp_sideband_msg_hdr hdr; 2826 int len, space, idx, tosend; 2827 int ret; 2828 2829 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT) 2830 return 0; 2831 2832 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr)); 2833 2834 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) 2835 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; 2836 2837 /* make hdr from dst mst */ 2838 ret = set_hdr_from_dst_qlock(&hdr, txmsg); 2839 if (ret < 0) 2840 return ret; 2841 2842 /* amount left to send in this message */ 2843 len = txmsg->cur_len - txmsg->cur_offset; 2844 2845 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */ 2846 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr); 2847 2848 tosend = 
min(len, space); 2849 if (len == txmsg->cur_len) 2850 hdr.somt = 1; 2851 if (space >= len) 2852 hdr.eomt = 1; 2853 2854 2855 hdr.msg_len = tosend + 1; 2856 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx); 2857 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); 2858 /* add crc at end */ 2859 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend); 2860 idx += tosend + 1; 2861 2862 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx); 2863 if (ret) { 2864 if (drm_debug_enabled(DRM_UT_DP)) { 2865 struct drm_printer p = drm_debug_printer(DBG_PREFIX); 2866 2867 drm_printf(&p, "sideband msg failed to send\n"); 2868 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); 2869 } 2870 return ret; 2871 } 2872 2873 txmsg->cur_offset += tosend; 2874 if (txmsg->cur_offset == txmsg->cur_len) { 2875 txmsg->state = DRM_DP_SIDEBAND_TX_SENT; 2876 return 1; 2877 } 2878 return 0; 2879 } 2880 2881 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) 2882 { 2883 struct drm_dp_sideband_msg_tx *txmsg; 2884 int ret; 2885 2886 WARN_ON(!mutex_is_locked(&mgr->qlock)); 2887 2888 /* construct a chunk from the first msg in the tx_msg queue */ 2889 if (list_empty(&mgr->tx_msg_downq)) 2890 return; 2891 2892 txmsg = list_first_entry(&mgr->tx_msg_downq, 2893 struct drm_dp_sideband_msg_tx, next); 2894 ret = process_single_tx_qlock(mgr, txmsg, false); 2895 if (ret < 0) { 2896 drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret); 2897 list_del(&txmsg->next); 2898 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 2899 wake_up_all(&mgr->tx_waitq); 2900 } 2901 } 2902 2903 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, 2904 struct drm_dp_sideband_msg_tx *txmsg) 2905 { 2906 mutex_lock(&mgr->qlock); 2907 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); 2908 2909 if (drm_debug_enabled(DRM_UT_DP)) { 2910 struct drm_printer p = drm_debug_printer(DBG_PREFIX); 2911 2912 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); 2913 } 2914 2915 if (list_is_singular(&mgr->tx_msg_downq)) 2916 process_single_down_tx_qlock(mgr); 2917 mutex_unlock(&mgr->qlock); 2918 } 2919 2920 static void 2921 drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr, 2922 struct drm_dp_link_address_ack_reply *reply) 2923 { 2924 struct drm_dp_link_addr_reply_port *port_reply; 2925 int i; 2926 2927 for (i = 0; i < reply->nports; i++) { 2928 port_reply = &reply->ports[i]; 2929 drm_dbg_kms(mgr->dev, 2930 "port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", 2931 i, 2932 port_reply->input_port, 2933 port_reply->peer_device_type, 2934 port_reply->port_number, 2935 port_reply->dpcd_revision, 2936 port_reply->mcs, 2937 port_reply->ddps, 2938 port_reply->legacy_device_plug_status, 2939 port_reply->num_sdp_streams, 2940 port_reply->num_sdp_stream_sinks); 2941 } 2942 } 2943 2944 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 2945 struct drm_dp_mst_branch *mstb) 2946 { 2947 struct drm_dp_sideband_msg_tx *txmsg; 2948 struct drm_dp_link_address_ack_reply *reply; 2949 struct drm_dp_mst_port *port, *tmp; 2950 int i, ret, port_mask = 0; 2951 bool changed = false; 2952 2953 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 2954 if (!txmsg) 2955 return -ENOMEM; 2956 2957 txmsg->dst = mstb; 2958 build_link_address(txmsg); 2959 2960 mstb->link_address_sent = true; 2961 drm_dp_queue_down_tx(mgr, txmsg); 2962 2963 /* FIXME: Actually do some real error handling here */ 2964 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 2965 if (ret <= 0) { 2966 drm_err(mgr->dev, "Sending link address 
failed with %d\n", ret); 2967 goto out; 2968 } 2969 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { 2970 drm_err(mgr->dev, "link address NAK received\n"); 2971 ret = -EIO; 2972 goto out; 2973 } 2974 2975 reply = &txmsg->reply.u.link_addr; 2976 drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports); 2977 drm_dp_dump_link_address(mgr, reply); 2978 2979 ret = drm_dp_check_mstb_guid(mstb, reply->guid); 2980 if (ret) { 2981 char buf[64]; 2982 2983 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf)); 2984 drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret); 2985 goto out; 2986 } 2987 2988 for (i = 0; i < reply->nports; i++) { 2989 port_mask |= BIT(reply->ports[i].port_number); 2990 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev, 2991 &reply->ports[i]); 2992 if (ret == 1) 2993 changed = true; 2994 else if (ret < 0) 2995 goto out; 2996 } 2997 2998 /* Prune any ports that are currently a part of mstb in our in-memory 2999 * topology, but were not seen in this link address. Usually this 3000 * means that they were removed while the topology was out of sync, 3001 * e.g. during suspend/resume 3002 */ 3003 mutex_lock(&mgr->lock); 3004 list_for_each_entry_safe(port, tmp, &mstb->ports, next) { 3005 if (port_mask & BIT(port->port_num)) 3006 continue; 3007 3008 drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n", 3009 port->port_num); 3010 list_del(&port->next); 3011 drm_dp_mst_topology_put_port(port); 3012 changed = true; 3013 } 3014 mutex_unlock(&mgr->lock); 3015 3016 out: 3017 if (ret <= 0) 3018 mstb->link_address_sent = false; 3019 kfree(txmsg); 3020 return ret < 0 ? ret : changed; 3021 } 3022 3023 static void 3024 drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr, 3025 struct drm_dp_mst_branch *mstb) 3026 { 3027 struct drm_dp_sideband_msg_tx *txmsg; 3028 int ret; 3029 3030 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3031 if (!txmsg) 3032 return; 3033 3034 txmsg->dst = mstb; 3035 build_clear_payload_id_table(txmsg); 3036 3037 drm_dp_queue_down_tx(mgr, txmsg); 3038 3039 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3040 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3041 drm_dbg_kms(mgr->dev, "clear payload table id nak received\n"); 3042 3043 kfree(txmsg); 3044 } 3045 3046 static int 3047 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, 3048 struct drm_dp_mst_branch *mstb, 3049 struct drm_dp_mst_port *port) 3050 { 3051 struct drm_dp_enum_path_resources_ack_reply *path_res; 3052 struct drm_dp_sideband_msg_tx *txmsg; 3053 int ret; 3054 3055 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3056 if (!txmsg) 3057 return -ENOMEM; 3058 3059 txmsg->dst = mstb; 3060 build_enum_path_resources(txmsg, port->port_num); 3061 3062 drm_dp_queue_down_tx(mgr, txmsg); 3063 3064 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3065 if (ret > 0) { 3066 ret = 0; 3067 path_res = &txmsg->reply.u.path_resources; 3068 3069 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { 3070 drm_dbg_kms(mgr->dev, "enum path resources nak received\n"); 3071 } else { 3072 if (port->port_num != path_res->port_number) 3073 DRM_ERROR("got incorrect port in response\n"); 3074 3075 drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n", 3076 path_res->port_number, 3077 path_res->full_payload_bw_number, 3078 path_res->avail_payload_bw_number); 3079 3080 /* 3081 * If something changed, make sure we send a 3082 * hotplug 3083 */ 3084 if (port->full_pbn != path_res->full_payload_bw_number || 3085 port->fec_capable != 
path_res->fec_capable) 3086 ret = 1; 3087 3088 port->full_pbn = path_res->full_payload_bw_number; 3089 port->fec_capable = path_res->fec_capable; 3090 } 3091 } 3092 3093 kfree(txmsg); 3094 return ret; 3095 } 3096 3097 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb) 3098 { 3099 if (!mstb->port_parent) 3100 return NULL; 3101 3102 if (mstb->port_parent->mstb != mstb) 3103 return mstb->port_parent; 3104 3105 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); 3106 } 3107 3108 /* 3109 * Searches upwards in the topology starting from mstb to try to find the 3110 * closest available parent of mstb that's still connected to the rest of the 3111 * topology. This can be used in order to perform operations like releasing 3112 * payloads, where the branch device which owned the payload may no longer be 3113 * around and thus would require that the payload on the last living relative 3114 * be freed instead. 3115 */ 3116 static struct drm_dp_mst_branch * 3117 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, 3118 struct drm_dp_mst_branch *mstb, 3119 int *port_num) 3120 { 3121 struct drm_dp_mst_branch *rmstb = NULL; 3122 struct drm_dp_mst_port *found_port; 3123 3124 mutex_lock(&mgr->lock); 3125 if (!mgr->mst_primary) 3126 goto out; 3127 3128 do { 3129 found_port = drm_dp_get_last_connected_port_to_mstb(mstb); 3130 if (!found_port) 3131 break; 3132 3133 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) { 3134 rmstb = found_port->parent; 3135 *port_num = found_port->port_num; 3136 } else { 3137 /* Search again, starting from this parent */ 3138 mstb = found_port->parent; 3139 } 3140 } while (!rmstb); 3141 out: 3142 mutex_unlock(&mgr->lock); 3143 return rmstb; 3144 } 3145 3146 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, 3147 struct drm_dp_mst_port *port, 3148 int id, 3149 int pbn) 3150 { 3151 struct drm_dp_sideband_msg_tx *txmsg; 3152 struct drm_dp_mst_branch *mstb; 3153 int ret, port_num; 3154 u8 sinks[DRM_DP_MAX_SDP_STREAMS]; 3155 int i; 3156 3157 port_num = port->port_num; 3158 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); 3159 if (!mstb) { 3160 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, 3161 port->parent, 3162 &port_num); 3163 3164 if (!mstb) 3165 return -EINVAL; 3166 } 3167 3168 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3169 if (!txmsg) { 3170 ret = -ENOMEM; 3171 goto fail_put; 3172 } 3173 3174 for (i = 0; i < port->num_sdp_streams; i++) 3175 sinks[i] = i; 3176 3177 txmsg->dst = mstb; 3178 build_allocate_payload(txmsg, port_num, 3179 id, 3180 pbn, port->num_sdp_streams, sinks); 3181 3182 drm_dp_queue_down_tx(mgr, txmsg); 3183 3184 /* 3185 * FIXME: there is a small chance that between getting the last 3186 * connected mstb and sending the payload message, the last connected 3187 * mstb could also be removed from the topology. In the future, this 3188 * needs to be fixed by restarting the 3189 * drm_dp_get_last_connected_port_and_mstb() search in the event of a 3190 * timeout if the topology is still connected to the system. 
3191 */ 3192 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3193 if (ret > 0) { 3194 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3195 ret = -EINVAL; 3196 else 3197 ret = 0; 3198 } 3199 kfree(txmsg); 3200 fail_put: 3201 drm_dp_mst_topology_put_mstb(mstb); 3202 return ret; 3203 } 3204 3205 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, 3206 struct drm_dp_mst_port *port, bool power_up) 3207 { 3208 struct drm_dp_sideband_msg_tx *txmsg; 3209 int ret; 3210 3211 port = drm_dp_mst_topology_get_port_validated(mgr, port); 3212 if (!port) 3213 return -EINVAL; 3214 3215 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3216 if (!txmsg) { 3217 drm_dp_mst_topology_put_port(port); 3218 return -ENOMEM; 3219 } 3220 3221 txmsg->dst = port->parent; 3222 build_power_updown_phy(txmsg, port->port_num, power_up); 3223 drm_dp_queue_down_tx(mgr, txmsg); 3224 3225 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg); 3226 if (ret > 0) { 3227 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3228 ret = -EINVAL; 3229 else 3230 ret = 0; 3231 } 3232 kfree(txmsg); 3233 drm_dp_mst_topology_put_port(port); 3234 3235 return ret; 3236 } 3237 EXPORT_SYMBOL(drm_dp_send_power_updown_phy); 3238 3239 int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, 3240 struct drm_dp_mst_port *port, 3241 struct drm_dp_query_stream_enc_status_ack_reply *status) 3242 { 3243 struct drm_dp_sideband_msg_tx *txmsg; 3244 u8 nonce[7]; 3245 int ret; 3246 3247 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3248 if (!txmsg) 3249 return -ENOMEM; 3250 3251 port = drm_dp_mst_topology_get_port_validated(mgr, port); 3252 if (!port) { 3253 ret = -EINVAL; 3254 goto out_get_port; 3255 } 3256 3257 get_random_bytes(nonce, sizeof(nonce)); 3258 3259 /* 3260 * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message 3261 * transaction at the MST Branch device directly connected to the 3262 * Source" 3263 */ 3264 txmsg->dst = mgr->mst_primary; 3265 3266 build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce); 3267 3268 drm_dp_queue_down_tx(mgr, txmsg); 3269 3270 ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg); 3271 if (ret < 0) { 3272 goto out; 3273 } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { 3274 drm_dbg_kms(mgr->dev, "query encryption status nak received\n"); 3275 ret = -ENXIO; 3276 goto out; 3277 } 3278 3279 ret = 0; 3280 memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status)); 3281 3282 out: 3283 drm_dp_mst_topology_put_port(port); 3284 out_get_port: 3285 kfree(txmsg); 3286 return ret; 3287 } 3288 EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status); 3289 3290 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr, 3291 int id, 3292 struct drm_dp_payload *payload) 3293 { 3294 int ret; 3295 3296 ret = drm_dp_dpcd_write_payload(mgr, id, payload); 3297 if (ret < 0) { 3298 payload->payload_state = 0; 3299 return ret; 3300 } 3301 payload->payload_state = DP_PAYLOAD_LOCAL; 3302 return 0; 3303 } 3304 3305 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr, 3306 struct drm_dp_mst_port *port, 3307 int id, 3308 struct drm_dp_payload *payload) 3309 { 3310 int ret; 3311 3312 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn); 3313 if (ret < 0) 3314 return ret; 3315 payload->payload_state = DP_PAYLOAD_REMOTE; 3316 return ret; 3317 } 3318 3319 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr, 3320 struct drm_dp_mst_port *port, 3321 int id, 3322 struct drm_dp_payload *payload) 3323 { 3324 
	drm_dbg_kms(mgr->dev, "\n");
	/* it's okay for these to fail */
	if (port) {
		drm_dp_payload_send_msg(mgr, port, id, 0);
	}

	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
	return 0;
}

static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					int id,
					struct drm_dp_payload *payload)
{
	payload->payload_state = 0;
	return 0;
}

/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 * @start_slot: the time slot at which payload allocation starts
 *
 * NOTE: @start_slot is a temporary workaround for non-atomic drivers and
 * will be removed once the non-atomic MST helpers are moved out of this
 * helper.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * After calling this, the driver should generate ACT and payload
 * packets.
 */
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot)
{
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;
	int i, j;
	int cur_slots = start_slot;
	bool skip;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
		struct drm_dp_payload *payload = &mgr->payloads[i];
		bool put_port = false;

		/* solve the current payloads - compare to the hw ones
		   - update the hw view */
		req_payload.start_slot = cur_slots;
		if (vcpi) {
			port = container_of(vcpi, struct drm_dp_mst_port,
					    vcpi);

			mutex_lock(&mgr->lock);
			skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
			mutex_unlock(&mgr->lock);

			if (skip) {
				drm_dbg_kms(mgr->dev,
					    "Virtual channel %d is not in current topology\n",
					    i);
				continue;
			}
			/* Validated ports don't matter if we're releasing
			 * VCPI
			 */
			if (vcpi->num_slots) {
				port = drm_dp_mst_topology_get_port_validated(
				    mgr, port);
				if (!port) {
					if (vcpi->num_slots == payload->num_slots) {
						cur_slots += vcpi->num_slots;
						payload->start_slot = req_payload.start_slot;
						continue;
					} else {
						drm_dbg_kms(mgr->dev,
							    "Fail:set payload to invalid sink");
						mutex_unlock(&mgr->payload_lock);
						return -EINVAL;
					}
				}
				put_port = true;
			}

			req_payload.num_slots = vcpi->num_slots;
			req_payload.vcpi = vcpi->vcpi;
		} else {
			port = NULL;
			req_payload.num_slots = 0;
		}

		payload->start_slot = req_payload.start_slot;
		/* work out what is required to happen with this payload */
		if (payload->num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, vcpi->vcpi,
							    &req_payload);
				payload->num_slots = req_payload.num_slots;
				payload->vcpi = req_payload.vcpi;

			} else if (payload->num_slots) {
				payload->num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port,
							     payload->vcpi,
							     payload);
				req_payload.payload_state =
					payload->payload_state;
				payload->start_slot = 0;
			}
			payload->payload_state =
req_payload.payload_state; 3440 } 3441 cur_slots += req_payload.num_slots; 3442 3443 if (put_port) 3444 drm_dp_mst_topology_put_port(port); 3445 } 3446 3447 for (i = 0; i < mgr->max_payloads; /* do nothing */) { 3448 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) { 3449 i++; 3450 continue; 3451 } 3452 3453 drm_dbg_kms(mgr->dev, "removing payload %d\n", i); 3454 for (j = i; j < mgr->max_payloads - 1; j++) { 3455 mgr->payloads[j] = mgr->payloads[j + 1]; 3456 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1]; 3457 3458 if (mgr->proposed_vcpis[j] && 3459 mgr->proposed_vcpis[j]->num_slots) { 3460 set_bit(j + 1, &mgr->payload_mask); 3461 } else { 3462 clear_bit(j + 1, &mgr->payload_mask); 3463 } 3464 } 3465 3466 memset(&mgr->payloads[mgr->max_payloads - 1], 0, 3467 sizeof(struct drm_dp_payload)); 3468 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL; 3469 clear_bit(mgr->max_payloads, &mgr->payload_mask); 3470 } 3471 mutex_unlock(&mgr->payload_lock); 3472 3473 return 0; 3474 } 3475 EXPORT_SYMBOL(drm_dp_update_payload_part1); 3476 3477 /** 3478 * drm_dp_update_payload_part2() - Execute payload update part 2 3479 * @mgr: manager to use. 3480 * 3481 * This iterates over all proposed virtual channels, and tries to 3482 * allocate space in the link for them. For 0->slots transitions, 3483 * this step writes the remote VC payload commands. For slots->0 3484 * this just resets some internal state. 3485 */ 3486 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) 3487 { 3488 struct drm_dp_mst_port *port; 3489 int i; 3490 int ret = 0; 3491 bool skip; 3492 3493 mutex_lock(&mgr->payload_lock); 3494 for (i = 0; i < mgr->max_payloads; i++) { 3495 3496 if (!mgr->proposed_vcpis[i]) 3497 continue; 3498 3499 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 3500 3501 mutex_lock(&mgr->lock); 3502 skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary); 3503 mutex_unlock(&mgr->lock); 3504 3505 if (skip) 3506 continue; 3507 3508 drm_dbg_kms(mgr->dev, "payload %d %d\n", i, mgr->payloads[i].payload_state); 3509 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { 3510 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); 3511 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { 3512 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); 3513 } 3514 if (ret) { 3515 mutex_unlock(&mgr->payload_lock); 3516 return ret; 3517 } 3518 } 3519 mutex_unlock(&mgr->payload_lock); 3520 return 0; 3521 } 3522 EXPORT_SYMBOL(drm_dp_update_payload_part2); 3523 3524 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, 3525 struct drm_dp_mst_port *port, 3526 int offset, int size, u8 *bytes) 3527 { 3528 int ret = 0; 3529 struct drm_dp_sideband_msg_tx *txmsg; 3530 struct drm_dp_mst_branch *mstb; 3531 3532 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); 3533 if (!mstb) 3534 return -EINVAL; 3535 3536 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3537 if (!txmsg) { 3538 ret = -ENOMEM; 3539 goto fail_put; 3540 } 3541 3542 build_dpcd_read(txmsg, port->port_num, offset, size); 3543 txmsg->dst = port->parent; 3544 3545 drm_dp_queue_down_tx(mgr, txmsg); 3546 3547 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3548 if (ret < 0) 3549 goto fail_free; 3550 3551 if (txmsg->reply.reply_type == 1) { 3552 drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n", 3553 mstb, port->port_num, offset, size); 3554 ret = -EIO; 3555 
goto fail_free; 3556 } 3557 3558 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) { 3559 ret = -EPROTO; 3560 goto fail_free; 3561 } 3562 3563 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes, 3564 size); 3565 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret); 3566 3567 fail_free: 3568 kfree(txmsg); 3569 fail_put: 3570 drm_dp_mst_topology_put_mstb(mstb); 3571 3572 return ret; 3573 } 3574 3575 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, 3576 struct drm_dp_mst_port *port, 3577 int offset, int size, u8 *bytes) 3578 { 3579 int ret; 3580 struct drm_dp_sideband_msg_tx *txmsg; 3581 struct drm_dp_mst_branch *mstb; 3582 3583 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); 3584 if (!mstb) 3585 return -EINVAL; 3586 3587 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3588 if (!txmsg) { 3589 ret = -ENOMEM; 3590 goto fail_put; 3591 } 3592 3593 build_dpcd_write(txmsg, port->port_num, offset, size, bytes); 3594 txmsg->dst = mstb; 3595 3596 drm_dp_queue_down_tx(mgr, txmsg); 3597 3598 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3599 if (ret > 0) { 3600 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3601 ret = -EIO; 3602 else 3603 ret = size; 3604 } 3605 3606 kfree(txmsg); 3607 fail_put: 3608 drm_dp_mst_topology_put_mstb(mstb); 3609 return ret; 3610 } 3611 3612 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type) 3613 { 3614 struct drm_dp_sideband_msg_reply_body reply; 3615 3616 reply.reply_type = DP_SIDEBAND_REPLY_ACK; 3617 reply.req_type = req_type; 3618 drm_dp_encode_sideband_reply(&reply, msg); 3619 return 0; 3620 } 3621 3622 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, 3623 struct drm_dp_mst_branch *mstb, 3624 int req_type, bool broadcast) 3625 { 3626 struct drm_dp_sideband_msg_tx *txmsg; 3627 3628 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3629 if (!txmsg) 3630 return -ENOMEM; 3631 3632 txmsg->dst = mstb; 3633 drm_dp_encode_up_ack_reply(txmsg, req_type); 3634 3635 mutex_lock(&mgr->qlock); 3636 /* construct a chunk from the first msg in the tx_msg queue */ 3637 process_single_tx_qlock(mgr, txmsg, true); 3638 mutex_unlock(&mgr->qlock); 3639 3640 kfree(txmsg); 3641 return 0; 3642 } 3643 3644 /** 3645 * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link 3646 * @mgr: The &drm_dp_mst_topology_mgr to use 3647 * @link_rate: link rate in 10kbits/s units 3648 * @link_lane_count: lane count 3649 * 3650 * Calculate the total bandwidth of a MultiStream Transport link. The returned 3651 * value is in units of PBNs/(timeslots/1 MTP). This value can be used to 3652 * convert the number of PBNs required for a given stream to the number of 3653 * timeslots this stream requires in each MTP. 
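 *
 * Worked example (numbers assumed for illustration): a 4-lane HBR2 link
 * has link_rate = 540000 (in 10 kbit/s units) and link_lane_count = 4,
 * giving 540000 * 4 / 54000 = 40 PBN per time slot. A stream that needs
 * 280 PBN would then occupy DIV_ROUND_UP(280, 40) = 7 time slots in each
 * MTP.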
3654 */ 3655 int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, 3656 int link_rate, int link_lane_count) 3657 { 3658 if (link_rate == 0 || link_lane_count == 0) 3659 drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n", 3660 link_rate, link_lane_count); 3661 3662 /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */ 3663 return link_rate * link_lane_count / 54000; 3664 } 3665 EXPORT_SYMBOL(drm_dp_get_vc_payload_bw); 3666 3667 /** 3668 * drm_dp_read_mst_cap() - check whether or not a sink supports MST 3669 * @aux: The DP AUX channel to use 3670 * @dpcd: A cached copy of the DPCD capabilities for this sink 3671 * 3672 * Returns: %True if the sink supports MST, %false otherwise 3673 */ 3674 bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, 3675 const u8 dpcd[DP_RECEIVER_CAP_SIZE]) 3676 { 3677 u8 mstm_cap; 3678 3679 if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12) 3680 return false; 3681 3682 if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1) 3683 return false; 3684 3685 return mstm_cap & DP_MST_CAP; 3686 } 3687 EXPORT_SYMBOL(drm_dp_read_mst_cap); 3688 3689 /** 3690 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager 3691 * @mgr: manager to set state for 3692 * @mst_state: true to enable MST on this connector - false to disable. 3693 * 3694 * This is called by the driver when it detects an MST capable device plugged 3695 * into a DP MST capable port, or when a DP MST capable device is unplugged. 3696 */ 3697 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) 3698 { 3699 int ret = 0; 3700 struct drm_dp_mst_branch *mstb = NULL; 3701 3702 mutex_lock(&mgr->payload_lock); 3703 mutex_lock(&mgr->lock); 3704 if (mst_state == mgr->mst_state) 3705 goto out_unlock; 3706 3707 mgr->mst_state = mst_state; 3708 /* set the device into MST mode */ 3709 if (mst_state) { 3710 struct drm_dp_payload reset_pay; 3711 int lane_count; 3712 int link_rate; 3713 3714 WARN_ON(mgr->mst_primary); 3715 3716 /* get dpcd info */ 3717 ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd); 3718 if (ret < 0) { 3719 drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n", 3720 mgr->aux->name, ret); 3721 goto out_unlock; 3722 } 3723 3724 lane_count = min_t(int, mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, mgr->max_lane_count); 3725 link_rate = min_t(int, drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->max_link_rate); 3726 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr, 3727 link_rate, 3728 lane_count); 3729 if (mgr->pbn_div == 0) { 3730 ret = -EINVAL; 3731 goto out_unlock; 3732 } 3733 3734 /* add initial branch device at LCT 1 */ 3735 mstb = drm_dp_add_mst_branch_device(1, NULL); 3736 if (mstb == NULL) { 3737 ret = -ENOMEM; 3738 goto out_unlock; 3739 } 3740 mstb->mgr = mgr; 3741 3742 /* give this the main reference */ 3743 mgr->mst_primary = mstb; 3744 drm_dp_mst_topology_get_mstb(mgr->mst_primary); 3745 3746 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 3747 DP_MST_EN | 3748 DP_UP_REQ_EN | 3749 DP_UPSTREAM_IS_SRC); 3750 if (ret < 0) 3751 goto out_unlock; 3752 3753 reset_pay.start_slot = 0; 3754 reset_pay.num_slots = 0x3f; 3755 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); 3756 3757 queue_work(system_long_wq, &mgr->work); 3758 3759 ret = 0; 3760 } else { 3761 /* disable MST on the device */ 3762 mstb = mgr->mst_primary; 3763 mgr->mst_primary = NULL; 3764 /* this can fail if the device is gone */ 3765 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); 3766 ret = 0; 3767 memset(mgr->payloads, 0, 3768 mgr->max_payloads * 
		       sizeof(mgr->payloads[0]));
		memset(mgr->proposed_vcpis, 0,
		       mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);
		mgr->vcpi_mask = 0;
		mgr->payload_id_table_cleared = false;
	}

out_unlock:
	mutex_unlock(&mgr->lock);
	mutex_unlock(&mgr->payload_lock);
	if (mstb)
		drm_dp_mst_topology_put_mstb(mstb);
	return ret;

}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);

static void
drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;

	/* The link address will need to be re-sent on resume */
	mstb->link_address_sent = false;

	list_for_each_entry(port, &mstb->ports, next)
		if (port->mstb)
			drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
}

/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
	flush_work(&mgr->up_req_work);
	flush_work(&mgr->work);
	flush_work(&mgr->delayed_destroy_work);

	mutex_lock(&mgr->lock);
	if (mgr->mst_state && mgr->mst_primary)
		drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);

/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 * @sync: whether or not to perform topology reprobing synchronously
 *
 * This will fetch the DPCD and see if the device is still there; if it is,
 * it will rewrite the MSTM control bits and return.
 *
 * If the device fails, this returns -1, and the driver should do a full MST
 * reprobe, in case we were undocked.
 *
 * During system resume (where it is assumed that the driver will be calling
 * drm_atomic_helper_resume()) this function should be called beforehand with
 * @sync set to true. In contexts like runtime resume where the driver is not
 * expected to be calling drm_atomic_helper_resume(), this function should be
 * called with @sync set to false in order to avoid deadlocking.
 *
 * Returns: -1 if the MST topology was removed while we were suspended, 0
 * otherwise.
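 *
 * A minimal sketch of a driver's system-resume path (illustration only;
 * error handling and driver-specific state are elided):
 *
 *	if (drm_dp_mst_topology_mgr_resume(mgr, true) < 0)
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 *	drm_atomic_helper_resume(dev, state);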
3843 */ 3844 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, 3845 bool sync) 3846 { 3847 int ret; 3848 u8 guid[16]; 3849 3850 mutex_lock(&mgr->lock); 3851 if (!mgr->mst_primary) 3852 goto out_fail; 3853 3854 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { 3855 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 3856 goto out_fail; 3857 } 3858 3859 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 3860 DP_MST_EN | 3861 DP_UP_REQ_EN | 3862 DP_UPSTREAM_IS_SRC); 3863 if (ret < 0) { 3864 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); 3865 goto out_fail; 3866 } 3867 3868 /* Some hubs forget their guids after they resume */ 3869 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); 3870 if (ret != 16) { 3871 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 3872 goto out_fail; 3873 } 3874 3875 ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid); 3876 if (ret) { 3877 drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n"); 3878 goto out_fail; 3879 } 3880 3881 /* 3882 * For the final step of resuming the topology, we need to bring the 3883 * state of our in-memory topology back into sync with reality. So, 3884 * restart the probing process as if we're probing a new hub 3885 */ 3886 queue_work(system_long_wq, &mgr->work); 3887 mutex_unlock(&mgr->lock); 3888 3889 if (sync) { 3890 drm_dbg_kms(mgr->dev, 3891 "Waiting for link probe work to finish re-syncing topology...\n"); 3892 flush_work(&mgr->work); 3893 } 3894 3895 return 0; 3896 3897 out_fail: 3898 mutex_unlock(&mgr->lock); 3899 return -1; 3900 } 3901 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); 3902 3903 static bool 3904 drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, 3905 struct drm_dp_mst_branch **mstb) 3906 { 3907 int len; 3908 u8 replyblock[32]; 3909 int replylen, curreply; 3910 int ret; 3911 u8 hdrlen; 3912 struct drm_dp_sideband_msg_hdr hdr; 3913 struct drm_dp_sideband_msg_rx *msg = 3914 up ? &mgr->up_req_recv : &mgr->down_rep_recv; 3915 int basereg = up ? 
DP_SIDEBAND_MSG_UP_REQ_BASE : 3916 DP_SIDEBAND_MSG_DOWN_REP_BASE; 3917 3918 if (!up) 3919 *mstb = NULL; 3920 3921 len = min(mgr->max_dpcd_transaction_bytes, 16); 3922 ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len); 3923 if (ret != len) { 3924 drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret); 3925 return false; 3926 } 3927 3928 ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen); 3929 if (ret == false) { 3930 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 3931 1, replyblock, len, false); 3932 drm_dbg_kms(mgr->dev, "ERROR: failed header\n"); 3933 return false; 3934 } 3935 3936 if (!up) { 3937 /* Caller is responsible for giving back this reference */ 3938 *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad); 3939 if (!*mstb) { 3940 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct); 3941 return false; 3942 } 3943 } 3944 3945 if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) { 3946 drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]); 3947 return false; 3948 } 3949 3950 replylen = min(msg->curchunk_len, (u8)(len - hdrlen)); 3951 ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen); 3952 if (!ret) { 3953 drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]); 3954 return false; 3955 } 3956 3957 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len; 3958 curreply = len; 3959 while (replylen > 0) { 3960 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); 3961 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, 3962 replyblock, len); 3963 if (ret != len) { 3964 drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n", 3965 len, ret); 3966 return false; 3967 } 3968 3969 ret = drm_dp_sideband_append_payload(msg, replyblock, len); 3970 if (!ret) { 3971 drm_dbg_kms(mgr->dev, "failed to build sideband msg\n"); 3972 return false; 3973 } 3974 3975 curreply += len; 3976 replylen -= len; 3977 } 3978 return true; 3979 } 3980 3981 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) 3982 { 3983 struct drm_dp_sideband_msg_tx *txmsg; 3984 struct drm_dp_mst_branch *mstb = NULL; 3985 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv; 3986 3987 if (!drm_dp_get_one_sb_msg(mgr, false, &mstb)) 3988 goto out; 3989 3990 /* Multi-packet message transmission, don't clear the reply */ 3991 if (!msg->have_eomt) 3992 goto out; 3993 3994 /* find the message */ 3995 mutex_lock(&mgr->qlock); 3996 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq, 3997 struct drm_dp_sideband_msg_tx, next); 3998 mutex_unlock(&mgr->qlock); 3999 4000 /* Were we actually expecting a response, and from this mstb? 
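	 * (for instance, the request may already have timed out and been
	 * dropped from the queue before this reply arrived)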
*/ 4001 if (!txmsg || txmsg->dst != mstb) { 4002 struct drm_dp_sideband_msg_hdr *hdr; 4003 4004 hdr = &msg->initial_hdr; 4005 drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n", 4006 mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]); 4007 goto out_clear_reply; 4008 } 4009 4010 drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply); 4011 4012 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { 4013 drm_dbg_kms(mgr->dev, 4014 "Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n", 4015 txmsg->reply.req_type, 4016 drm_dp_mst_req_type_str(txmsg->reply.req_type), 4017 txmsg->reply.u.nak.reason, 4018 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason), 4019 txmsg->reply.u.nak.nak_data); 4020 } 4021 4022 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx)); 4023 drm_dp_mst_topology_put_mstb(mstb); 4024 4025 mutex_lock(&mgr->qlock); 4026 txmsg->state = DRM_DP_SIDEBAND_TX_RX; 4027 list_del(&txmsg->next); 4028 mutex_unlock(&mgr->qlock); 4029 4030 wake_up_all(&mgr->tx_waitq); 4031 4032 return 0; 4033 4034 out_clear_reply: 4035 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx)); 4036 out: 4037 if (mstb) 4038 drm_dp_mst_topology_put_mstb(mstb); 4039 4040 return 0; 4041 } 4042 4043 static inline bool 4044 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, 4045 struct drm_dp_pending_up_req *up_req) 4046 { 4047 struct drm_dp_mst_branch *mstb = NULL; 4048 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg; 4049 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr; 4050 bool hotplug = false; 4051 4052 if (hdr->broadcast) { 4053 const u8 *guid = NULL; 4054 4055 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) 4056 guid = msg->u.conn_stat.guid; 4057 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY) 4058 guid = msg->u.resource_stat.guid; 4059 4060 if (guid) 4061 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); 4062 } else { 4063 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); 4064 } 4065 4066 if (!mstb) { 4067 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct); 4068 return false; 4069 } 4070 4071 /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */ 4072 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { 4073 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); 4074 hotplug = true; 4075 } 4076 4077 drm_dp_mst_topology_put_mstb(mstb); 4078 return hotplug; 4079 } 4080 4081 static void drm_dp_mst_up_req_work(struct work_struct *work) 4082 { 4083 struct drm_dp_mst_topology_mgr *mgr = 4084 container_of(work, struct drm_dp_mst_topology_mgr, 4085 up_req_work); 4086 struct drm_dp_pending_up_req *up_req; 4087 bool send_hotplug = false; 4088 4089 mutex_lock(&mgr->probe_lock); 4090 while (true) { 4091 mutex_lock(&mgr->up_req_lock); 4092 up_req = list_first_entry_or_null(&mgr->up_req_list, 4093 struct drm_dp_pending_up_req, 4094 next); 4095 if (up_req) 4096 list_del(&up_req->next); 4097 mutex_unlock(&mgr->up_req_lock); 4098 4099 if (!up_req) 4100 break; 4101 4102 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req); 4103 kfree(up_req); 4104 } 4105 mutex_unlock(&mgr->probe_lock); 4106 4107 if (send_hotplug) 4108 drm_kms_helper_hotplug_event(mgr->dev); 4109 } 4110 4111 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) 4112 { 4113 struct drm_dp_pending_up_req *up_req; 4114 4115 if (!drm_dp_get_one_sb_msg(mgr, true, NULL)) 4116 goto out; 4117 4118 if (!mgr->up_req_recv.have_eomt) 4119 return 0; 4120 4121 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL); 4122 if (!up_req) 4123 
return -ENOMEM; 4124 4125 INIT_LIST_HEAD(&up_req->next); 4126 4127 drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg); 4128 4129 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && 4130 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) { 4131 drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n", 4132 up_req->msg.req_type); 4133 kfree(up_req); 4134 goto out; 4135 } 4136 4137 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type, 4138 false); 4139 4140 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { 4141 const struct drm_dp_connection_status_notify *conn_stat = 4142 &up_req->msg.u.conn_stat; 4143 4144 drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", 4145 conn_stat->port_number, 4146 conn_stat->legacy_device_plug_status, 4147 conn_stat->displayport_device_plug_status, 4148 conn_stat->message_capability_status, 4149 conn_stat->input_port, 4150 conn_stat->peer_device_type); 4151 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { 4152 const struct drm_dp_resource_status_notify *res_stat = 4153 &up_req->msg.u.resource_stat; 4154 4155 drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n", 4156 res_stat->port_number, 4157 res_stat->available_pbn); 4158 } 4159 4160 up_req->hdr = mgr->up_req_recv.initial_hdr; 4161 mutex_lock(&mgr->up_req_lock); 4162 list_add_tail(&up_req->next, &mgr->up_req_list); 4163 mutex_unlock(&mgr->up_req_lock); 4164 queue_work(system_long_wq, &mgr->up_req_work); 4165 4166 out: 4167 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 4168 return 0; 4169 } 4170 4171 /** 4172 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify 4173 * @mgr: manager to notify irq for. 4174 * @esi: 4 bytes from SINK_COUNT_ESI 4175 * @handled: whether the hpd interrupt was consumed or not 4176 * 4177 * This should be called from the driver when it detects a short IRQ, 4178 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The 4179 * topology manager will process the sideband messages received as a result 4180 * of this. 4181 */ 4182 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled) 4183 { 4184 int ret = 0; 4185 int sc; 4186 *handled = false; 4187 sc = DP_GET_SINK_COUNT(esi[0]); 4188 4189 if (sc != mgr->sink_count) { 4190 mgr->sink_count = sc; 4191 *handled = true; 4192 } 4193 4194 if (esi[1] & DP_DOWN_REP_MSG_RDY) { 4195 ret = drm_dp_mst_handle_down_rep(mgr); 4196 *handled = true; 4197 } 4198 4199 if (esi[1] & DP_UP_REQ_MSG_RDY) { 4200 ret |= drm_dp_mst_handle_up_req(mgr); 4201 *handled = true; 4202 } 4203 4204 drm_dp_mst_kick_tx(mgr); 4205 return ret; 4206 } 4207 EXPORT_SYMBOL(drm_dp_mst_hpd_irq); 4208 4209 /** 4210 * drm_dp_mst_detect_port() - get connection status for an MST port 4211 * @connector: DRM connector for this port 4212 * @ctx: The acquisition context to use for grabbing locks 4213 * @mgr: manager for this port 4214 * @port: pointer to a port 4215 * 4216 * This returns the current connection state for a port. 
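 *
 * A hedged sketch of a &drm_connector_helper_funcs.detect_ctx hook built on
 * top of this helper (the my_connector wrapper and its fields are
 * hypothetical):
 *
 *	static int my_mst_detect(struct drm_connector *connector,
 *				 struct drm_modeset_acquire_ctx *ctx,
 *				 bool force)
 *	{
 *		struct my_connector *c = to_my_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, ctx, c->mst_mgr,
 *					      c->mst_port);
 *	}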
 */
int
drm_dp_mst_detect_port(struct drm_connector *connector,
		       struct drm_modeset_acquire_ctx *ctx,
		       struct drm_dp_mst_topology_mgr *mgr,
		       struct drm_dp_mst_port *port)
{
	int ret;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return connector_status_disconnected;

	ret = drm_modeset_lock(&mgr->base.lock, ctx);
	if (ret)
		goto out;

	ret = connector_status_disconnected;

	if (!port->ddps)
		goto out;

	switch (port->pdt) {
	case DP_PEER_DEVICE_NONE:
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		if (!port->mcs)
			ret = connector_status_connected;
		break;

	case DP_PEER_DEVICE_SST_SINK:
		ret = connector_status_connected;
		/* for logical ports - cache the EDID */
		if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
		break;
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
		if (port->ldps)
			ret = connector_status_connected;
		break;
	}
out:
	drm_dp_mst_topology_put_port(port);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);

/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector. It validates
 * that the pointer still exists, so the caller doesn't require a reference.
 */
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct edid *edid = NULL;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return NULL;

	if (port->cached_edid)
		edid = drm_edid_duplicate(port->cached_edid);
	else
		edid = drm_get_edid(connector, &port->aux.ddc);

	port->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_mst_topology_put_port(port);
	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);

/**
 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 *
 * Calculate the number of VCPI slots that will be required for the given PBN
 * value. This function is deprecated, and should not be used in atomic
 * drivers.
 *
 * RETURNS:
 * The total slots required for this port, or a negative error code.
 */
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn)
{
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	/* max. time slots - one slot for MTP header */
	if (num_slots > 63)
		return -ENOSPC;
	return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);

static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_vcpi *vcpi, int pbn, int slots)
{
	int ret;

	vcpi->pbn = pbn;
	vcpi->aligned_pbn = slots * mgr->pbn_div;
	vcpi->num_slots = slots;

	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: port to find vcpi slots for
 * @pbn: bandwidth required for the mode in PBN
 * @pbn_div: divider for DSC mode that takes FEC into account
 *
 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
 * may have had. Any atomic drivers which support MST must call this function
 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
 * current VCPI allocation for the new state, but only when
 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
 * to ensure compatibility with userspace applications that still use the
 * legacy modesetting UAPI.
 *
 * Allocations set by this function are not checked against the bandwidth
 * constraints of @mgr until the driver calls drm_dp_mst_atomic_check().
 *
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK, however, to call this function and
 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
 *
 * See also:
 * drm_dp_atomic_release_vcpi_slots()
 * drm_dp_mst_atomic_check()
 *
 * Returns:
 * Total slots in the atomic state assigned for this port, or a negative error
 * code if the port no longer exists
 */
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port, int pbn,
				  int pbn_div)
{
	struct drm_dp_mst_topology_state *topology_state;
	struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
	int prev_slots, prev_bw, req_slots;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(topology_state))
		return PTR_ERR(topology_state);

	/* Find the current allocation for this port, if any */
	list_for_each_entry(pos, &topology_state->vcpis, next) {
		if (pos->port == port) {
			vcpi = pos;
			prev_slots = vcpi->vcpi;
			prev_bw = vcpi->pbn;

			/*
			 * This should never happen, unless the driver tries
			 * releasing and allocating the same VCPI allocation,
			 * which is an error
			 */
			if (WARN_ON(!prev_slots)) {
				drm_err(mgr->dev,
					"cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
					port);
				return -EINVAL;
			}

			break;
		}
	}
	if (!vcpi) {
		prev_slots = 0;
		prev_bw = 0;
	}

	if (pbn_div <= 0)
		pbn_div = mgr->pbn_div;

	req_slots = DIV_ROUND_UP(pbn, pbn_div);

	drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
		       port->connector->base.id, port->connector->name,
		       port, prev_slots, req_slots);
	drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
		       port->connector->base.id,
		       port->connector->name,
		       port, prev_bw, pbn);

	/* Add the new allocation to the state */
	if (!vcpi) {
		vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
		if (!vcpi)
			return -ENOMEM;

		drm_dp_mst_get_port_malloc(port);
		vcpi->port = port;
		list_add(&vcpi->next, &topology_state->vcpis);
	}
	vcpi->vcpi = req_slots;
	vcpi->pbn = pbn;

	return req_slots;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);

/**
 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: The port to release the VCPI slots from
 *
 * Releases any VCPI slots that have been allocated to a port in the atomic
 * state. Any atomic drivers which support MST must call this function in
 * their &drm_connector_helper_funcs.atomic_check() callback when a connector
 * that had VCPI allocated in the previous atomic state will no longer have
 * any allocated (e.g. because its CRTC was removed).
 *
 * It is OK to call this even if @port has been removed from the system.
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK, however, to call this function and
 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
 * phase.
 *
 * See also:
 * drm_dp_atomic_find_vcpi_slots()
 * drm_dp_mst_atomic_check()
 *
 * Returns:
 * 0 if all slots for this port were added back to
 * &drm_dp_mst_topology_state.avail_slots or negative error code
 */
int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
				     struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_topology_state *topology_state;
	struct drm_dp_vcpi_allocation *pos;
	bool found = false;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(topology_state))
		return PTR_ERR(topology_state);

	list_for_each_entry(pos, &topology_state->vcpis, next) {
		if (pos->port == port) {
			found = true;
			break;
		}
	}
	if (WARN_ON(!found)) {
		drm_err(mgr->dev, "no VCPI for [MST PORT:%p] found in mst state %p\n",
			port, &topology_state->base);
		return -EINVAL;
	}

	drm_dbg_atomic(mgr->dev, "[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
	if (pos->vcpi) {
		drm_dp_mst_put_port_malloc(port);
		pos->vcpi = 0;
		pos->pbn = 0;
	}

	return 0;
}
EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);

/**
 * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
 * @mst_state: mst_state to update
 * @link_encoding_cap: the encoding format on the link
 */
void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
{
	if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
		mst_state->total_avail_slots = 64;
		mst_state->start_slot = 0;
	} else {
		mst_state->total_avail_slots = 63;
		mst_state->start_slot = 1;
	}

	DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
		      (link_encoding_cap == DP_CAP_ANSI_128B132B) ?
"128b/132b":"8b/10b", 4514 mst_state); 4515 } 4516 EXPORT_SYMBOL(drm_dp_mst_update_slots); 4517 4518 /** 4519 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel 4520 * @mgr: manager for this port 4521 * @port: port to allocate a virtual channel for. 4522 * @pbn: payload bandwidth number to request 4523 * @slots: returned number of slots for this PBN. 4524 */ 4525 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, 4526 struct drm_dp_mst_port *port, int pbn, int slots) 4527 { 4528 int ret; 4529 4530 if (slots < 0) 4531 return false; 4532 4533 port = drm_dp_mst_topology_get_port_validated(mgr, port); 4534 if (!port) 4535 return false; 4536 4537 if (port->vcpi.vcpi > 0) { 4538 drm_dbg_kms(mgr->dev, 4539 "payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", 4540 port->vcpi.vcpi, port->vcpi.pbn, pbn); 4541 if (pbn == port->vcpi.pbn) { 4542 drm_dp_mst_topology_put_port(port); 4543 return true; 4544 } 4545 } 4546 4547 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots); 4548 if (ret) { 4549 drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d ret=%d\n", 4550 DIV_ROUND_UP(pbn, mgr->pbn_div), ret); 4551 drm_dp_mst_topology_put_port(port); 4552 goto out; 4553 } 4554 drm_dbg_kms(mgr->dev, "initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots); 4555 4556 /* Keep port allocated until its payload has been removed */ 4557 drm_dp_mst_get_port_malloc(port); 4558 drm_dp_mst_topology_put_port(port); 4559 return true; 4560 out: 4561 return false; 4562 } 4563 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi); 4564 4565 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 4566 { 4567 int slots = 0; 4568 4569 port = drm_dp_mst_topology_get_port_validated(mgr, port); 4570 if (!port) 4571 return slots; 4572 4573 slots = port->vcpi.num_slots; 4574 drm_dp_mst_topology_put_port(port); 4575 return slots; 4576 } 4577 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); 4578 4579 /** 4580 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI 4581 * @mgr: manager for this port 4582 * @port: unverified pointer to a port. 4583 * 4584 * This just resets the number of slots for the ports VCPI for later programming. 4585 */ 4586 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 4587 { 4588 /* 4589 * A port with VCPI will remain allocated until its VCPI is 4590 * released, no verified ref needed 4591 */ 4592 4593 port->vcpi.num_slots = 0; 4594 } 4595 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); 4596 4597 /** 4598 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI 4599 * @mgr: manager for this port 4600 * @port: port to deallocate vcpi for 4601 * 4602 * This can be called unconditionally, regardless of whether 4603 * drm_dp_mst_allocate_vcpi() succeeded or not. 
 */
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port)
{
	bool skip;

	if (!port->vcpi.vcpi)
		return;

	mutex_lock(&mgr->lock);
	skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
	mutex_unlock(&mgr->lock);

	if (skip)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_mst_put_port_malloc(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
	if (ret != 3) {
		drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0) {
		drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
			    status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}

static int do_get_act_status(struct drm_dp_aux *aux)
{
	int ret;
	u8 status;

	ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0)
		return ret;

	return status;
}

/**
 * drm_dp_check_act_status() - Polls for ACT handled status.
 * @mgr: manager to use
 *
 * Tries waiting for the MST hub to finish updating its payload table by
 * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
 * take that long).
 *
 * Returns:
 * 0 if the ACT was handled in time, negative error code on failure.
 */
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	/*
	 * There doesn't seem to be any recommended retry count or timeout in
	 * the MST specification. Since some hubs have been observed to take
	 * over 1 second to update their payload allocations under certain
	 * conditions, we use a rather large timeout value.
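	 * The poll below samples DP_PAYLOAD_TABLE_UPDATE_STATUS roughly every
	 * 200 us until DP_PAYLOAD_ACT_HANDLED is set (or the read itself
	 * fails), giving up after 3 seconds.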
4702 */ 4703 const int timeout_ms = 3000; 4704 int ret, status; 4705 4706 ret = readx_poll_timeout(do_get_act_status, mgr->aux, status, 4707 status & DP_PAYLOAD_ACT_HANDLED || status < 0, 4708 200, timeout_ms * USEC_PER_MSEC); 4709 if (ret < 0 && status >= 0) { 4710 drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n", 4711 timeout_ms, status); 4712 return -EINVAL; 4713 } else if (status < 0) { 4714 /* 4715 * Failure here isn't unexpected - the hub may have 4716 * just been unplugged 4717 */ 4718 drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status); 4719 return status; 4720 } 4721 4722 return 0; 4723 } 4724 EXPORT_SYMBOL(drm_dp_check_act_status); 4725 4726 /** 4727 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode. 4728 * @clock: dot clock for the mode 4729 * @bpp: bpp for the mode. 4730 * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel 4731 * 4732 * This uses the formula in the spec to calculate the PBN value for a mode. 4733 */ 4734 int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc) 4735 { 4736 /* 4737 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 4738 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on 4739 * common multiplier to render an integer PBN for all link rate/lane 4740 * counts combinations 4741 * calculate 4742 * peak_kbps *= (1006/1000) 4743 * peak_kbps *= (64/54) 4744 * peak_kbps *= 8 convert to bytes 4745 * 4746 * If the bpp is in units of 1/16, further divide by 16. Put this 4747 * factor in the numerator rather than the denominator to avoid 4748 * integer overflow 4749 */ 4750 4751 if (dsc) 4752 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006), 4753 8 * 54 * 1000 * 1000); 4754 4755 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006), 4756 8 * 54 * 1000 * 1000); 4757 } 4758 EXPORT_SYMBOL(drm_dp_calc_pbn_mode); 4759 4760 /* we want to kick the TX after we've ack the up/down IRQs. */ 4761 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr) 4762 { 4763 queue_work(system_long_wq, &mgr->tx_work); 4764 } 4765 4766 /* 4767 * Helper function for parsing DP device types into convenient strings 4768 * for use with dp_mst_topology 4769 */ 4770 static const char *pdt_to_string(u8 pdt) 4771 { 4772 switch (pdt) { 4773 case DP_PEER_DEVICE_NONE: 4774 return "NONE"; 4775 case DP_PEER_DEVICE_SOURCE_OR_SST: 4776 return "SOURCE OR SST"; 4777 case DP_PEER_DEVICE_MST_BRANCHING: 4778 return "MST BRANCHING"; 4779 case DP_PEER_DEVICE_SST_SINK: 4780 return "SST SINK"; 4781 case DP_PEER_DEVICE_DP_LEGACY_CONV: 4782 return "DP LEGACY CONV"; 4783 default: 4784 return "ERR"; 4785 } 4786 } 4787 4788 static void drm_dp_mst_dump_mstb(struct seq_file *m, 4789 struct drm_dp_mst_branch *mstb) 4790 { 4791 struct drm_dp_mst_port *port; 4792 int tabs = mstb->lct; 4793 char prefix[10]; 4794 int i; 4795 4796 for (i = 0; i < tabs; i++) 4797 prefix[i] = '\t'; 4798 prefix[i] = '\0'; 4799 4800 seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports); 4801 list_for_each_entry(port, &mstb->ports, next) { 4802 seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n", 4803 prefix, 4804 port->port_num, 4805 port, 4806 port->input ? "input" : "output", 4807 pdt_to_string(port->pdt), 4808 port->ddps, 4809 port->ldps, 4810 port->num_sdp_streams, 4811 port->num_sdp_stream_sinks, 4812 port->fec_capable ? 
"true" : "false", 4813 port->connector); 4814 if (port->mstb) 4815 drm_dp_mst_dump_mstb(m, port->mstb); 4816 } 4817 } 4818 4819 #define DP_PAYLOAD_TABLE_SIZE 64 4820 4821 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, 4822 char *buf) 4823 { 4824 int i; 4825 4826 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) { 4827 if (drm_dp_dpcd_read(mgr->aux, 4828 DP_PAYLOAD_TABLE_UPDATE_STATUS + i, 4829 &buf[i], 16) != 16) 4830 return false; 4831 } 4832 return true; 4833 } 4834 4835 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr, 4836 struct drm_dp_mst_port *port, char *name, 4837 int namelen) 4838 { 4839 struct edid *mst_edid; 4840 4841 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port); 4842 drm_edid_get_monitor_name(mst_edid, name, namelen); 4843 kfree(mst_edid); 4844 } 4845 4846 /** 4847 * drm_dp_mst_dump_topology(): dump topology to seq file. 4848 * @m: seq_file to dump output to 4849 * @mgr: manager to dump current topology for. 4850 * 4851 * helper to dump MST topology to a seq file for debugfs. 4852 */ 4853 void drm_dp_mst_dump_topology(struct seq_file *m, 4854 struct drm_dp_mst_topology_mgr *mgr) 4855 { 4856 int i; 4857 struct drm_dp_mst_port *port; 4858 4859 mutex_lock(&mgr->lock); 4860 if (mgr->mst_primary) 4861 drm_dp_mst_dump_mstb(m, mgr->mst_primary); 4862 4863 /* dump VCPIs */ 4864 mutex_unlock(&mgr->lock); 4865 4866 mutex_lock(&mgr->payload_lock); 4867 seq_printf(m, "\n*** VCPI Info ***\n"); 4868 seq_printf(m, "payload_mask: %lx, vcpi_mask: %lx, max_payloads: %d\n", mgr->payload_mask, mgr->vcpi_mask, mgr->max_payloads); 4869 4870 seq_printf(m, "\n| idx | port # | vcp_id | # slots | sink name |\n"); 4871 for (i = 0; i < mgr->max_payloads; i++) { 4872 if (mgr->proposed_vcpis[i]) { 4873 char name[14]; 4874 4875 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 4876 fetch_monitor_name(mgr, port, name, sizeof(name)); 4877 seq_printf(m, "%10d%10d%10d%10d%20s\n", 4878 i, 4879 port->port_num, 4880 port->vcpi.vcpi, 4881 port->vcpi.num_slots, 4882 (*name != 0) ? 
name : "Unknown"); 4883 } else 4884 seq_printf(m, "%6d - Unused\n", i); 4885 } 4886 seq_printf(m, "\n*** Payload Info ***\n"); 4887 seq_printf(m, "| idx | state | start slot | # slots |\n"); 4888 for (i = 0; i < mgr->max_payloads; i++) { 4889 seq_printf(m, "%10d%10d%15d%10d\n", 4890 i, 4891 mgr->payloads[i].payload_state, 4892 mgr->payloads[i].start_slot, 4893 mgr->payloads[i].num_slots); 4894 } 4895 mutex_unlock(&mgr->payload_lock); 4896 4897 seq_printf(m, "\n*** DPCD Info ***\n"); 4898 mutex_lock(&mgr->lock); 4899 if (mgr->mst_primary) { 4900 u8 buf[DP_PAYLOAD_TABLE_SIZE]; 4901 int ret; 4902 4903 if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) { 4904 seq_printf(m, "dpcd read failed\n"); 4905 goto out; 4906 } 4907 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf); 4908 4909 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); 4910 if (ret) { 4911 seq_printf(m, "faux/mst read failed\n"); 4912 goto out; 4913 } 4914 seq_printf(m, "faux/mst: %*ph\n", 2, buf); 4915 4916 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); 4917 if (ret) { 4918 seq_printf(m, "mst ctrl read failed\n"); 4919 goto out; 4920 } 4921 seq_printf(m, "mst ctrl: %*ph\n", 1, buf); 4922 4923 /* dump the standard OUI branch header */ 4924 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); 4925 if (ret) { 4926 seq_printf(m, "branch oui read failed\n"); 4927 goto out; 4928 } 4929 seq_printf(m, "branch oui: %*phN devid: ", 3, buf); 4930 4931 for (i = 0x3; i < 0x8 && buf[i]; i++) 4932 seq_printf(m, "%c", buf[i]); 4933 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n", 4934 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); 4935 if (dump_dp_payload_table(mgr, buf)) 4936 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf); 4937 } 4938 4939 out: 4940 mutex_unlock(&mgr->lock); 4941 4942 } 4943 EXPORT_SYMBOL(drm_dp_mst_dump_topology); 4944 4945 static void drm_dp_tx_work(struct work_struct *work) 4946 { 4947 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); 4948 4949 mutex_lock(&mgr->qlock); 4950 if (!list_empty(&mgr->tx_msg_downq)) 4951 process_single_down_tx_qlock(mgr); 4952 mutex_unlock(&mgr->qlock); 4953 } 4954 4955 static inline void 4956 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) 4957 { 4958 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs); 4959 4960 if (port->connector) { 4961 drm_connector_unregister(port->connector); 4962 drm_connector_put(port->connector); 4963 } 4964 4965 drm_dp_mst_put_port_malloc(port); 4966 } 4967 4968 static inline void 4969 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb) 4970 { 4971 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; 4972 struct drm_dp_mst_port *port, *port_tmp; 4973 struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp; 4974 bool wake_tx = false; 4975 4976 mutex_lock(&mgr->lock); 4977 list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) { 4978 list_del(&port->next); 4979 drm_dp_mst_topology_put_port(port); 4980 } 4981 mutex_unlock(&mgr->lock); 4982 4983 /* drop any tx slot msg */ 4984 mutex_lock(&mstb->mgr->qlock); 4985 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) { 4986 if (txmsg->dst != mstb) 4987 continue; 4988 4989 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 4990 list_del(&txmsg->next); 4991 wake_tx = true; 4992 } 4993 mutex_unlock(&mstb->mgr->qlock); 4994 4995 if (wake_tx) 4996 wake_up_all(&mstb->mgr->tx_waitq); 4997 4998 drm_dp_mst_put_mstb_malloc(mstb); 4999 } 5000 5001 static void drm_dp_delayed_destroy_work(struct 
work_struct *work) 5002 { 5003 struct drm_dp_mst_topology_mgr *mgr = 5004 container_of(work, struct drm_dp_mst_topology_mgr, 5005 delayed_destroy_work); 5006 bool send_hotplug = false, go_again; 5007 5008 /* 5009 * Not a regular list traverse as we have to drop the destroy 5010 * connector lock before destroying the mstb/port, to avoid AB->BA 5011 * ordering between this lock and the config mutex. 5012 */ 5013 do { 5014 go_again = false; 5015 5016 for (;;) { 5017 struct drm_dp_mst_branch *mstb; 5018 5019 mutex_lock(&mgr->delayed_destroy_lock); 5020 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list, 5021 struct drm_dp_mst_branch, 5022 destroy_next); 5023 if (mstb) 5024 list_del(&mstb->destroy_next); 5025 mutex_unlock(&mgr->delayed_destroy_lock); 5026 5027 if (!mstb) 5028 break; 5029 5030 drm_dp_delayed_destroy_mstb(mstb); 5031 go_again = true; 5032 } 5033 5034 for (;;) { 5035 struct drm_dp_mst_port *port; 5036 5037 mutex_lock(&mgr->delayed_destroy_lock); 5038 port = list_first_entry_or_null(&mgr->destroy_port_list, 5039 struct drm_dp_mst_port, 5040 next); 5041 if (port) 5042 list_del(&port->next); 5043 mutex_unlock(&mgr->delayed_destroy_lock); 5044 5045 if (!port) 5046 break; 5047 5048 drm_dp_delayed_destroy_port(port); 5049 send_hotplug = true; 5050 go_again = true; 5051 } 5052 } while (go_again); 5053 5054 if (send_hotplug) 5055 drm_kms_helper_hotplug_event(mgr->dev); 5056 } 5057 5058 static struct drm_private_state * 5059 drm_dp_mst_duplicate_state(struct drm_private_obj *obj) 5060 { 5061 struct drm_dp_mst_topology_state *state, *old_state = 5062 to_dp_mst_topology_state(obj->state); 5063 struct drm_dp_vcpi_allocation *pos, *vcpi; 5064 5065 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL); 5066 if (!state) 5067 return NULL; 5068 5069 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); 5070 5071 INIT_LIST_HEAD(&state->vcpis); 5072 5073 list_for_each_entry(pos, &old_state->vcpis, next) { 5074 /* Prune leftover freed VCPI allocations */ 5075 if (!pos->vcpi) 5076 continue; 5077 5078 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL); 5079 if (!vcpi) 5080 goto fail; 5081 5082 drm_dp_mst_get_port_malloc(vcpi->port); 5083 list_add(&vcpi->next, &state->vcpis); 5084 } 5085 5086 return &state->base; 5087 5088 fail: 5089 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) { 5090 drm_dp_mst_put_port_malloc(pos->port); 5091 kfree(pos); 5092 } 5093 kfree(state); 5094 5095 return NULL; 5096 } 5097 5098 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, 5099 struct drm_private_state *state) 5100 { 5101 struct drm_dp_mst_topology_state *mst_state = 5102 to_dp_mst_topology_state(state); 5103 struct drm_dp_vcpi_allocation *pos, *tmp; 5104 5105 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) { 5106 /* We only keep references to ports with non-zero VCPIs */ 5107 if (pos->vcpi) 5108 drm_dp_mst_put_port_malloc(pos->port); 5109 kfree(pos); 5110 } 5111 5112 kfree(mst_state); 5113 } 5114 5115 static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port, 5116 struct drm_dp_mst_branch *branch) 5117 { 5118 while (port->parent) { 5119 if (port->parent == branch) 5120 return true; 5121 5122 if (port->parent->port_parent) 5123 port = port->parent->port_parent; 5124 else 5125 break; 5126 } 5127 return false; 5128 } 5129 5130 static int 5131 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, 5132 struct drm_dp_mst_topology_state *state); 5133 5134 static int 5135 drm_dp_mst_atomic_check_mstb_bw_limit(struct 
drm_dp_mst_branch *mstb, 5136 struct drm_dp_mst_topology_state *state) 5137 { 5138 struct drm_dp_vcpi_allocation *vcpi; 5139 struct drm_dp_mst_port *port; 5140 int pbn_used = 0, ret; 5141 bool found = false; 5142 5143 /* Check that we have at least one port in our state that's downstream 5144 * of this branch, otherwise we can skip this branch 5145 */ 5146 list_for_each_entry(vcpi, &state->vcpis, next) { 5147 if (!vcpi->pbn || 5148 !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb)) 5149 continue; 5150 5151 found = true; 5152 break; 5153 } 5154 if (!found) 5155 return 0; 5156 5157 if (mstb->port_parent) 5158 drm_dbg_atomic(mstb->mgr->dev, 5159 "[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n", 5160 mstb->port_parent->parent, mstb->port_parent, mstb); 5161 else 5162 drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb); 5163 5164 list_for_each_entry(port, &mstb->ports, next) { 5165 ret = drm_dp_mst_atomic_check_port_bw_limit(port, state); 5166 if (ret < 0) 5167 return ret; 5168 5169 pbn_used += ret; 5170 } 5171 5172 return pbn_used; 5173 } 5174 5175 static int 5176 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, 5177 struct drm_dp_mst_topology_state *state) 5178 { 5179 struct drm_dp_vcpi_allocation *vcpi; 5180 int pbn_used = 0; 5181 5182 if (port->pdt == DP_PEER_DEVICE_NONE) 5183 return 0; 5184 5185 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { 5186 bool found = false; 5187 5188 list_for_each_entry(vcpi, &state->vcpis, next) { 5189 if (vcpi->port != port) 5190 continue; 5191 if (!vcpi->pbn) 5192 return 0; 5193 5194 found = true; 5195 break; 5196 } 5197 if (!found) 5198 return 0; 5199 5200 /* 5201 * This could happen if the sink deasserted its HPD line, but 5202 * the branch device still reports it as attached (PDT != NONE). 
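		 * Failing the check here turns such a stale port into an
		 * explicit -EINVAL instead of silently admitting a stream
		 * with no bandwidth behind it.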
5203 */ 5204 if (!port->full_pbn) { 5205 drm_dbg_atomic(port->mgr->dev, 5206 "[MSTB:%p] [MST PORT:%p] no BW available for the port\n", 5207 port->parent, port); 5208 return -EINVAL; 5209 } 5210 5211 pbn_used = vcpi->pbn; 5212 } else { 5213 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb, 5214 state); 5215 if (pbn_used <= 0) 5216 return pbn_used; 5217 } 5218 5219 if (pbn_used > port->full_pbn) { 5220 drm_dbg_atomic(port->mgr->dev, 5221 "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n", 5222 port->parent, port, pbn_used, port->full_pbn); 5223 return -ENOSPC; 5224 } 5225 5226 drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n", 5227 port->parent, port, pbn_used, port->full_pbn); 5228 5229 return pbn_used; 5230 } 5231 5232 static inline int 5233 drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr, 5234 struct drm_dp_mst_topology_state *mst_state) 5235 { 5236 struct drm_dp_vcpi_allocation *vcpi; 5237 int avail_slots = mst_state->total_avail_slots, payload_count = 0; 5238 5239 list_for_each_entry(vcpi, &mst_state->vcpis, next) { 5240 /* Releasing VCPI is always OK-even if the port is gone */ 5241 if (!vcpi->vcpi) { 5242 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all VCPI slots\n", 5243 vcpi->port); 5244 continue; 5245 } 5246 5247 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d vcpi slots\n", 5248 vcpi->port, vcpi->vcpi); 5249 5250 avail_slots -= vcpi->vcpi; 5251 if (avail_slots < 0) { 5252 drm_dbg_atomic(mgr->dev, 5253 "[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n", 5254 vcpi->port, mst_state, avail_slots + vcpi->vcpi); 5255 return -ENOSPC; 5256 } 5257 5258 if (++payload_count > mgr->max_payloads) { 5259 drm_dbg_atomic(mgr->dev, 5260 "[MST MGR:%p] state %p has too many payloads (max=%d)\n", 5261 mgr, mst_state, mgr->max_payloads); 5262 return -EINVAL; 5263 } 5264 } 5265 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n", 5266 mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots); 5267 5268 return 0; 5269 } 5270 5271 /** 5272 * drm_dp_mst_add_affected_dsc_crtcs 5273 * @state: Pointer to the new struct drm_dp_mst_topology_state 5274 * @mgr: MST topology manager 5275 * 5276 * Whenever there is a change in mst topology 5277 * DSC configuration would have to be recalculated 5278 * therefore we need to trigger modeset on all affected 5279 * CRTCs in that topology 5280 * 5281 * See also: 5282 * drm_dp_mst_atomic_enable_dsc() 5283 */ 5284 int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr) 5285 { 5286 struct drm_dp_mst_topology_state *mst_state; 5287 struct drm_dp_vcpi_allocation *pos; 5288 struct drm_connector *connector; 5289 struct drm_connector_state *conn_state; 5290 struct drm_crtc *crtc; 5291 struct drm_crtc_state *crtc_state; 5292 5293 mst_state = drm_atomic_get_mst_topology_state(state, mgr); 5294 5295 if (IS_ERR(mst_state)) 5296 return -EINVAL; 5297 5298 list_for_each_entry(pos, &mst_state->vcpis, next) { 5299 5300 connector = pos->port->connector; 5301 5302 if (!connector) 5303 return -EINVAL; 5304 5305 conn_state = drm_atomic_get_connector_state(state, connector); 5306 5307 if (IS_ERR(conn_state)) 5308 return PTR_ERR(conn_state); 5309 5310 crtc = conn_state->crtc; 5311 5312 if (!crtc) 5313 continue; 5314 5315 if (!drm_dp_mst_dsc_aux_for_port(pos->port)) 5316 continue; 5317 5318 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc); 5319 5320 if 
(IS_ERR(crtc_state)) 5321 return PTR_ERR(crtc_state); 5322 5323 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n", 5324 mgr, crtc); 5325 5326 crtc_state->mode_changed = true; 5327 } 5328 return 0; 5329 } 5330 EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs); 5331 5332 /** 5333 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off 5334 * @state: Pointer to the new drm_atomic_state 5335 * @port: Pointer to the affected MST Port 5336 * @pbn: Newly recalculated bw required for link with DSC enabled 5337 * @pbn_div: Divider to calculate correct number of pbn per slot 5338 * @enable: Boolean flag to enable or disable DSC on the port 5339 * 5340 * This function enables DSC on the given Port 5341 * by recalculating its vcpi from pbn provided 5342 * and sets dsc_enable flag to keep track of which 5343 * ports have DSC enabled 5344 * 5345 */ 5346 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, 5347 struct drm_dp_mst_port *port, 5348 int pbn, int pbn_div, 5349 bool enable) 5350 { 5351 struct drm_dp_mst_topology_state *mst_state; 5352 struct drm_dp_vcpi_allocation *pos; 5353 bool found = false; 5354 int vcpi = 0; 5355 5356 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr); 5357 5358 if (IS_ERR(mst_state)) 5359 return PTR_ERR(mst_state); 5360 5361 list_for_each_entry(pos, &mst_state->vcpis, next) { 5362 if (pos->port == port) { 5363 found = true; 5364 break; 5365 } 5366 } 5367 5368 if (!found) { 5369 drm_dbg_atomic(state->dev, 5370 "[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n", 5371 port, mst_state); 5372 return -EINVAL; 5373 } 5374 5375 if (pos->dsc_enabled == enable) { 5376 drm_dbg_atomic(state->dev, 5377 "[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n", 5378 port, enable, pos->vcpi); 5379 vcpi = pos->vcpi; 5380 } 5381 5382 if (enable) { 5383 vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div); 5384 drm_dbg_atomic(state->dev, 5385 "[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n", 5386 port, vcpi); 5387 if (vcpi < 0) 5388 return -EINVAL; 5389 } 5390 5391 pos->dsc_enabled = enable; 5392 5393 return vcpi; 5394 } 5395 EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc); 5396 /** 5397 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an 5398 * atomic update is valid 5399 * @state: Pointer to the new &struct drm_dp_mst_topology_state 5400 * 5401 * Checks the given topology state for an atomic update to ensure that it's 5402 * valid. This includes checking whether there's enough bandwidth to support 5403 * the new VCPI allocations in the atomic update. 5404 * 5405 * Any atomic drivers supporting DP MST must make sure to call this after 5406 * checking the rest of their state in their 5407 * &drm_mode_config_funcs.atomic_check() callback. 5408 * 5409 * See also: 5410 * drm_dp_atomic_find_vcpi_slots() 5411 * drm_dp_atomic_release_vcpi_slots() 5412 * 5413 * Returns: 5414 * 5415 * 0 if the new state is valid, negative error code otherwise. 
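 *
 * A minimal sketch (hypothetical driver callback) of where this fits in a
 * driver's &drm_mode_config_funcs.atomic_check implementation:
 *
 *	static int my_atomic_check(struct drm_device *dev,
 *				   struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}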
5416 */ 5417 int drm_dp_mst_atomic_check(struct drm_atomic_state *state) 5418 { 5419 struct drm_dp_mst_topology_mgr *mgr; 5420 struct drm_dp_mst_topology_state *mst_state; 5421 int i, ret = 0; 5422 5423 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { 5424 if (!mgr->mst_state) 5425 continue; 5426 5427 ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state); 5428 if (ret) 5429 break; 5430 5431 mutex_lock(&mgr->lock); 5432 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary, 5433 mst_state); 5434 mutex_unlock(&mgr->lock); 5435 if (ret < 0) 5436 break; 5437 else 5438 ret = 0; 5439 } 5440 5441 return ret; 5442 } 5443 EXPORT_SYMBOL(drm_dp_mst_atomic_check); 5444 5445 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = { 5446 .atomic_duplicate_state = drm_dp_mst_duplicate_state, 5447 .atomic_destroy_state = drm_dp_mst_destroy_state, 5448 }; 5449 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs); 5450 5451 /** 5452 * drm_atomic_get_mst_topology_state: get MST topology state 5453 * 5454 * @state: global atomic state 5455 * @mgr: MST topology manager, also the private object in this case 5456 * 5457 * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic 5458 * state vtable so that the private object state returned is that of a MST 5459 * topology object. 5460 * 5461 * RETURNS: 5462 * 5463 * The MST topology state or error pointer. 5464 */ 5465 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, 5466 struct drm_dp_mst_topology_mgr *mgr) 5467 { 5468 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base)); 5469 } 5470 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state); 5471 5472 /** 5473 * drm_dp_mst_topology_mgr_init - initialise a topology manager 5474 * @mgr: manager struct to initialise 5475 * @dev: device providing this structure - for i2c addition. 5476 * @aux: DP helper aux channel to talk to this device 5477 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit 5478 * @max_payloads: maximum number of payloads this GPU can source 5479 * @max_lane_count: maximum number of lanes this GPU supports 5480 * @max_link_rate: maximum link rate per lane this GPU supports in kHz 5481 * @conn_base_id: the connector object ID the MST device is connected to. 5482 * 5483 * Return 0 for success, or negative error code on failure 5484 */ 5485 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, 5486 struct drm_device *dev, struct drm_dp_aux *aux, 5487 int max_dpcd_transaction_bytes, int max_payloads, 5488 int max_lane_count, int max_link_rate, 5489 int conn_base_id) 5490 { 5491 struct drm_dp_mst_topology_state *mst_state; 5492 5493 mutex_init(&mgr->lock); 5494 mutex_init(&mgr->qlock); 5495 mutex_init(&mgr->payload_lock); 5496 mutex_init(&mgr->delayed_destroy_lock); 5497 mutex_init(&mgr->up_req_lock); 5498 mutex_init(&mgr->probe_lock); 5499 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) 5500 mutex_init(&mgr->topology_ref_history_lock); 5501 stack_depot_init(); 5502 #endif 5503 INIT_LIST_HEAD(&mgr->tx_msg_downq); 5504 INIT_LIST_HEAD(&mgr->destroy_port_list); 5505 INIT_LIST_HEAD(&mgr->destroy_branch_device_list); 5506 INIT_LIST_HEAD(&mgr->up_req_list); 5507 5508 /* 5509 * delayed_destroy_work will be queued on a dedicated WQ, so that any 5510 * requeuing will be also flushed when deiniting the topology manager. 
5511 */ 5512 mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0); 5513 if (mgr->delayed_destroy_wq == NULL) 5514 return -ENOMEM; 5515 5516 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); 5517 INIT_WORK(&mgr->tx_work, drm_dp_tx_work); 5518 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work); 5519 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work); 5520 init_waitqueue_head(&mgr->tx_waitq); 5521 mgr->dev = dev; 5522 mgr->aux = aux; 5523 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; 5524 mgr->max_payloads = max_payloads; 5525 mgr->max_lane_count = max_lane_count; 5526 mgr->max_link_rate = max_link_rate; 5527 mgr->conn_base_id = conn_base_id; 5528 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 || 5529 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8) 5530 return -EINVAL; 5531 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); 5532 if (!mgr->payloads) 5533 return -ENOMEM; 5534 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL); 5535 if (!mgr->proposed_vcpis) 5536 return -ENOMEM; 5537 set_bit(0, &mgr->payload_mask); 5538 5539 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL); 5540 if (mst_state == NULL) 5541 return -ENOMEM; 5542 5543 mst_state->total_avail_slots = 63; 5544 mst_state->start_slot = 1; 5545 5546 mst_state->mgr = mgr; 5547 INIT_LIST_HEAD(&mst_state->vcpis); 5548 5549 drm_atomic_private_obj_init(dev, &mgr->base, 5550 &mst_state->base, 5551 &drm_dp_mst_topology_state_funcs); 5552 5553 return 0; 5554 } 5555 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); 5556 5557 /** 5558 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager. 5559 * @mgr: manager to destroy 5560 */ 5561 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) 5562 { 5563 drm_dp_mst_topology_mgr_set_mst(mgr, false); 5564 flush_work(&mgr->work); 5565 /* The following will also drain any requeued work on the WQ. 
*/ 5566 if (mgr->delayed_destroy_wq) { 5567 destroy_workqueue(mgr->delayed_destroy_wq); 5568 mgr->delayed_destroy_wq = NULL; 5569 } 5570 mutex_lock(&mgr->payload_lock); 5571 kfree(mgr->payloads); 5572 mgr->payloads = NULL; 5573 kfree(mgr->proposed_vcpis); 5574 mgr->proposed_vcpis = NULL; 5575 mutex_unlock(&mgr->payload_lock); 5576 mgr->dev = NULL; 5577 mgr->aux = NULL; 5578 drm_atomic_private_obj_fini(&mgr->base); 5579 mgr->funcs = NULL; 5580 5581 mutex_destroy(&mgr->delayed_destroy_lock); 5582 mutex_destroy(&mgr->payload_lock); 5583 mutex_destroy(&mgr->qlock); 5584 mutex_destroy(&mgr->lock); 5585 mutex_destroy(&mgr->up_req_lock); 5586 mutex_destroy(&mgr->probe_lock); 5587 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) 5588 mutex_destroy(&mgr->topology_ref_history_lock); 5589 #endif 5590 } 5591 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); 5592 5593 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num) 5594 { 5595 int i; 5596 5597 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS) 5598 return false; 5599 5600 for (i = 0; i < num - 1; i++) { 5601 if (msgs[i].flags & I2C_M_RD || 5602 msgs[i].len > 0xff) 5603 return false; 5604 } 5605 5606 return msgs[num - 1].flags & I2C_M_RD && 5607 msgs[num - 1].len <= 0xff; 5608 } 5609 5610 static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num) 5611 { 5612 int i; 5613 5614 for (i = 0; i < num - 1; i++) { 5615 if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) || 5616 msgs[i].len > 0xff) 5617 return false; 5618 } 5619 5620 return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff; 5621 } 5622 5623 static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb, 5624 struct drm_dp_mst_port *port, 5625 struct i2c_msg *msgs, int num) 5626 { 5627 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 5628 unsigned int i; 5629 struct drm_dp_sideband_msg_req_body msg; 5630 struct drm_dp_sideband_msg_tx *txmsg = NULL; 5631 int ret; 5632 5633 memset(&msg, 0, sizeof(msg)); 5634 msg.req_type = DP_REMOTE_I2C_READ; 5635 msg.u.i2c_read.num_transactions = num - 1; 5636 msg.u.i2c_read.port_number = port->port_num; 5637 for (i = 0; i < num - 1; i++) { 5638 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr; 5639 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len; 5640 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf; 5641 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP); 5642 } 5643 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr; 5644 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len; 5645 5646 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 5647 if (!txmsg) { 5648 ret = -ENOMEM; 5649 goto out; 5650 } 5651 5652 txmsg->dst = mstb; 5653 drm_dp_encode_sideband_req(&msg, txmsg); 5654 5655 drm_dp_queue_down_tx(mgr, txmsg); 5656 5657 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 5658 if (ret > 0) { 5659 5660 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { 5661 ret = -EREMOTEIO; 5662 goto out; 5663 } 5664 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) { 5665 ret = -EIO; 5666 goto out; 5667 } 5668 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len); 5669 ret = num; 5670 } 5671 out: 5672 kfree(txmsg); 5673 return ret; 5674 } 5675 5676 static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb, 5677 struct drm_dp_mst_port *port, 5678 struct i2c_msg *msgs, int num) 5679 { 5680 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 5681 unsigned int i; 5682 struct drm_dp_sideband_msg_req_body msg; 5683 struct 

static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_mst_port *port,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	return ret;
}

static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
				struct drm_dp_mst_port *port,
				struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}
	for (i = 0; i < num; i++) {
		memset(&msg, 0, sizeof(msg));
		msg.req_type = DP_REMOTE_I2C_WRITE;
		msg.u.i2c_write.port_number = port->port_num;
		msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
		msg.u.i2c_write.num_bytes = msgs[i].len;
		msg.u.i2c_write.bytes = msgs[i].buf;

		memset(txmsg, 0, sizeof(*txmsg));
		txmsg->dst = mstb;

		drm_dp_encode_sideband_req(&msg, txmsg);
		drm_dp_queue_down_tx(mgr, txmsg);

		ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (ret > 0) {
			if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
				ret = -EREMOTEIO;
				goto out;
			}
		} else {
			goto out;
		}
	}
	ret = num;
out:
	kfree(txmsg);
	return ret;
}

/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port =
		container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	int ret;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	if (remote_i2c_read_ok(msgs, num)) {
		ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
	} else if (remote_i2c_write_ok(msgs, num)) {
		ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
	} else {
		drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
		ret = -EIO;
	}

	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};
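
/*
 * Illustrative sketch (not part of this file): once the adapter below is
 * registered, any in-kernel user can drive it through the generic I2C core.
 * The DRM EDID helper, for instance, does the equivalent of:
 *
 *	struct i2c_adapter *adap = &port->aux.ddc;
 *	struct edid *edid = drm_get_edid(connector, adap);
 *
 * where "connector" is the caller's drm_connector. The transfer lands in
 * drm_dp_mst_i2c_xfer() above via adap->algo->master_xfer.
 */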

/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @port: The port to add the I2C bus on
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
{
	struct drm_dp_aux *aux = &port->aux;
	struct device *parent_dev = port->mgr->dev->dev;

	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	/* FIXME: set the kdev of the port's connector as parent */
	aux->ddc.dev.parent = parent_dev;
	aux->ddc.dev.of_node = parent_dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}

/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @port: The port to remove the I2C bus from
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
{
	i2c_del_adapter(&port->aux.ddc);
}

/**
 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
 * @port: The port to check
 *
 * A single physical MST hub object can be represented in the topology
 * by multiple branches, with virtual ports between those branches.
 *
 * As of DP 1.4, an MST hub with internal (virtual) ports must expose
 * certain DPCD registers over those ports. See sections 2.6.1.1.1
 * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
 *
 * May acquire mgr->lock.
 *
 * Returns:
 * true if the port is a virtual DP peer device, false otherwise
 */
static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *downstream_port;

	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
		return false;

	/* Virtual DP Sink (Internal Display Panel) */
	if (port->port_num >= DP_MST_LOGICAL_PORT_0)
		return true;

	/* DP-to-HDMI Protocol Converter */
	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
	    !port->mcs &&
	    port->ldps)
		return true;

	/* DP-to-DP */
	mutex_lock(&port->mgr->lock);
	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
	    port->mstb &&
	    port->mstb->num_ports == 2) {
		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
			    !downstream_port->input) {
				mutex_unlock(&port->mgr->lock);
				return true;
			}
		}
	}
	mutex_unlock(&port->mgr->lock);

	return false;
}
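
/*
 * Illustrative topology (not part of this file): a physical hub with an
 * internal panel often enumerates like this, where the starred port is the
 * kind of virtual DP peer device drm_dp_mst_is_virtual_dpcd() detects:
 *
 *	primary branch (mstb)
 *	 +- port 1: SST sink (external monitor)
 *	 +- port 2: branch device
 *	     +- logical port 8: Virtual DP Sink, internal panel (*)
 */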

/**
 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
 * @port: The port to check. A leaf of the MST tree with an attached display.
 *
 * Depending on the situation, DSC may be enabled via the endpoint aux,
 * the immediately upstream aux, or the connector's physical aux.
 *
 * This is both the correct aux to read DSC_CAPABILITY and the
 * correct aux to write DSC_ENABLED.
 *
 * This operation can be expensive (up to four aux reads), so
 * the caller should cache the return value.
 *
 * Returns:
 * NULL if DSC cannot be enabled on this port, otherwise the aux device
 */
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *immediate_upstream_port;
	struct drm_dp_mst_port *fec_port;
	struct drm_dp_desc desc = {};
	u8 endpoint_fec;
	u8 endpoint_dsc;

	if (!port)
		return NULL;

	if (port->parent->port_parent)
		immediate_upstream_port = port->parent->port_parent;
	else
		immediate_upstream_port = NULL;

	fec_port = immediate_upstream_port;
	while (fec_port) {
		/*
		 * Each physical link (i.e. not a virtual port) between the
		 * output and the primary device must support FEC
		 */
		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
		    !fec_port->fec_capable)
			return NULL;

		fec_port = fec_port->parent->port_parent;
	}

	/* DP-to-DP peer device */
	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
		u8 upstream_dsc;

		if (drm_dp_dpcd_read(&port->aux,
				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&port->aux,
				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
			return NULL;

		/* Endpoint decompression with DP-to-DP peer device */
		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
		    (endpoint_fec & DP_FEC_CAPABLE) &&
		    (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED))
			return &port->aux;

		/* Virtual DPCD decompression with DP-to-DP peer device */
		return &immediate_upstream_port->aux;
	}

	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
	if (drm_dp_mst_is_virtual_dpcd(port))
		return &port->aux;

	/*
	 * Synaptics quirk
	 * Applies to ports for which:
	 * - Physical aux has Synaptics OUI
	 * - DPCD revision 1.4 or higher
	 * - Port is on primary branch device
	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
	 */
	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
		return NULL;

	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
	    port->parent == port->mgr->mst_primary) {
		u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];

		if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0)
			return NULL;

		if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
		    ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
		     != DP_DWN_STRM_PORT_TYPE_ANALOG))
			return port->mgr->aux;
	}

	/*
	 * The check below verifies if the MST sink
	 * connected to the GPU is capable of DSC -
	 * therefore the endpoint needs to be
	 * both DSC and FEC capable.
	 */
	if (drm_dp_dpcd_read(&port->aux,
			     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
		return NULL;
	if (drm_dp_dpcd_read(&port->aux,
			     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
		return NULL;
	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
	    (endpoint_fec & DP_FEC_CAPABLE))
		return &port->aux;

	return NULL;
}
EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
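
/*
 * Illustrative sketch (not part of this file): a driver would typically
 * resolve the DSC aux once per modeset, cache it as the kernel-doc above
 * suggests, and then use the same aux to enable decompression:
 *
 *	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *
 *	if (dsc_aux)
 *		drm_dp_dpcd_writeb(dsc_aux, DP_DSC_ENABLE, DP_DECOMPRESSION_EN);
 *
 * Whether that write hits the endpoint, the upstream virtual DPCD, or the
 * physical aux is exactly what the function above decides.
 */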