/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"
#include "amdgpu_dm_hdcp.h"

#include "dc.h"
#include "dm_helpers.h"

#include "ddc_service_types.h"
#include "dpcd_defs.h"

#include "dmub_cmd.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "dc/dcn20/dcn20_resource.h"

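/*
 * Peak-bandwidth margin factor in x1000 fixed point: streams are budgeted
 * at 100.6% of their nominal bandwidth. The 0.6% figure is assumed here to
 * match the margin used by the DRM PBN helpers; kbps_to_peak_pbn() below
 * applies it and kbps_from_pbn() divides it back out.
 */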
#define PEAK_FACTOR_X1000 1006

/*
 * This function handles both native AUX and I2C-Over-AUX transactions.
 */
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	ssize_t result = 0;
	struct aux_payload payload;
	enum aux_return_code_type operation_result;
	struct amdgpu_device *adev;
	struct ddc_service *ddc;
	uint8_t copy[16];

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

	payload.address = msg->address;
	payload.data = msg->buffer;
	payload.length = msg->size;
	payload.reply = &msg->reply;
	payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
	payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
	payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
	payload.write_status_update =
			(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
	payload.defer_delay = 0;

	if (payload.write) {
		memcpy(copy, msg->buffer, msg->size);
		payload.data = copy;
	}

	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
					  &operation_result);

	/*
	 * Workaround for certain Intel platforms where HPD unexpectedly pulls
	 * low during the first sideband message transaction and
	 * AUX_RET_ERROR_HPD_DISCON is returned. The AUX transaction actually
	 * succeeds in that case, so bypass the error.
	 */
	ddc = TO_DM_AUX(aux)->ddc_service;
	adev = ddc->ctx->driver_context;
	if (adev->dm.aux_hpd_discon_quirk) {
		if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
		    operation_result == AUX_RET_ERROR_HPD_DISCON) {
			result = msg->size;
			operation_result = AUX_RET_SUCCESS;
		}
	}

	/*
	 * A result of 0 also covers the AUX_DEFER/I2C_DEFER cases.
	 */
	if (payload.write && result >= 0) {
		if (result) {
			/* one byte indicating partially written bytes */
			drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n");
			result = payload.data[0];
		} else if (!payload.reply[0])
			/* I2C_ACK | AUX_ACK */
			result = msg->size;
	}

	if (result < 0) {
		switch (operation_result) {
		case AUX_RET_SUCCESS:
			break;
		case AUX_RET_ERROR_HPD_DISCON:
		case AUX_RET_ERROR_UNKNOWN:
		case AUX_RET_ERROR_INVALID_OPERATION:
		case AUX_RET_ERROR_PROTOCOL_ERROR:
			result = -EIO;
			break;
		case AUX_RET_ERROR_INVALID_REPLY:
		case AUX_RET_ERROR_ENGINE_ACQUIRE:
			result = -EBUSY;
			break;
		case AUX_RET_ERROR_TIMEOUT:
			result = -ETIMEDOUT;
			break;
		}

		drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
	}

	if (payload.reply[0])
		drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
			   payload.reply[0]);

	return result;
}

static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(connector);

	if (aconnector->dc_sink) {
		dc_link_remove_remote_sink(aconnector->dc_link,
					   aconnector->dc_sink);
		dc_sink_release(aconnector->dc_sink);
	}

	kfree(aconnector->edid);

	drm_connector_cleanup(connector);
	drm_dp_mst_put_port_malloc(aconnector->mst_output_port);
	kfree(aconnector);
}

static int
amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	r = drm_dp_mst_connector_late_register(connector,
					       amdgpu_dm_connector->mst_output_port);
	if (r < 0)
		return r;

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = aconnector->mst_output_port;
	struct amdgpu_dm_connector *root = aconnector->mst_root;
	struct dc_link *dc_link = aconnector->dc_link;
	struct dc_sink *dc_sink = aconnector->dc_sink;

	drm_dp_mst_connector_early_unregister(connector, port);

	/*
	 * Release the dc_sink for a connector whose attached port is
	 * no longer in the MST topology.
	 */
	drm_modeset_lock(&root->mst_mgr.base.lock, NULL);
	if (dc_sink) {
		if (dc_link->sink_count)
			dc_link_remove_remote_sink(dc_link, dc_sink);

		DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n",
			   dc_sink, dc_link->sink_count);

		dc_sink_release(dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
		aconnector->dsc_aux = NULL;
		port->passthrough_aux = NULL;
	}

	aconnector->mst_status = MST_STATUS_DEFAULT;
	drm_modeset_unlock(&root->mst_mgr.base.lock);
}

static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dm_dp_mst_connector_destroy,
	.reset = amdgpu_dm_connector_funcs_reset,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_mst_connector_late_register,
	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};

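/*
 * Branch device ID 0x90CC24 is understood to identify Synaptics DP1.4 MST
 * hubs (an assumption based on the Synaptics checks elsewhere in this file);
 * such hubs expose DSC through the hub's own AUX rather than the port's.
 */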
bool needs_dsc_aux_workaround(struct dc_link *link)
{
	if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
	    (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
	    link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
		return true;

	return false;
}

static bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
{
	u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F

	if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
		if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
		    IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
			DRM_INFO("Synaptics Cascaded MST hub\n");
			return true;
		}
	}

	return false;
}

static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *dc_sink = aconnector->dc_sink;
	struct drm_dp_mst_port *port = aconnector->mst_output_port;
	u8 dsc_caps[16] = { 0 };
	u8 dsc_branch_dec_caps_raw[3] = { 0 }; // DSC branch decoder caps 0xA0 ~ 0xA2
	u8 *dsc_branch_dec_caps = NULL;

	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);

	/*
	 * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
	 * because it only checks the DSC/FEC caps of the port itself and not
	 * the dock.
	 *
	 * This case returns NULL: a DSC-capable MST dock connected to a
	 * non-FEC/DSC-capable display.
	 *
	 * Workaround: explicitly check for the use case above and use the MST
	 * dock's AUX as dsc_aux.
	 */
	if (!aconnector->dsc_aux && !port->parent->port_parent &&
	    needs_dsc_aux_workaround(aconnector->dc_link))
		aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;

	/* synaptics cascaded MST hub case */
	if (is_synaptics_cascaded_panamera(aconnector->dc_link, port))
		aconnector->dsc_aux = port->mgr->aux;

	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux,
			     DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, dsc_branch_dec_caps_raw, 3) == 3)
		dsc_branch_dec_caps = dsc_branch_dec_caps_raw;

	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				   dsc_caps, dsc_branch_dec_caps,
				   &dc_sink->dsc_caps.dsc_dec_caps))
		return false;

	return true;
}

static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector)
{
	union dp_downstream_port_present ds_port_present;

	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) {
		DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n");
		return false;
	}

	aconnector->mst_downstream_port_present = ds_port_present;
	DRM_INFO("Downstream port present %d, type %d\n",
		 ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE);

	return true;
}

static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	int ret = 0;

	if (!aconnector)
		return drm_add_edid_modes(connector, NULL);

	if (!aconnector->edid) {
		struct edid *edid;

		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port);

		if (!edid) {
			amdgpu_dm_set_mst_status(&aconnector->mst_status,
						 MST_REMOTE_EDID, false);

			drm_connector_update_edid_property(
				&aconnector->base,
				NULL);

			DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name);
			if (!aconnector->dc_sink) {
				struct dc_sink *dc_sink;
				struct dc_sink_init_data init_params = {
					.link = aconnector->dc_link,
					.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

				dc_sink = dc_link_add_remote_sink(
					aconnector->dc_link,
					NULL,
					0,
					&init_params);

				if (!dc_sink) {
					DRM_ERROR("Unable to add a remote sink\n");
					return 0;
				}

				DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n",
					   dc_sink, aconnector->dc_link->sink_count);

				dc_sink->priv = aconnector;
				aconnector->dc_sink = dc_sink;
			}

			return ret;
		}

		aconnector->edid = edid;
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 MST_REMOTE_EDID, true);
	}

	if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	if (!aconnector->dc_sink) {
		struct dc_sink *dc_sink;
		struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
		dc_sink = dc_link_add_remote_sink(
			aconnector->dc_link,
			(uint8_t *)aconnector->edid,
			(aconnector->edid->extensions + 1) * EDID_LENGTH,
			&init_params);

		if (!dc_sink) {
			DRM_ERROR("Unable to add a remote sink\n");
			return 0;
		}

		DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n",
			   dc_sink, aconnector->dc_link->sink_count);

		dc_sink->priv = aconnector;
		/* dc_link_add_remote_sink returns a new reference */
		aconnector->dc_sink = dc_sink;

		/*
		 * When a display is unplugged from the MST hub, the connector
		 * will be destroyed within dm_dp_mst_connector_destroy and its
		 * HDCP properties (type, undesired, desired, enabled) will be
		 * lost. So, save the HDCP properties into hdcp_work within
		 * amdgpu_dm_atomic_commit_tail. If the same display is plugged
		 * back with the same display index, its HDCP properties will
		 * be retrieved from hdcp_work within dm_dp_mst_get_modes.
		 */
		if (aconnector->dc_sink && connector->state) {
			struct drm_device *dev = connector->dev;
			struct amdgpu_device *adev = drm_to_adev(dev);

			if (adev->dm.hdcp_workqueue) {
				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
				struct hdcp_workqueue *hdcp_w =
					&hdcp_work[aconnector->dc_link->link_index];

				connector->state->hdcp_content_type =
					hdcp_w->hdcp_content_type[connector->index];
				connector->state->content_protection =
					hdcp_w->content_protection[connector->index];
			}
		}

		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(
				connector, aconnector->edid);

			if (!validate_dsc_caps_on_connector(aconnector))
				memset(&aconnector->dc_sink->dsc_caps,
				       0, sizeof(aconnector->dc_sink->dsc_caps));

			if (!retrieve_downstream_port_device(aconnector))
				memset(&aconnector->mst_downstream_port_present,
				       0, sizeof(aconnector->mst_downstream_port_present));
		}
	}

	drm_connector_update_edid_property(
		&aconnector->base, aconnector->edid);

	ret = drm_add_edid_modes(connector, aconnector->edid);

	return ret;
}

static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
			   struct drm_atomic_state *state)
{
	struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
											 connector);
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);

	return &adev->dm.mst_encoders[acrtc->crtc_id].base;
}

static int
dm_dp_mst_detect(struct drm_connector *connector,
		 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_dm_connector *master = aconnector->mst_root;
	struct drm_dp_mst_port *port = aconnector->mst_output_port;
	int connection_status;

	if (drm_connector_is_unregistered(connector))
		return connector_status_disconnected;

	connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
						   aconnector->mst_output_port);

	if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) {
		uint8_t dpcd_rev;
		int ret;

		ret = drm_dp_dpcd_readb(&port->aux, DP_DP13_DPCD_REV, &dpcd_rev);

		if (ret == 1) {
			port->dpcd_rev = dpcd_rev;

			/* Could be a DP 1.2 DP Rx case */
			if (!dpcd_rev) {
				ret = drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &dpcd_rev);

				if (ret == 1)
					port->dpcd_rev = dpcd_rev;
			}

			if (!dpcd_rev)
				DRM_DEBUG_KMS("Can't decide DPCD revision number!");
		}

		/*
		 * Could be a legacy sink, logical port, etc. on DP 1.2.
		 * A remote DPCD read will be NACKed in these cases.
		 */
		if (ret != 1)
			DRM_DEBUG_KMS("Can't access DPCD");
	} else if (port->pdt == DP_PEER_DEVICE_NONE) {
		port->dpcd_rev = 0;
	}

	/*
	 * Release the dc_sink for a connector whose unplug event was notified
	 * by a CSN message.
	 */
	if (connection_status == connector_status_disconnected && aconnector->dc_sink) {
		if (aconnector->dc_link->sink_count)
			dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);

		DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n",
			   aconnector->dc_sink, aconnector->dc_link->sink_count);

		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
		aconnector->dsc_aux = NULL;
		port->passthrough_aux = NULL;

		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
					 false);
	}

	return connection_status;
}

static int dm_dp_mst_atomic_check(struct drm_connector *connector,
				  struct drm_atomic_state *state)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_root->mst_mgr;
	struct drm_dp_mst_port *mst_port = aconnector->mst_output_port;

	return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port);
}

static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
	.get_modes = dm_dp_mst_get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_best_encoder = dm_mst_atomic_best_encoder,
	.detect_ctx = dm_dp_mst_detect,
	.atomic_check = dm_dp_mst_atomic_check,
};

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i];
		struct drm_encoder *encoder = &amdgpu_encoder->base;

		encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

		drm_encoder_init(
			dev,
			&amdgpu_encoder->base,
			&amdgpu_dm_encoder_funcs,
			DRM_MODE_ENCODER_DPMST,
			NULL);

		drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
	}
}

static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port,
			const char *pathprop)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	int i;

	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
	if (!aconnector)
		return NULL;

	connector = &aconnector->base;
	aconnector->mst_output_port = port;
	aconnector->mst_root = master;
	amdgpu_dm_set_mst_status(&aconnector->mst_status,
				 MST_PROBE, true);

	if (drm_connector_init(
		dev,
		connector,
		&dm_dp_mst_connector_funcs,
		DRM_MODE_CONNECTOR_DisplayPort)) {
		kfree(aconnector);
		return NULL;
	}
	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		&adev->dm,
		aconnector,
		DRM_MODE_CONNECTOR_DisplayPort,
		master->dc_link,
		master->connector_id);

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_connector_attach_encoder(&aconnector->base,
					     &adev->dm.mst_encoders[i].base);
	}

	connector->max_bpc_property = master->base.max_bpc_property;
	if (connector->max_bpc_property)
		drm_connector_attach_max_bpc_property(connector, 8, 16);

	connector->vrr_capable_property = master->base.vrr_capable_property;
	if (connector->vrr_capable_property)
		drm_connector_attach_vrr_capable_property(connector);

	drm_object_attach_property(
		&connector->base,
		dev->mode_config.path_property,
		0);
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.tile_property,
		0);
	connector->colorspace_property = master->base.colorspace_property;
	if (connector->colorspace_property)
		drm_connector_attach_colorspace_property(connector);

	drm_connector_set_path_property(connector, pathprop);

	/*
	 * Initialize connector state before adding the connector to the drm
	 * and framebuffer lists
	 */
	amdgpu_dm_connector_funcs_reset(connector);

	drm_dp_mst_get_port_malloc(port);

	return connector;
}

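/*
 * Poll the ESI (or legacy sink-status) DPCD registers, hand any pending
 * DOWN_REP/UP_REQ sideband-message IRQs to the DRM MST manager, and write
 * back the ACK bits, looping until no ready bit remains or the iteration
 * cap is hit.
 */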
void dm_handle_mst_sideband_msg_ready_event(
	struct drm_dp_mst_topology_mgr *mgr,
	enum mst_msg_ready_type msg_rdy_type)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	uint8_t dpcd_bytes_to_read;
	const uint8_t max_process_count = 30;
	uint8_t process_count = 0;
	u8 retry;
	struct amdgpu_dm_connector *aconnector =
			container_of(mgr, struct amdgpu_dm_connector, mst_mgr);

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	mutex_lock(&aconnector->handle_mst_msg_ready);

	while (process_count < max_process_count) {
		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};

		process_count++;

		dret = drm_dp_dpcd_read(
			&aconnector->dm_dp_aux.aux,
			dpcd_addr,
			esi,
			dpcd_bytes_to_read);

		if (dret != dpcd_bytes_to_read) {
			DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
			break;
		}

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);

		switch (msg_rdy_type) {
		case DOWN_REP_MSG_RDY_EVENT:
			/* Only handle the DOWN_REP_MSG_RDY case */
			esi[1] &= DP_DOWN_REP_MSG_RDY;
			break;
		case UP_REQ_MSG_RDY_EVENT:
			/* Only handle the UP_REQ_MSG_RDY case */
			esi[1] &= DP_UP_REQ_MSG_RDY;
			break;
		default:
			/* Handle both cases */
			esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
			break;
		}

		if (!esi[1])
			break;

		/* handle MST irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
							esi,
							ack,
							&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			for (retry = 0; retry < 3; retry++) {
				ssize_t wret;

				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
							  dpcd_addr + 1,
							  ack[1]);
				if (wret == 1)
					break;
			}

			if (retry == 3) {
				DRM_ERROR("Failed to ack MST event.\n");
				break;
			}

			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	mutex_unlock(&aconnector->handle_mst_msg_ready);

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
{
	dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
}

static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
	.add_connector = dm_dp_add_mst_connector,
	.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
};

void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
				       struct amdgpu_dm_connector *aconnector,
				       int link_index)
{
	struct dc_link_settings max_link_enc_cap = {0};

	aconnector->dm_dp_aux.aux.name =
		kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
			  link_index);
	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
	aconnector->dm_dp_aux.aux.drm_dev = dm->ddev;
	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

	drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
	drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
				      &aconnector->base);

	if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		return;

	dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
	aconnector->mst_mgr.cbs = &dm_mst_cbs;
	drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr, adev_to_drm(dm->adev),
				     &aconnector->dm_dp_aux.aux, 16, 4, aconnector->connector_id);

	drm_connector_attach_dp_subconnector_property(&aconnector->base);
}

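/*
 * One PBN unit corresponds to 54/64 MBps, so dividing the link bandwidth in
 * kbps by (8 * 1000 * 54) yields the link's total PBN divided by the 64 MTP
 * time slots, i.e. the PBN value of a single time slot (the "PBN divider"
 * used by the DRM MST payload code).
 */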
int dm_mst_get_pbn_divider(struct dc_link *link)
{
	if (!link)
		return 0;

	return dc_link_bandwidth_kbps(link,
				      dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}

struct dsc_mst_fairness_params {
	struct dc_crtc_timing *timing;
	struct dc_sink *sink;
	struct dc_dsc_bw_range bw_range;
	bool compression_possible;
	struct drm_dp_mst_port *port;
	enum dsc_clock_force_state clock_force_enable;
	uint32_t num_slices_h;
	uint32_t num_slices_v;
	uint32_t bpp_overwrite;
	struct amdgpu_dm_connector *aconnector;
};

static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
{
	u8 link_coding_cap;
	uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;

	link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
	if (link_coding_cap == DP_128b_132b_ENCODING)
		fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;

	return fec_overhead_multiplier_x1000;
}

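/*
 * Convert a stream bandwidth in kbps to peak PBN: apply the 0.6% margin
 * (x1006/1000) and the x1000 fixed-point FEC overhead multiplier, then
 * scale kbps into 54/64-MBps PBN units via * 64 / (54 * 8 * 1000).
 */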
static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
{
	u64 peak_kbps = kbps;

	peak_kbps *= 1006;
	peak_kbps *= fec_overhead_multiplier_x1000;
	peak_kbps = div_u64(peak_kbps, 1000 * 1000);
	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}

static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
					       struct dsc_mst_fairness_vars *vars,
					       int count,
					       int k)
{
	struct drm_connector *drm_connector;
	int i;
	struct dc_dsc_config_options dsc_options = {0};

	for (i = 0; i < count; i++) {
		drm_connector = &params[i].aconnector->base;

		dc_dsc_get_default_config_option(params[i].sink->ctx->dc, &dsc_options);
		dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;

		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
		if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
					params[i].sink->ctx->dc->res_pool->dscs[0],
					&params[i].sink->dsc_caps.dsc_dec_caps,
					&dsc_options,
					0,
					params[i].timing,
					dc_link_get_highest_encoding_format(params[i].aconnector->dc_link),
					&params[i].timing->dsc_cfg)) {
			params[i].timing->flags.DSC = 1;

			if (params[i].bpp_overwrite)
				params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite;
			else
				params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16;

			if (params[i].num_slices_h)
				params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;

			if (params[i].num_slices_v)
				params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v;
		} else {
			params[i].timing->flags.DSC = 0;
		}
		params[i].timing->dsc_cfg.mst_pbn = vars[i + k].pbn;
	}

	for (i = 0; i < count; i++) {
		if (params[i].sink) {
			if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
			    params[i].sink->sink_signal != SIGNAL_TYPE_NONE)
				DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i,
						 params[i].sink->edid_caps.display_name);
		}

		DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n",
				 params[i].timing->flags.DSC,
				 params[i].timing->dsc_cfg.bits_per_pixel,
				 vars[i + k].pbn);
	}
}

static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
{
	struct dc_dsc_config dsc_config;
	u64 kbps;

	struct drm_connector *drm_connector = &param.aconnector->base;
	struct dc_dsc_config_options dsc_options = {0};

	dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
	dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;

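	/*
	 * Inverse of kbps_to_peak_pbn(): 994/1000 undoes the 1.006 margin
	 * (approximately, in integer math), while * 8 * 54 / 64 converts
	 * 54/64-MBps PBN units back to kbps.
	 */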
	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
	dc_dsc_compute_config(
			param.sink->ctx->dc->res_pool->dscs[0],
			&param.sink->dsc_caps.dsc_dec_caps,
			&dsc_options,
			(int) kbps, param.timing,
			dc_link_get_highest_encoding_format(param.aconnector->dc_link),
			&dsc_config);

	return dsc_config.bits_per_pixel;
}

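/*
 * Greedily hand out the remaining link bandwidth: on each pass, the stream
 * with the smallest remaining slack (distance to its uncompressed-equivalent
 * PBN) is offered either a fair share of the free time slots or all of its
 * remaining slack, whichever is smaller, and the allocation is kept only if
 * drm_dp_mst_atomic_check() still passes.
 */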
static int increase_dsc_bpp(struct drm_atomic_state *state,
			    struct drm_dp_mst_topology_state *mst_state,
			    struct dc_link *dc_link,
			    struct dsc_mst_fairness_params *params,
			    struct dsc_mst_fairness_vars *vars,
			    int count,
			    int k)
{
	int i;
	bool bpp_increased[MAX_PIPES];
	int initial_slack[MAX_PIPES];
	int min_initial_slack;
	int next_index;
	int remaining_to_increase = 0;
	int link_timeslots_used;
	int fair_pbn_alloc;
	int ret = 0;
	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);

	for (i = 0; i < count; i++) {
		if (vars[i + k].dsc_enabled) {
			initial_slack[i] =
				kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
			bpp_increased[i] = false;
			remaining_to_increase += 1;
		} else {
			initial_slack[i] = 0;
			bpp_increased[i] = true;
		}
	}

	while (remaining_to_increase) {
		next_index = -1;
		min_initial_slack = -1;
		for (i = 0; i < count; i++) {
			if (!bpp_increased[i]) {
				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
					min_initial_slack = initial_slack[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		link_timeslots_used = 0;

		for (i = 0; i < count; i++)
			link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div);

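		/*
		 * 63 usable time slots: of the 64 MTP slots, one is assumed
		 * to be reserved for the MTP header.
		 */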
		fair_pbn_alloc =
			(63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div;

		if (initial_slack[next_index] > fair_pbn_alloc) {
			vars[next_index].pbn += fair_pbn_alloc;
			ret = drm_dp_atomic_find_time_slots(state,
							    params[next_index].port->mgr,
							    params[next_index].port,
							    vars[next_index].pbn);
			if (ret < 0)
				return ret;

			ret = drm_dp_mst_atomic_check(state);
			if (ret == 0) {
				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
			} else {
				vars[next_index].pbn -= fair_pbn_alloc;
				ret = drm_dp_atomic_find_time_slots(state,
								    params[next_index].port->mgr,
								    params[next_index].port,
								    vars[next_index].pbn);
				if (ret < 0)
					return ret;
			}
		} else {
			vars[next_index].pbn += initial_slack[next_index];
			ret = drm_dp_atomic_find_time_slots(state,
							    params[next_index].port->mgr,
							    params[next_index].port,
							    vars[next_index].pbn);
			if (ret < 0)
				return ret;

			ret = drm_dp_mst_atomic_check(state);
			if (ret == 0) {
				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
			} else {
				vars[next_index].pbn -= initial_slack[next_index];
				ret = drm_dp_atomic_find_time_slots(state,
								    params[next_index].port->mgr,
								    params[next_index].port,
								    vars[next_index].pbn);
				if (ret < 0)
					return ret;
			}
		}

		bpp_increased[next_index] = true;
		remaining_to_increase--;
	}
	return 0;
}

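/*
 * Try to turn DSC back off where it buys nothing: streams already running at
 * their maximum target bpp are tried in order of the largest bandwidth gain
 * from disabling compression, and DSC stays off only if the uncompressed
 * allocation still passes drm_dp_mst_atomic_check().
 */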
static int try_disable_dsc(struct drm_atomic_state *state,
			   struct dc_link *dc_link,
			   struct dsc_mst_fairness_params *params,
			   struct dsc_mst_fairness_vars *vars,
			   int count,
			   int k)
{
	int i;
	bool tried[MAX_PIPES];
	int kbps_increase[MAX_PIPES];
	int max_kbps_increase;
	int next_index;
	int remaining_to_try = 0;
	int ret;
	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);

	for (i = 0; i < count; i++) {
		if (vars[i + k].dsc_enabled
				&& vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16
				&& params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
			tried[i] = false;
			remaining_to_try += 1;
		} else {
			kbps_increase[i] = 0;
			tried[i] = true;
		}
	}

	while (remaining_to_try) {
		next_index = -1;
		max_kbps_increase = -1;
		for (i = 0; i < count; i++) {
			if (!tried[i]) {
				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
					max_kbps_increase = kbps_increase[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
		ret = drm_dp_atomic_find_time_slots(state,
						    params[next_index].port->mgr,
						    params[next_index].port,
						    vars[next_index].pbn);
		if (ret < 0)
			return ret;

		ret = drm_dp_mst_atomic_check(state);
		if (ret == 0) {
			vars[next_index].dsc_enabled = false;
			vars[next_index].bpp_x16 = 0;
		} else {
			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
			ret = drm_dp_atomic_find_time_slots(state,
							    params[next_index].port->mgr,
							    params[next_index].port,
							    vars[next_index].pbn);
			if (ret < 0)
				return ret;
		}

		tried[next_index] = true;
		remaining_to_try--;
	}
	return 0;
}

static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
					    struct dc_state *dc_state,
					    struct dc_link *dc_link,
					    struct dsc_mst_fairness_vars *vars,
					    struct drm_dp_mst_topology_mgr *mgr,
					    int *link_vars_start_index)
{
	struct dc_stream_state *stream;
	struct dsc_mst_fairness_params params[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr);
	int count = 0;
	int i, k, ret;
	bool debugfs_overwrite = false;
	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);

	memset(params, 0, sizeof(params));

	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	/* Set up params */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct dc_dsc_policy dsc_policy = {0};

		stream = dc_state->streams[i];

		if (stream->link != dc_link)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		if (!aconnector)
			continue;

		if (!aconnector->mst_output_port)
			continue;

		stream->timing.flags.DSC = 0;

		params[count].timing = &stream->timing;
		params[count].sink = stream->sink;
		params[count].aconnector = aconnector;
		params[count].port = aconnector->mst_output_port;
		params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
		if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
			debugfs_overwrite = true;
		params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
		params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
		params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
		params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
		dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
		if (!dc_dsc_compute_bandwidth_range(
				stream->sink->ctx->dc->res_pool->dscs[0],
				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				dsc_policy.min_target_bpp * 16,
				dsc_policy.max_target_bpp * 16,
				&stream->sink->dsc_caps.dsc_dec_caps,
				&stream->timing,
				dc_link_get_highest_encoding_format(dc_link),
				&params[count].bw_range))
			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
											       dc_link_get_highest_encoding_format(dc_link));

		count++;
	}

	if (count == 0) {
		ASSERT(0);
		return 0;
	}

	/* k is the start index of vars for the current phy link used by the MST hub */
	k = *link_vars_start_index;
	/* set the vars start index for the next MST hub phy link */
	*link_vars_start_index += count;

	/* Try no compression */
	for (i = 0; i < count; i++) {
		vars[i + k].aconnector = params[i].aconnector;
		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
		vars[i + k].dsc_enabled = false;
		vars[i + k].bpp_x16 = 0;
		ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
						    vars[i + k].pbn);
		if (ret < 0)
			return ret;
	}
	ret = drm_dp_mst_atomic_check(state);
	if (ret == 0 && !debugfs_overwrite) {
		set_dsc_configs_from_fairness_vars(params, vars, count, k);
		return 0;
	} else if (ret != -ENOSPC) {
		return ret;
	}

	/* Try max compression */
	for (i = 0; i < count; i++) {
		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
			vars[i + k].dsc_enabled = true;
			vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
							    params[i].port, vars[i + k].pbn);
			if (ret < 0)
				return ret;
		} else {
			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
			vars[i + k].dsc_enabled = false;
			vars[i + k].bpp_x16 = 0;
			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
							    params[i].port, vars[i + k].pbn);
			if (ret < 0)
				return ret;
		}
	}
	ret = drm_dp_mst_atomic_check(state);
	if (ret != 0)
		return ret;

	/* Optimize degree of compression */
	ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k);
	if (ret < 0)
		return ret;

	ret = try_disable_dsc(state, dc_link, params, vars, count, k);
	if (ret < 0)
		return ret;

	set_dsc_configs_from_fairness_vars(params, vars, count, k);

	return 0;
}

static bool is_dsc_need_re_compute(
	struct drm_atomic_state *state,
	struct dc_state *dc_state,
	struct dc_link *dc_link)
{
	int i, j;
	bool is_dsc_need_re_compute = false;
	struct amdgpu_dm_connector *stream_on_link[MAX_PIPES];
	int new_stream_on_link_num = 0;
	struct amdgpu_dm_connector *aconnector;
	struct dc_stream_state *stream;
	const struct dc *dc = dc_link->dc;

	/* only check phys used by a DSC MST branch */
	if (dc_link->type != dc_connection_mst_branch)
		return false;

	for (i = 0; i < MAX_PIPES; i++)
		stream_on_link[i] = NULL;

	/* check if there is a mode change in the new request */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct drm_crtc_state *new_crtc_state;
		struct drm_connector_state *new_conn_state;

		stream = dc_state->streams[i];
		if (!stream)
			continue;

		/* check if the stream uses the same link for MST */
		if (stream->link != dc_link)
			continue;

		aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
		if (!aconnector)
			continue;

		/*
		 * Check if cached virtual MST DSC caps are available and DSC
		 * is supported. This takes care of newer MST DSC-capable
		 * devices that report their DPCD caps as per the
		 * specification in their virtual DPCD registers.
		 *
		 * TODO: implement the check for older MST DSC devices that do
		 * not conform to the specification.
		 */
		if (!(aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported ||
		      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
			continue;

		stream_on_link[new_stream_on_link_num] = aconnector;
		new_stream_on_link_num++;

		new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
		if (!new_conn_state)
			continue;

		if (IS_ERR(new_conn_state))
			continue;

		if (!new_conn_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
		if (!new_crtc_state)
			continue;

		if (IS_ERR(new_crtc_state))
			continue;

		if (new_crtc_state->enable && new_crtc_state->active) {
			if (new_crtc_state->mode_changed || new_crtc_state->active_changed ||
			    new_crtc_state->connectors_changed)
				return true;
		}
	}

	if (new_stream_on_link_num == 0)
		return false;

	/*
	 * check current_state for streams on this link that are not in the
	 * new request state
	 */
	for (i = 0; i < dc->current_state->stream_count; i++) {
		stream = dc->current_state->streams[i];
		/* only check streams on the MST hub */
		if (stream->link != dc_link)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		if (!aconnector)
			continue;

		for (j = 0; j < new_stream_on_link_num; j++) {
			if (stream_on_link[j]) {
				if (aconnector == stream_on_link[j])
					break;
			}
		}

		if (j == new_stream_on_link_num) {
			/* not in the new state */
			is_dsc_need_re_compute = true;
			break;
		}
	}

	return is_dsc_need_re_compute;
}

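/*
 * For every MST link in the new state that carries a DSC-capable sink,
 * (re)compute per-stream DSC configs and time-slot allocations, then attach
 * a DSC resource to each stream that ended up with DSC enabled. vars is
 * shared across all links; link_vars_start_index tracks each link's slice.
 */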
int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				      struct dc_state *dc_state,
				      struct dsc_mst_fairness_vars *vars)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct resource_pool *res_pool;
	int link_vars_start_index = 0;
	int ret = 0;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];
		res_pool = stream->ctx->dc->res_pool;

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
			continue;

		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		if (res_pool->funcs->remove_stream_from_ctx &&
		    res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
			return -EINVAL;

		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
			continue;

		mst_mgr = aconnector->mst_output_port->mgr;
		ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
						       &link_vars_start_index);
		if (ret != 0)
			return ret;

		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->timing.flags.DSC == 1)
			if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
				return -EINVAL;
	}

	return ret;
}

static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
						 struct dc_state *dc_state,
						 struct dsc_mst_fairness_vars *vars)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	int link_vars_start_index = 0;
	int ret = 0;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
			continue;

		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
			continue;

		mst_mgr = aconnector->mst_output_port->mgr;
		ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
						       &link_vars_start_index);
		if (ret != 0)
			return ret;

		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	return ret;
}

static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state,
					      struct dc_stream_state *stream)
{
	int i;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_state, *old_state;

	for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, i) {
		struct dm_crtc_state *dm_state = to_dm_crtc_state(new_state);

		if (dm_state->stream == stream)
			return i;
	}
	return -1;
}

static bool is_link_to_dschub(struct dc_link *dc_link)
{
	union dpcd_dsc_basic_capabilities *dsc_caps =
			&dc_link->dpcd_caps.dsc_caps.dsc_basic_caps;

	/* only check phys used by a DSC MST branch */
	if (dc_link->type != dc_connection_mst_branch)
		return false;

	if (!(dsc_caps->fields.dsc_support.DSC_SUPPORT ||
	      dsc_caps->fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
		return false;
	return true;
}

static bool is_dsc_precompute_needed(struct drm_atomic_state *state)
{
	int i;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	bool ret = false;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!amdgpu_dm_find_first_crtc_matching_connector(state, crtc)) {
			ret = false;
			break;
		}
		if (dm_crtc_state->stream && dm_crtc_state->stream->link)
			if (is_link_to_dschub(dm_crtc_state->stream->link))
				ret = true;
	}
	return ret;
}

int pre_validate_dsc(struct drm_atomic_state *state,
		     struct dm_atomic_state **dm_state_ptr,
		     struct dsc_mst_fairness_vars *vars)
{
	int i;
	struct dm_atomic_state *dm_state;
	struct dc_state *local_dc_state = NULL;
	int ret = 0;

	if (!is_dsc_precompute_needed(state)) {
		DRM_INFO_ONCE("DSC precompute is not needed.\n");
		return 0;
	}
	ret = dm_atomic_get_state(state, dm_state_ptr);
	if (ret != 0) {
		DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
		return ret;
	}
	dm_state = *dm_state_ptr;

	/*
	 * Create a local copy of dc_state and copy the streams of
	 * dm_state->context into it, making sure the stream pointers of the
	 * local copy are not the same as the streams from dm_state->context.
	 */

	local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL);
	if (!local_dc_state)
		return -ENOMEM;

	for (i = 0; i < local_dc_state->stream_count; i++) {
		struct dc_stream_state *stream = dm_state->context->streams[i];
		int ind = find_crtc_index_in_state_by_stream(state, stream);

		if (ind >= 0) {
			struct amdgpu_dm_connector *aconnector;
			struct drm_connector_state *drm_new_conn_state;
			struct dm_connector_state *dm_new_conn_state;
			struct dm_crtc_state *dm_old_crtc_state;

			aconnector =
				amdgpu_dm_find_first_crtc_matching_connector(state,
									     state->crtcs[ind].ptr);
			drm_new_conn_state =
				drm_atomic_get_new_connector_state(state,
								   &aconnector->base);
			dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
			dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);

			local_dc_state->streams[i] =
				create_validate_stream_for_sink(aconnector,
								&state->crtcs[ind].new_state->mode,
								dm_new_conn_state,
								dm_old_crtc_state->stream);
			if (local_dc_state->streams[i] == NULL) {
				ret = -EINVAL;
				break;
			}
		}
	}

	if (ret != 0)
		goto clean_exit;

	ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
	if (ret != 0) {
		DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
		ret = -EINVAL;
		goto clean_exit;
	}

	/*
	 * Compare the timing of local_dc_state->streams with
	 * dm_state->context; if they are the same, clear
	 * crtc_state->mode_changed.
	 */
	for (i = 0; i < local_dc_state->stream_count; i++) {
		struct dc_stream_state *stream = dm_state->context->streams[i];

		if (local_dc_state->streams[i] &&
		    dc_is_timing_changed(stream, local_dc_state->streams[i])) {
			DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i);
		} else {
			int ind = find_crtc_index_in_state_by_stream(state, stream);

			if (ind >= 0)
				state->crtcs[ind].new_state->mode_changed = 0;
		}
	}
clean_exit:
	for (i = 0; i < local_dc_state->stream_count; i++) {
		struct dc_stream_state *stream = dm_state->context->streams[i];

		if (local_dc_state->streams[i] != stream)
			dc_stream_release(local_dc_state->streams[i]);
	}

	kfree(local_dc_state);

	return ret;
}

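/*
 * Inverse of kbps_to_peak_pbn() without the FEC overhead term: divide out
 * the 0.6% margin via PEAK_FACTOR_X1000 and convert 54/64-MBps PBN units
 * back to kbps.
 */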
static uint32_t kbps_from_pbn(unsigned int pbn)
{
	uint64_t kbps = (uint64_t)pbn;

	kbps *= (1000000 / PEAK_FACTOR_X1000);
	kbps *= 8;
	kbps *= 54;
	kbps /= 64;

	return (uint32_t)kbps;
}

static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
					  struct dc_dsc_bw_range *bw_range)
{
	struct dc_dsc_policy dsc_policy = {0};

	dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
	dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
				       stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				       dsc_policy.min_target_bpp * 16,
				       dsc_policy.max_target_bpp * 16,
				       &stream->sink->dsc_caps.dsc_dec_caps,
				       &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);

	return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
}

enum dc_status dm_dp_mst_is_port_support_mode(
	struct amdgpu_dm_connector *aconnector,
	struct dc_stream_state *stream)
{
	int bpp, pbn, branch_max_throughput_mps = 0;
	struct dc_link_settings cur_link_settings;
	unsigned int end_to_end_bw_in_kbps = 0;
	unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
	unsigned int max_compressed_bw_in_kbps = 0;
	struct dc_dsc_bw_range bw_range = {0};
	uint16_t full_pbn = aconnector->mst_output_port->full_pbn;

	/*
	 * Consider the case where the depth of the MST topology tree is equal
	 * to or less than 2:
	 * A. When the DSC bitstream can be transmitted along the entire path:
	 *    1. DSC is possible between the source and the branch/leaf device
	 *       (common DSC params are possible), AND
	 *    2. DSC passthrough is supported at the MST branch, or
	 *    3. DSC decoding is supported at the leaf MST device.
	 *    Use maximum DSC compression as the bandwidth constraint.
	 * B. When the DSC bitstream cannot be transmitted along the entire
	 *    path: use the native bandwidth as the constraint.
	 */
	if (is_dsc_common_config_possible(stream, &bw_range) &&
	    (aconnector->mst_output_port->passthrough_aux ||
	     aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
		cur_link_settings = stream->link->verified_link_cap;

		upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							       &cur_link_settings);
		down_link_bw_in_kbps = kbps_from_pbn(full_pbn);

		/* pick the bottleneck */
		end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
					    down_link_bw_in_kbps);

		/*
		 * use the maximum dsc compression bandwidth as the required
		 * bandwidth for the mode
		 */
		max_compressed_bw_in_kbps = bw_range.min_kbps;

		if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) {
			DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n");
			return DC_FAIL_BANDWIDTH_VALIDATE;
		}
	} else {
		/* check if the mode could be supported within full_pbn */
		bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
		pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
		if (pbn > full_pbn)
			return DC_FAIL_BANDWIDTH_VALIDATE;
	}

	/* check the MST DSC output bandwidth against branch_overall_throughput_*_mps */
	switch (stream->timing.pixel_encoding) {
	case PIXEL_ENCODING_RGB:
	case PIXEL_ENCODING_YCBCR444:
		branch_max_throughput_mps =
			aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps;
		break;
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR420:
		branch_max_throughput_mps =
			aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps;
		break;
	default:
		break;
	}

	if (branch_max_throughput_mps != 0 &&
	    ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000))
		return DC_FAIL_BANDWIDTH_VALIDATE;

	return DC_OK;
}