/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_dp_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"

#include "dc.h"
#include "dm_helpers.h"

#include "dc_link_ddc.h"
#include "ddc_service_types.h"
#include "dpcd_defs.h"

#include "i2caux_interface.h"
#include "dmub_cmd.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
#endif

static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	ssize_t result = 0;
	struct aux_payload payload;
	enum aux_return_code_type operation_result;

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

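	/*
	 * Decode the DRM AUX request into DC's aux_payload: requests without
	 * the native bit (DP_AUX_NATIVE_WRITE) are i2c-over-aux, requests
	 * without the read bit (DP_AUX_I2C_READ) are writes, MOT marks an i2c
	 * transaction that continues in a later message, and a write status
	 * update polls completion of an earlier partial i2c write.
	 */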
	payload.address = msg->address;
	payload.data = msg->buffer;
	payload.length = msg->size;
	payload.reply = &msg->reply;
	payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
	payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
	payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
	payload.write_status_update =
			(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
	payload.defer_delay = 0;

	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
				      &operation_result);

	if (payload.write && result >= 0)
		result = msg->size;

	if (result < 0)
		switch (operation_result) {
		case AUX_RET_SUCCESS:
			break;
		case AUX_RET_ERROR_HPD_DISCON:
		case AUX_RET_ERROR_UNKNOWN:
		case AUX_RET_ERROR_INVALID_OPERATION:
		case AUX_RET_ERROR_PROTOCOL_ERROR:
			result = -EIO;
			break;
		case AUX_RET_ERROR_INVALID_REPLY:
		case AUX_RET_ERROR_ENGINE_ACQUIRE:
			result = -EBUSY;
			break;
		case AUX_RET_ERROR_TIMEOUT:
			result = -ETIMEDOUT;
			break;
		}

	return result;
}

static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(connector);

	if (aconnector->dc_sink) {
		dc_link_remove_remote_sink(aconnector->dc_link,
					   aconnector->dc_sink);
		dc_sink_release(aconnector->dc_sink);
	}

	kfree(aconnector->edid);

	drm_connector_cleanup(connector);
	drm_dp_mst_put_port_malloc(aconnector->port);
	kfree(aconnector);
}

static int
amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	r = drm_dp_mst_connector_late_register(connector,
					       amdgpu_dm_connector->port);
	if (r < 0)
		return r;

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;

	drm_dp_mst_connector_early_unregister(connector, port);
}

static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dm_dp_mst_connector_destroy,
	.reset = amdgpu_dm_connector_funcs_reset,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_mst_connector_late_register,
	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};

#if defined(CONFIG_DRM_AMD_DC_DCN)
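/*
 * Some docks expose DSC/FEC caps through the branch device's own DPCD
 * rather than the downstream port; key the workaround on the branch device
 * ID, DPCD revision and sink count observed on such docks.
 */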
static bool needs_dsc_aux_workaround(struct dc_link *link)
{
	if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
	    (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
	    link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
		return true;

	return false;
}

static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *dc_sink = aconnector->dc_sink;
	struct drm_dp_mst_port *port = aconnector->port;
	u8 dsc_caps[16] = { 0 };
	u8 dsc_branch_dec_caps_raw[3] = { 0 };	// DSC branch decoder caps 0xA0 ~ 0xA2
	u8 *dsc_branch_dec_caps = NULL;

	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);

	/*
	 * drm_dp_mst_dsc_aux_for_port() returns NULL for certain configs
	 * because it only checks the DSC/FEC caps of the given port, not of
	 * the dock as a whole.
	 *
	 * One such case returns NULL: a DSC-capable MST dock connected to a
	 * non-FEC/DSC-capable display.
	 *
	 * Workaround: detect that case explicitly and use the MST dock's own
	 * AUX channel as dsc_aux.
	 */
	if (!aconnector->dsc_aux && !port->parent->port_parent &&
	    needs_dsc_aux_workaround(aconnector->dc_link))
		aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;

	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux,
			DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, dsc_branch_dec_caps_raw, 3) == 3)
		dsc_branch_dec_caps = dsc_branch_dec_caps_raw;

	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				  dsc_caps, dsc_branch_dec_caps,
				  &dc_sink->dsc_caps.dsc_dec_caps))
		return false;

	return true;
}
#endif

static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	int ret = 0;

	if (!aconnector)
		return drm_add_edid_modes(connector, NULL);

	if (!aconnector->edid) {
		struct edid *edid;

		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);

		if (!edid) {
			drm_connector_update_edid_property(
				&aconnector->base,
				NULL);

			DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name);
			if (!aconnector->dc_sink) {
				struct dc_sink *dc_sink;
				struct dc_sink_init_data init_params = {
					.link = aconnector->dc_link,
					.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

				dc_sink = dc_link_add_remote_sink(
					aconnector->dc_link,
					NULL,
					0,
					&init_params);

				if (!dc_sink) {
					DRM_ERROR("Unable to add a remote sink\n");
					return 0;
				}

				dc_sink->priv = aconnector;
				aconnector->dc_sink = dc_sink;
			}

			return ret;
		}

		aconnector->edid = edid;
	}

	if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	if (!aconnector->dc_sink) {
		struct dc_sink *dc_sink;
		struct dc_sink_init_data init_params = {
				.link = aconnector->dc_link,
				.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

		dc_sink = dc_link_add_remote_sink(
			aconnector->dc_link,
			(uint8_t *)aconnector->edid,
			(aconnector->edid->extensions + 1) * EDID_LENGTH,
			&init_params);

		if (!dc_sink) {
			DRM_ERROR("Unable to add a remote sink\n");
			return 0;
		}

		dc_sink->priv = aconnector;
		/* dc_link_add_remote_sink returns a new reference */
		aconnector->dc_sink = dc_sink;

		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(
					connector, aconnector->edid);

#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (!validate_dsc_caps_on_connector(aconnector))
				memset(&aconnector->dc_sink->dsc_caps,
				       0, sizeof(aconnector->dc_sink->dsc_caps));
#endif
		}
	}

	drm_connector_update_edid_property(
					&aconnector->base, aconnector->edid);

	ret = drm_add_edid_modes(connector, aconnector->edid);

	return ret;
}

static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
			   struct drm_atomic_state *state)
{
	struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
											 connector);
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);

	return &adev->dm.mst_encoders[acrtc->crtc_id].base;
}

static int
dm_dp_mst_detect(struct drm_connector *connector,
		 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_dm_connector *master = aconnector->mst_port;

	if (drm_connector_is_unregistered(connector))
		return connector_status_disconnected;

	return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
				      aconnector->port);
}

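/*
 * Release this port's VCPI slots when the connector is leaving its CRTC, or
 * when the new CRTC state is disabled by a full modeset; an enabled CRTC
 * keeps its allocation.
 */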
static int dm_dp_mst_atomic_check(struct drm_connector *connector,
				struct drm_atomic_state *state)
{
	struct drm_connector_state *new_conn_state =
			drm_atomic_get_new_connector_state(state, connector);
	struct drm_connector_state *old_conn_state =
			drm_atomic_get_old_connector_state(state, connector);
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct drm_crtc_state *new_crtc_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!old_conn_state->crtc)
		return 0;

	if (new_conn_state->crtc) {
		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
		if (!new_crtc_state ||
		    !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->enable)
			return 0;
	}

	return drm_dp_atomic_release_vcpi_slots(state,
						mst_mgr,
						mst_port);
}

static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
	.get_modes = dm_dp_mst_get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_best_encoder = dm_mst_atomic_best_encoder,
	.detect_ctx = dm_dp_mst_detect,
	.atomic_check = dm_dp_mst_atomic_check,
};

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

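/*
 * One fake DPMST encoder is created per display index so every CRTC has an
 * encoder it can be routed through; dm_mst_atomic_best_encoder() later picks
 * the one matching the assigned CRTC.
 */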
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i];
		struct drm_encoder *encoder = &amdgpu_encoder->base;

		encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

		drm_encoder_init(
			dev,
			&amdgpu_encoder->base,
			&amdgpu_dm_encoder_funcs,
			DRM_MODE_ENCODER_DPMST,
			NULL);

		drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
	}
}

static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port,
			const char *pathprop)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	int i;

	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
	if (!aconnector)
		return NULL;

	connector = &aconnector->base;
	aconnector->port = port;
	aconnector->mst_port = master;

	if (drm_connector_init(
		dev,
		connector,
		&dm_dp_mst_connector_funcs,
		DRM_MODE_CONNECTOR_DisplayPort)) {
		kfree(aconnector);
		return NULL;
	}
	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		&adev->dm,
		aconnector,
		DRM_MODE_CONNECTOR_DisplayPort,
		master->dc_link,
		master->connector_id);

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_connector_attach_encoder(&aconnector->base,
					     &adev->dm.mst_encoders[i].base);
	}

	connector->max_bpc_property = master->base.max_bpc_property;
	if (connector->max_bpc_property)
		drm_connector_attach_max_bpc_property(connector, 8, 16);

	connector->vrr_capable_property = master->base.vrr_capable_property;
	if (connector->vrr_capable_property)
		drm_connector_attach_vrr_capable_property(connector);

	drm_object_attach_property(
		&connector->base,
		dev->mode_config.path_property,
		0);
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.tile_property,
		0);

	drm_connector_set_path_property(connector, pathprop);

	/*
	 * Initialize connector state before adding the connector to drm and
	 * framebuffer lists
	 */
	amdgpu_dm_connector_funcs_reset(connector);

	drm_dp_mst_get_port_malloc(port);

	return connector;
}

static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
	.add_connector = dm_dp_add_mst_connector,
};

void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
				       struct amdgpu_dm_connector *aconnector,
				       int link_index)
{
	struct dc_link_settings max_link_enc_cap = {0};

	aconnector->dm_dp_aux.aux.name =
		kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
			  link_index);
	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
	aconnector->dm_dp_aux.aux.drm_dev = dm->ddev;
	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

	drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
	drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
				      &aconnector->base);

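	/* eDP links do not carry MST; skip topology manager setup. */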
	if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		return;

	dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
	aconnector->mst_mgr.cbs = &dm_mst_cbs;
	drm_dp_mst_topology_mgr_init(
		&aconnector->mst_mgr,
		adev_to_drm(dm->adev),
		&aconnector->dm_dp_aux.aux,
		16,
		4,
		max_link_enc_cap.lane_count,
		drm_dp_bw_code_to_link_rate(max_link_enc_cap.link_rate),
		aconnector->connector_id);

	drm_connector_attach_dp_subconnector_property(&aconnector->base);
}

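/*
 * Return the PBN value of one MTP timeslot on this link: the link bandwidth
 * in PBN units (1 PBN = 54/64 MBps = 54 * 8 * 1000 / 64 kbps) divided by the
 * 64 timeslots per MTP, which simplifies to kbps / (8 * 1000 * 54).
 */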
int dm_mst_get_pbn_divider(struct dc_link *link)
{
	if (!link)
		return 0;

	return dc_link_bandwidth_kbps(link,
			dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)

struct dsc_mst_fairness_params {
	struct dc_crtc_timing *timing;
	struct dc_sink *sink;
	struct dc_dsc_bw_range bw_range;
	bool compression_possible;
	struct drm_dp_mst_port *port;
	enum dsc_clock_force_state clock_force_enable;
	uint32_t num_slices_h;
	uint32_t num_slices_v;
	uint32_t bpp_overwrite;
	struct amdgpu_dm_connector *aconnector;
};

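/*
 * Convert a stream's bandwidth to its peak PBN requirement: apply the 0.6%
 * (1006/1000) margin the DP spec reserves for clock deviation, then divide
 * by the PBN unit of 54/64 MBps, rounding up.
 */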
static int kbps_to_peak_pbn(int kbps)
{
	u64 peak_kbps = kbps;

	peak_kbps *= 1006;
	peak_kbps = div_u64(peak_kbps, 1000);
	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}

static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
		struct dsc_mst_fairness_vars *vars,
		int count,
		int k)
{
	int i;

	for (i = 0; i < count; i++) {
		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
		if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
					params[i].sink->ctx->dc->res_pool->dscs[0],
					&params[i].sink->dsc_caps.dsc_dec_caps,
					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
					params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
					0,
					params[i].timing,
					&params[i].timing->dsc_cfg)) {
			params[i].timing->flags.DSC = 1;

			if (params[i].bpp_overwrite)
				params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite;
			else
				params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16;

			if (params[i].num_slices_h)
				params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;

			if (params[i].num_slices_v)
				params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v;
		} else {
			params[i].timing->flags.DSC = 0;
		}
	}
}

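/*
 * Inverse of kbps_to_peak_pbn() (994/1000 ~= 1000/1006 strips the margin):
 * convert a PBN allocation back to kbps and ask DC for the DSC target bpp
 * that fits it, returned in units of 1/16 of a bit per pixel.
 */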
static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
{
	struct dc_dsc_config dsc_config;
	u64 kbps;

	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
	dc_dsc_compute_config(
			param.sink->ctx->dc->res_pool->dscs[0],
			&param.sink->dsc_caps.dsc_dec_caps,
			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
			param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
			(int) kbps, param.timing, &dsc_config);

	return dsc_config.bits_per_pixel;
}

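/*
 * Greedily hand leftover link bandwidth back to compressed streams: each
 * pass picks the DSC stream with the least slack (distance from the PBN
 * needed at its maximum DSC target bpp), grants it a fair share of the
 * unused timeslots, and keeps the grant only if the MST atomic check still
 * passes.
 */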
static void increase_dsc_bpp(struct drm_atomic_state *state,
			     struct dc_link *dc_link,
			     struct dsc_mst_fairness_params *params,
			     struct dsc_mst_fairness_vars *vars,
			     int count,
			     int k)
{
	int i;
	bool bpp_increased[MAX_PIPES];
	int initial_slack[MAX_PIPES];
	int min_initial_slack;
	int next_index;
	int remaining_to_increase = 0;
	int pbn_per_timeslot;
	int link_timeslots_used;
	int fair_pbn_alloc;

	pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);

	for (i = 0; i < count; i++) {
		if (vars[i + k].dsc_enabled) {
			initial_slack[i] =
			kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
			bpp_increased[i] = false;
			remaining_to_increase += 1;
		} else {
			initial_slack[i] = 0;
			bpp_increased[i] = true;
		}
	}

	while (remaining_to_increase) {
		next_index = -1;
		min_initial_slack = -1;
		for (i = 0; i < count; i++) {
			if (!bpp_increased[i]) {
				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
					min_initial_slack = initial_slack[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		link_timeslots_used = 0;

		for (i = 0; i < count; i++)
			link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot);

		fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;

		if (initial_slack[next_index] > fair_pbn_alloc) {
			vars[next_index].pbn += fair_pbn_alloc;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  pbn_per_timeslot) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
			} else {
				vars[next_index].pbn -= fair_pbn_alloc;
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  pbn_per_timeslot) < 0)
					return;
			}
		} else {
			vars[next_index].pbn += initial_slack[next_index];
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  pbn_per_timeslot) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
			} else {
				vars[next_index].pbn -= initial_slack[next_index];
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  pbn_per_timeslot) < 0)
					return;
			}
		}

		bpp_increased[next_index] = true;
		remaining_to_increase--;
	}
}

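/*
 * For streams already granted their maximum DSC target bpp, try switching
 * DSC off entirely, largest potential bandwidth gain first; keep DSC off
 * only if the uncompressed PBN still passes the MST atomic check.
 */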
static void try_disable_dsc(struct drm_atomic_state *state,
			    struct dc_link *dc_link,
			    struct dsc_mst_fairness_params *params,
			    struct dsc_mst_fairness_vars *vars,
			    int count,
			    int k)
{
	int i;
	bool tried[MAX_PIPES];
	int kbps_increase[MAX_PIPES];
	int max_kbps_increase;
	int next_index;
	int remaining_to_try = 0;

	for (i = 0; i < count; i++) {
		if (vars[i + k].dsc_enabled
				&& vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16
				&& params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
			tried[i] = false;
			remaining_to_try += 1;
		} else {
			kbps_increase[i] = 0;
			tried[i] = true;
		}
	}

	while (remaining_to_try) {
		next_index = -1;
		max_kbps_increase = -1;
		for (i = 0; i < count; i++) {
			if (!tried[i]) {
				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
					max_kbps_increase = kbps_increase[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[next_index].port->mgr,
						  params[next_index].port,
						  vars[next_index].pbn,
						  dm_mst_get_pbn_divider(dc_link)) < 0)
			return;

		if (!drm_dp_mst_atomic_check(state)) {
			vars[next_index].dsc_enabled = false;
			vars[next_index].bpp_x16 = 0;
		} else {
			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
		}

		tried[next_index] = true;
		remaining_to_try--;
	}
}

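/*
 * Per-link DSC fairness: first try every stream uncompressed; if that fits
 * (and debugfs does not force DSC), stop there. Otherwise start every
 * DSC-capable stream at maximum compression and let increase_dsc_bpp() and
 * try_disable_dsc() relax compression wherever bandwidth allows.
 */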
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
					     struct dc_state *dc_state,
					     struct dc_link *dc_link,
					     struct dsc_mst_fairness_vars *vars,
					     int *link_vars_start_index)
{
	int i, k;
	struct dc_stream_state *stream;
	struct dsc_mst_fairness_params params[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int count = 0;
	bool debugfs_overwrite = false;

	memset(params, 0, sizeof(params));

	/* Set up params */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct dc_dsc_policy dsc_policy = {0};

		stream = dc_state->streams[i];

		if (stream->link != dc_link)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		if (!aconnector)
			continue;

		if (!aconnector->port)
			continue;

		stream->timing.flags.DSC = 0;

		params[count].timing = &stream->timing;
		params[count].sink = stream->sink;
		params[count].aconnector = aconnector;
		params[count].port = aconnector->port;
		params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
		if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
			debugfs_overwrite = true;
		params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
		params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
		params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
		params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
		dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy);
		if (!dc_dsc_compute_bandwidth_range(
				stream->sink->ctx->dc->res_pool->dscs[0],
				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				dsc_policy.min_target_bpp * 16,
				dsc_policy.max_target_bpp * 16,
				&stream->sink->dsc_caps.dsc_dec_caps,
				&stream->timing, &params[count].bw_range))
			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);

		count++;
	}

	if (count == 0) {
		ASSERT(0);
		return true;
	}

	/* k is start index of vars for current phy link used by mst hub */
	k = *link_vars_start_index;
	/* set vars start index for next mst hub phy link */
	*link_vars_start_index += count;

	/* Try no compression */
	for (i = 0; i < count; i++) {
		vars[i + k].aconnector = params[i].aconnector;
		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
		vars[i + k].dsc_enabled = false;
		vars[i + k].bpp_x16 = 0;
		if (drm_dp_atomic_find_vcpi_slots(state,
						 params[i].port->mgr,
						 params[i].port,
						 vars[i + k].pbn,
						 dm_mst_get_pbn_divider(dc_link)) < 0)
			return false;
	}
	if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
		set_dsc_configs_from_fairness_vars(params, vars, count, k);
		return true;
	}

	/* Try max compression */
	for (i = 0; i < count; i++) {
		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
			vars[i + k].dsc_enabled = true;
			vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i + k].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		} else {
			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
			vars[i + k].dsc_enabled = false;
			vars[i + k].bpp_x16 = 0;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i + k].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		}
	}
	if (drm_dp_mst_atomic_check(state))
		return false;

	/* Optimize degree of compression */
	increase_dsc_bpp(state, dc_link, params, vars, count, k);

	try_disable_dsc(state, dc_link, params, vars, count, k);

	set_dsc_configs_from_fairness_vars(params, vars, count, k);

	return true;
}

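/*
 * DSC only needs recomputation when the new atomic state actually changes a
 * mode on this MST link; a commit that touches no mode keeps the existing
 * DSC configuration.
 */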
static bool is_dsc_need_re_compute(
	struct drm_atomic_state *state,
	struct dc_state *dc_state,
	struct dc_link *dc_link)
{
	int i;
	bool is_dsc_need_re_compute = false;

	/* only check phy used by mst branch */
	if (dc_link->type != dc_connection_mst_branch)
		return false;

	/* check if there is a mode change in the new request */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct amdgpu_dm_connector *aconnector;
		struct dc_stream_state *stream;
		struct drm_crtc_state *new_crtc_state;
		struct drm_connector_state *new_conn_state;

		stream = dc_state->streams[i];

		if (!stream)
			continue;

		/* check if the stream uses the same mst link */
		if (stream->link != dc_link)
			continue;

		aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
		if (!aconnector)
			continue;

		new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);

		if (!new_conn_state)
			continue;

		if (IS_ERR(new_conn_state))
			continue;

		if (!new_conn_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		if (!new_crtc_state)
			continue;

		if (IS_ERR(new_crtc_state))
			continue;

		if (new_crtc_state->enable && new_crtc_state->active) {
			if (new_crtc_state->mode_changed || new_crtc_state->active_changed ||
				new_crtc_state->connectors_changed)
				is_dsc_need_re_compute = true;
		}
	}

	return is_dsc_need_re_compute;
}

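/*
 * Entry point from atomic check: walk every MST stream that supports DSC,
 * recompute DSC configs one phy link at a time under the topology manager
 * lock, then acquire a DSC resource for each stream that ended up with DSC
 * enabled.
 */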
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				       struct dc_state *dc_state,
				       struct dsc_mst_fairness_vars *vars)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int link_vars_start_index = 0;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink)
			continue;

		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
			return false;

		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
			continue;

		mutex_lock(&aconnector->mst_mgr.lock);
		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link,
			vars, &link_vars_start_index)) {
			mutex_unlock(&aconnector->mst_mgr.lock);
			return false;
		}
		mutex_unlock(&aconnector->mst_mgr.lock);

		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->timing.flags.DSC == 1)
			if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
				return false;
	}

	return true;
}

#endif