1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/string.h>
27 #include <linux/acpi.h>
28 #include <linux/i2c.h>
29 
30 #include <drm/drm_probe_helper.h>
31 #include <drm/amdgpu_drm.h>
32 #include <drm/drm_edid.h>
33 
34 #include "dm_services.h"
35 #include "amdgpu.h"
36 #include "dc.h"
37 #include "amdgpu_dm.h"
38 #include "amdgpu_dm_irq.h"
39 #include "amdgpu_dm_mst_types.h"
40 
41 #include "dm_helpers.h"
42 
43 struct monitor_patch_info {
44 	unsigned int manufacturer_id;
45 	unsigned int product_id;
46 	void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param);
47 	unsigned int patch_param;
48 };
49 static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param);
50 
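/*
 * Quirk table matched against the (manufacturer_id, product_id) pair parsed
 * from the sink EDID; on a hit, patch_func is invoked with patch_param. Both
 * entries below cap the DSC target bpp for specific panels.
 */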
51 static const struct monitor_patch_info monitor_patch_table[] = {
	{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15},
	{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15},
54 };
55 
56 static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param)
57 {
58 	if (edid_caps)
59 		edid_caps->panel_patch.max_dsc_target_bpp_limit = param;
60 }
61 
62 static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
63 {
64 	int i, ret = 0;
65 
	for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++) {
		if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id) &&
		    (edid_caps->product_id == monitor_patch_table[i].product_id)) {
			monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param);
			ret++;
		}
	}
72 
73 	return ret;
74 }
75 
/* dm_helpers_parse_edid_caps
 *
 * Parse edid caps
 *
 * @link:	[in] pointer to dc link
 * @edid:	[in] pointer to edid
 * @edid_caps:	[in] pointer to edid caps
 * @return
 *	enum dc_edid_status
 */
85 enum dc_edid_status dm_helpers_parse_edid_caps(
86 		struct dc_link *link,
87 		const struct dc_edid *edid,
88 		struct dc_edid_caps *edid_caps)
89 {
90 	struct amdgpu_dm_connector *aconnector = link->priv;
91 	struct drm_connector *connector = &aconnector->base;
92 	struct edid *edid_buf = (struct edid *) edid->raw_edid;
93 	struct cea_sad *sads;
94 	int sad_count = -1;
95 	int sadb_count = -1;
96 	int i = 0;
97 	uint8_t *sadb = NULL;
98 
99 	enum dc_edid_status result = EDID_OK;
100 
101 	if (!edid_caps || !edid)
102 		return EDID_BAD_INPUT;
103 
104 	if (!drm_edid_is_valid(edid_buf))
105 		result = EDID_BAD_CHECKSUM;
106 
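	/* Pack the two raw EDID ID bytes into 16-bit vendor/product IDs. */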
107 	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
108 					((uint16_t) edid_buf->mfg_id[1])<<8;
109 	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
110 					((uint16_t) edid_buf->prod_code[1])<<8;
111 	edid_caps->serial_number = edid_buf->serial;
112 	edid_caps->manufacture_week = edid_buf->mfg_week;
113 	edid_caps->manufacture_year = edid_buf->mfg_year;
114 
115 	drm_edid_get_monitor_name(edid_buf,
116 				  edid_caps->display_name,
117 				  AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
118 
119 	edid_caps->edid_hdmi = connector->display_info.is_hdmi;
120 
	sad_count = drm_edid_to_sad(edid_buf, &sads);
122 	if (sad_count <= 0)
123 		return result;
124 
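	/* Copy the CEA Short Audio Descriptors, capped at DC_MAX_AUDIO_DESC_COUNT. */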
125 	edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
126 	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
127 		struct cea_sad *sad = &sads[i];
128 
129 		edid_caps->audio_modes[i].format_code = sad->format;
130 		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
131 		edid_caps->audio_modes[i].sample_rate = sad->freq;
132 		edid_caps->audio_modes[i].sample_size = sad->byte2;
133 	}
134 
	sadb_count = drm_edid_to_speaker_allocation(edid_buf, &sadb);
136 
137 	if (sadb_count < 0) {
138 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
139 		sadb_count = 0;
140 	}
141 
142 	if (sadb_count)
143 		edid_caps->speaker_flags = sadb[0];
144 	else
145 		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
146 
147 	kfree(sads);
148 	kfree(sadb);
149 
150 	amdgpu_dm_patch_edid_caps(edid_caps);
151 
152 	return result;
153 }
154 
155 static void get_payload_table(
156 		struct amdgpu_dm_connector *aconnector,
157 		struct dp_mst_stream_allocation_table *proposed_table)
158 {
159 	int i;
160 	struct drm_dp_mst_topology_mgr *mst_mgr =
161 			&aconnector->mst_port->mst_mgr;
162 
163 	mutex_lock(&mst_mgr->payload_lock);
164 
165 	proposed_table->stream_count = 0;
166 
167 	/* number of active streams */
168 	for (i = 0; i < mst_mgr->max_payloads; i++) {
169 		if (mst_mgr->payloads[i].num_slots == 0)
170 			break; /* end of vcp_id table */
171 
172 		ASSERT(mst_mgr->payloads[i].payload_state !=
173 				DP_PAYLOAD_DELETE_LOCAL);
174 
175 		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
176 			mst_mgr->payloads[i].payload_state ==
177 					DP_PAYLOAD_REMOTE) {
178 
179 			struct dp_mst_stream_allocation *sa =
180 					&proposed_table->stream_allocations[
181 						proposed_table->stream_count];
182 
183 			sa->slot_count = mst_mgr->payloads[i].num_slots;
184 			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
185 			proposed_table->stream_count++;
186 		}
187 	}
188 
189 	mutex_unlock(&mst_mgr->payload_lock);
190 }
191 
192 void dm_helpers_dp_update_branch_info(
193 	struct dc_context *ctx,
194 	const struct dc_link *link)
195 {}
196 
197 /*
 * Writes the payload allocation table to the immediate downstream device.
199  */
200 bool dm_helpers_dp_mst_write_payload_allocation_table(
201 		struct dc_context *ctx,
202 		const struct dc_stream_state *stream,
203 		struct dp_mst_stream_allocation_table *proposed_table,
204 		bool enable)
205 {
206 	struct amdgpu_dm_connector *aconnector;
207 	struct dm_connector_state *dm_conn_state;
208 	struct drm_dp_mst_topology_mgr *mst_mgr;
209 	struct drm_dp_mst_port *mst_port;
210 	bool ret;
211 	u8 link_coding_cap = DP_8b_10b_ENCODING;
212 
213 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	/* Accessing the connector state is required for vcpi_slots allocation
	 * and directly relies on behaviour in commit check
	 * that blocks before commit, guaranteeing that the state
	 * is not going to be swapped while still in use in commit tail.
	 */
218 
219 	if (!aconnector || !aconnector->mst_port)
220 		return false;
221 
222 	dm_conn_state = to_dm_connector_state(aconnector->base.state);
223 
224 	mst_mgr = &aconnector->mst_port->mst_mgr;
225 
226 	if (!mst_mgr->mst_state)
227 		return false;
228 
229 	mst_port = aconnector->port;
230 
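	/* On DCN builds, query the link encoding: 128b/132b links start the
	 * payload allocation at slot 0 instead of slot 1 (see the
	 * drm_dp_update_payload_part1() call below).
	 */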
231 #if defined(CONFIG_DRM_AMD_DC_DCN)
232 	link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
233 #endif
234 
	if (enable) {
		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
					       dm_conn_state->pbn,
					       dm_conn_state->vcpi_slots);
		if (!ret)
			return false;
	} else {
		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
	}
246 
247 	/* It's OK for this to fail */
	drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0 : 1);
249 
	/* mst_mgr->payloads hold the VC payloads used to notify the MST
	 * branch device via DPCD or AUX messages. Slots 1-63 are allocated
	 * sequentially for each stream, and the AMD ASIC stream slot
	 * allocation must follow the same sequence, so copy the DRM MST
	 * allocation to dc.
	 */
254 
255 	get_payload_table(aconnector, proposed_table);
256 
257 	return true;
258 }
259 
260 /*
 * Polls the pending down reply.
262  */
263 void dm_helpers_dp_mst_poll_pending_down_reply(
264 	struct dc_context *ctx,
265 	const struct dc_link *link)
266 {}
267 
268 /*
 * Clears the payload allocation table before enabling the MST DP link.
270  */
271 void dm_helpers_dp_mst_clear_payload_allocation_table(
272 	struct dc_context *ctx,
273 	const struct dc_link *link)
274 {}
275 
276 /*
277  * Polls for ACT (allocation change trigger) handled and sends
278  * ALLOCATE_PAYLOAD message.
279  */
280 enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
281 		struct dc_context *ctx,
282 		const struct dc_stream_state *stream)
283 {
284 	struct amdgpu_dm_connector *aconnector;
285 	struct drm_dp_mst_topology_mgr *mst_mgr;
286 	int ret;
287 
288 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
289 
290 	if (!aconnector || !aconnector->mst_port)
291 		return ACT_FAILED;
292 
293 	mst_mgr = &aconnector->mst_port->mst_mgr;
294 
295 	if (!mst_mgr->mst_state)
296 		return ACT_FAILED;
297 
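	/* drm_dp_check_act_status() polls the DPCD payload table update status
	 * until the sink reports ACT handled or the wait times out.
	 */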
298 	ret = drm_dp_check_act_status(mst_mgr);
299 
300 	if (ret)
301 		return ACT_FAILED;
302 
303 	return ACT_SUCCESS;
304 }
305 
306 bool dm_helpers_dp_mst_send_payload_allocation(
307 		struct dc_context *ctx,
308 		const struct dc_stream_state *stream,
309 		bool enable)
310 {
311 	struct amdgpu_dm_connector *aconnector;
312 	struct drm_dp_mst_topology_mgr *mst_mgr;
313 	struct drm_dp_mst_port *mst_port;
314 
315 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
316 
317 	if (!aconnector || !aconnector->mst_port)
318 		return false;
319 
320 	mst_port = aconnector->port;
321 
322 	mst_mgr = &aconnector->mst_port->mst_mgr;
323 
324 	if (!mst_mgr->mst_state)
325 		return false;
326 
327 	/* It's OK for this to fail */
328 	drm_dp_update_payload_part2(mst_mgr);
329 
330 	if (!enable)
331 		drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
332 
333 	return true;
334 }
335 
336 void dm_dtn_log_begin(struct dc_context *ctx,
337 	struct dc_log_buffer_ctx *log_ctx)
338 {
339 	static const char msg[] = "[dtn begin]\n";
340 
341 	if (!log_ctx) {
342 		pr_info("%s", msg);
343 		return;
344 	}
345 
346 	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
347 }
348 
349 __printf(3, 4)
350 void dm_dtn_log_append_v(struct dc_context *ctx,
351 	struct dc_log_buffer_ctx *log_ctx,
352 	const char *msg, ...)
353 {
354 	va_list args;
355 	size_t total;
356 	int n;
357 
358 	if (!log_ctx) {
359 		/* No context, redirect to dmesg. */
360 		struct va_format vaf;
361 
362 		vaf.fmt = msg;
363 		vaf.va = &args;
364 
365 		va_start(args, msg);
366 		pr_info("%pV", &vaf);
367 		va_end(args);
368 
369 		return;
370 	}
371 
372 	/* Measure the output. */
373 	va_start(args, msg);
374 	n = vsnprintf(NULL, 0, msg, args);
375 	va_end(args);
376 
377 	if (n <= 0)
378 		return;
379 
380 	/* Reallocate the string buffer as needed. */
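	/* One extra byte for the NUL terminator that vscnprintf() writes. */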
381 	total = log_ctx->pos + n + 1;
382 
383 	if (total > log_ctx->size) {
		char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL);

		if (buf) {
			memcpy(buf, log_ctx->buf, log_ctx->pos);
			kvfree(log_ctx->buf);
389 
390 			log_ctx->buf = buf;
391 			log_ctx->size = total;
392 		}
393 	}
394 
395 	if (!log_ctx->buf)
396 		return;
397 
398 	/* Write the formatted string to the log buffer. */
399 	va_start(args, msg);
400 	n = vscnprintf(
401 		log_ctx->buf + log_ctx->pos,
402 		log_ctx->size - log_ctx->pos,
403 		msg,
404 		args);
405 	va_end(args);
406 
407 	if (n > 0)
408 		log_ctx->pos += n;
409 }
410 
411 void dm_dtn_log_end(struct dc_context *ctx,
412 	struct dc_log_buffer_ctx *log_ctx)
413 {
414 	static const char msg[] = "[dtn end]\n";
415 
416 	if (!log_ctx) {
417 		pr_info("%s", msg);
418 		return;
419 	}
420 
421 	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
422 }
423 
424 bool dm_helpers_dp_mst_start_top_mgr(
425 		struct dc_context *ctx,
426 		const struct dc_link *link,
427 		bool boot)
428 {
429 	struct amdgpu_dm_connector *aconnector = link->priv;
430 
431 	if (!aconnector) {
432 		DRM_ERROR("Failed to find connector for link!");
433 		return false;
434 	}
435 
436 	if (boot) {
		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
438 					aconnector, aconnector->base.base.id);
439 		return true;
440 	}
441 
442 	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
443 			aconnector, aconnector->base.base.id);
444 
445 	return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
446 }
447 
448 void dm_helpers_dp_mst_stop_top_mgr(
449 		struct dc_context *ctx,
450 		struct dc_link *link)
451 {
452 	struct amdgpu_dm_connector *aconnector = link->priv;
453 	uint8_t i;
454 
455 	if (!aconnector) {
456 		DRM_ERROR("Failed to find connector for link!");
457 		return;
458 	}
459 
460 	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
461 			aconnector, aconnector->base.base.id);
462 
	if (aconnector->mst_mgr.mst_state) {
464 		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
465 
466 		for (i = 0; i < MAX_SINKS_PER_LINK; i++) {
467 			if (link->remote_sinks[i] == NULL)
468 				continue;
469 
470 			if (link->remote_sinks[i]->sink_signal ==
471 			    SIGNAL_TYPE_DISPLAY_PORT_MST) {
472 				dc_link_remove_remote_sink(link, link->remote_sinks[i]);
473 
474 				if (aconnector->dc_sink) {
475 					dc_sink_release(aconnector->dc_sink);
476 					aconnector->dc_sink = NULL;
477 					aconnector->dc_link->cur_link_settings.lane_count = 0;
478 				}
479 			}
480 		}
481 	}
482 }
483 
484 bool dm_helpers_dp_read_dpcd(
485 		struct dc_context *ctx,
486 		const struct dc_link *link,
487 		uint32_t address,
488 		uint8_t *data,
489 		uint32_t size)
490 {
491 
492 	struct amdgpu_dm_connector *aconnector = link->priv;
493 
494 	if (!aconnector) {
495 		DC_LOG_DC("Failed to find connector for link!\n");
496 		return false;
497 	}
498 
499 	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
500 			data, size) > 0;
501 }
502 
503 bool dm_helpers_dp_write_dpcd(
504 		struct dc_context *ctx,
505 		const struct dc_link *link,
506 		uint32_t address,
507 		const uint8_t *data,
508 		uint32_t size)
509 {
510 	struct amdgpu_dm_connector *aconnector = link->priv;
511 
512 	if (!aconnector) {
513 		DRM_ERROR("Failed to find connector for link!");
514 		return false;
515 	}
516 
517 	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
518 			address, (uint8_t *)data, size) > 0;
519 }
520 
521 bool dm_helpers_submit_i2c(
522 		struct dc_context *ctx,
523 		const struct dc_link *link,
524 		struct i2c_command *cmd)
525 {
526 	struct amdgpu_dm_connector *aconnector = link->priv;
527 	struct i2c_msg *msgs;
528 	int i = 0;
529 	int num = cmd->number_of_payloads;
530 	bool result;
531 
532 	if (!aconnector) {
533 		DRM_ERROR("Failed to find connector for link!");
534 		return false;
535 	}
536 
537 	msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
538 
539 	if (!msgs)
540 		return false;
541 
542 	for (i = 0; i < num; i++) {
543 		msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
544 		msgs[i].addr = cmd->payloads[i].address;
545 		msgs[i].len = cmd->payloads[i].length;
546 		msgs[i].buf = cmd->payloads[i].data;
547 	}
548 
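	/* i2c_transfer() returns the number of messages transferred on success. */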
549 	result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
550 
551 	kfree(msgs);
552 
553 	return result;
554 }

bool dm_helpers_dp_write_dsc_enable(
556 		struct dc_context *ctx,
557 		const struct dc_stream_state *stream,
558 		bool enable)
559 {
560 	uint8_t enable_dsc = enable ? 1 : 0;
561 	struct amdgpu_dm_connector *aconnector;
562 	uint8_t ret = 0;
563 
564 	if (!stream)
565 		return false;
566 
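	/* For MST, write DSC enable through the DSC-capable (possibly virtual)
	 * DPCD AUX endpoint cached on the connector during DSC capability
	 * discovery.
	 */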
567 	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
568 		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
569 
570 		if (!aconnector->dsc_aux)
571 			return false;
572 
573 		ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
574 	}
575 
576 	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
577 #if defined(CONFIG_DRM_AMD_DC_DCN)
578 		if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
579 #endif
580 			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
581 			DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable");
582 #if defined(CONFIG_DRM_AMD_DC_DCN)
583 		} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
584 			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
585 			DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable");
586 		}
587 #endif
588 	}
589 
590 	return (ret > 0);
591 }
592 
593 bool dm_helpers_is_dp_sink_present(struct dc_link *link)
594 {
595 	bool dp_sink_present;
596 	struct amdgpu_dm_connector *aconnector = link->priv;
597 
598 	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
600 		return true;
601 	}
602 
603 	mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
604 	dp_sink_present = dc_link_is_dp_sink_present(link);
605 	mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
606 	return dp_sink_present;
607 }
608 
609 enum dc_edid_status dm_helpers_read_local_edid(
610 		struct dc_context *ctx,
611 		struct dc_link *link,
612 		struct dc_sink *sink)
613 {
614 	struct amdgpu_dm_connector *aconnector = link->priv;
615 	struct drm_connector *connector = &aconnector->base;
616 	struct i2c_adapter *ddc;
617 	int retry = 3;
618 	enum dc_edid_status edid_status;
619 	struct edid *edid;
620 
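	/* DP links read the EDID over the AUX channel's DDC adapter; other
	 * signal types use the native I2C bus.
	 */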
621 	if (link->aux_mode)
622 		ddc = &aconnector->dm_dp_aux.aux.ddc;
623 	else
624 		ddc = &aconnector->i2c->base;
625 
	/* Some dongles read the EDID incorrectly the first time, so verify
	 * the checksum and retry to make sure we read a correct EDID.
	 */
629 	do {
630 
631 		edid = drm_get_edid(&aconnector->base, ddc);
632 
633 		/* DP Compliance Test 4.2.2.6 */
634 		if (link->aux_mode && connector->edid_corrupt)
635 			drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);
636 
637 		if (!edid && connector->edid_corrupt) {
638 			connector->edid_corrupt = false;
639 			return EDID_BAD_CHECKSUM;
640 		}
641 
642 		if (!edid)
643 			return EDID_NO_RESPONSE;
644 
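		/* Copy the base EDID block plus any extension blocks into the
		 * dc sink's raw buffer.
		 */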
645 		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
646 		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
647 
648 		/* We don't need the original edid anymore */
649 		kfree(edid);
650 
		/* The EDID is parsed by the detect path below and saved into
		 * drm_connector->display_info.
		 *
		 * drm_connector->display_info will be used by amdgpu_dm funcs,
		 * like fill_stream_properties_from_drm_display_mode
		 */
657 		amdgpu_dm_update_connector_after_detect(aconnector);
658 
659 		edid_status = dm_helpers_parse_edid_caps(
660 						link,
661 						&sink->dc_edid,
662 						&sink->edid_caps);
663 
664 	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);
665 
666 	if (edid_status != EDID_OK)
667 		DRM_ERROR("EDID err: %d, on connector: %s",
668 				edid_status,
669 				aconnector->base.name);
670 
671 	/* DP Compliance Test 4.2.2.3 */
672 	if (link->aux_mode)
673 		drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);
674 
675 	return edid_status;
676 }

int dm_helper_dmub_aux_transfer_sync(
678 		struct dc_context *ctx,
679 		const struct dc_link *link,
680 		struct aux_payload *payload,
681 		enum aux_return_code_type *operation_result)
682 {
683 	return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx,
684 			link->link_index, (void *)payload,
685 			(void *)operation_result);
686 }
687 
688 int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
689 		const struct dc_link *link,
690 		struct set_config_cmd_payload *payload,
691 		enum set_config_status *operation_result)
692 {
693 	return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx,
694 			link->link_index, (void *)payload,
695 			(void *)operation_result);
696 }
697 
698 void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
699 {
700 	/* TODO: something */
701 }
702 
703 void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us)
704 {
705 	// TODO:
706 	//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
707 }
708 
709 void *dm_helpers_allocate_gpu_mem(
710 		struct dc_context *ctx,
711 		enum dc_gpu_mem_alloc_type type,
712 		size_t size,
713 		long long *addr)
714 {
715 	struct amdgpu_device *adev = ctx->driver_context;
716 	struct dal_allocation *da;
717 	u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
718 		AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
719 	int ret;
720 
721 	da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
722 	if (!da)
723 		return NULL;
724 
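	/* amdgpu_bo_create_kernel() creates and pins the BO and returns both
	 * its GPU address and a CPU mapping.
	 */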
	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				      domain, &da->bo,
				      &da->gpu_addr, &da->cpu_ptr);

	if (ret) {
		kfree(da);
		return NULL;
	}

	*addr = da->gpu_addr;
735 
736 	/* add da to list in dm */
737 	list_add(&da->list, &adev->dm.da_list);
738 
739 	return da->cpu_ptr;
740 }
741 
742 void dm_helpers_free_gpu_mem(
743 		struct dc_context *ctx,
744 		enum dc_gpu_mem_alloc_type type,
745 		void *pvMem)
746 {
747 	struct amdgpu_device *adev = ctx->driver_context;
748 	struct dal_allocation *da;
749 
750 	/* walk the da list in DM */
751 	list_for_each_entry(da, &adev->dm.da_list, list) {
752 		if (pvMem == da->cpu_ptr) {
753 			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
754 			list_del(&da->list);
755 			kfree(da);
756 			break;
757 		}
758 	}
759 }
760 
761 bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
762 {
763 	enum dc_irq_source irq_source;
764 	bool ret;
765 
766 	irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;
767 
768 	ret = dc_interrupt_set(ctx->dc, irq_source, enable);
769 
770 	DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
771 			 enable ? "en" : "dis", ret);
772 	return ret;
773 }
774 
775 void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
776 {
777 	/* TODO: virtual DPCD */
778 	struct dc_link *link = stream->link;
779 	union down_spread_ctrl old_downspread;
780 	union down_spread_ctrl new_downspread;
781 
782 	if (link->aux_access_disabled)
783 		return;
784 
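	/* Read-modify-write DP_DOWNSPREAD_CTRL so only the
	 * IGNORE_MSA_TIMING_PARAM bit changes.
	 */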
785 	if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
786 				     &old_downspread.raw,
787 				     sizeof(old_downspread)))
788 		return;
789 
790 	new_downspread.raw = old_downspread.raw;
791 	new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
792 		(stream->ignore_msa_timing_param) ? 1 : 0;
793 
794 	if (new_downspread.raw != old_downspread.raw)
795 		dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
796 					 &new_downspread.raw,
797 					 sizeof(new_downspread));
798 }
799 
800 #if defined(CONFIG_DRM_AMD_DC_DCN)
801 void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
802 {
	/* The FPGA programming for this clock is handled in the diags
	 * framework and needs to go through the dm layer, therefore leave a
	 * dummy interface here.
	 */
}

void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
809 {
	/* TODO: add periodic detection implementation */
811 }
812 #endif
813