/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_types.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>

#include "modules/inc/mod_freesync.h"

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->stream) {
		DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->stream);
}

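/*
 * dm_crtc_get_scanoutpos
 *
 * @brief
 * Get the current scanout position for a CRTC. Mirrors the style of
 * dm_vblank_get_counter() above and queries DC rather than reading
 * registers directly.
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to query
 * u32 *vbl - [out] vblank region
 * u32 *position - [out] current scan position
 *
 * @return
 * 0 on success (including the NULL-stream case), -EINVAL for an invalid
 * CRTC index
 */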
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->stream) {
		DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_scanoutpos(acrtc->stream, vbl, position);
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * The following check is inherited from both functions where this
	 * one is now used. It still needs to be investigated why an invalid
	 * OTG instance can be passed in at all.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

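/*
 * dm_pflip_high_irq - page-flip completion interrupt handler.
 *
 * Runs in interrupt context. Looks up the CRTC from the OTG instance
 * encoded in the IRQ source, sends the pending DRM_EVENT_FLIP_COMPLETE
 * event to userspace and resets pflip_status to AMDGPU_FLIP_NONE, all
 * under the DRM event_lock.
 */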
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* wake up userspace */
	if (amdgpu_crtc->event
			&& amdgpu_crtc->event->event.base.type
			== DRM_EVENT_FLIP_COMPLETE) {
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;
	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
}

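/*
 * dm_crtc_high_irq - vblank interrupt handler.
 *
 * Translates the DC IRQ source back to a CRTC index and forwards the
 * event to the DRM core via drm_handle_vblank().
 */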
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	uint8_t crtc_index = 0;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc)
		crtc_index = acrtc->crtc_id;

	drm_handle_vblank(adev->ddev, crtc_index);
}

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work,
			struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}

/* Init display KMS
 *
 * Returns 0 on success
 */
int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	DRM_INFO("DAL is enabled\n");
	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (!adev->dm.dc)
		DRM_ERROR("Display Core failed to initialize!\n");

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_INFO("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual number of CRTCs in use */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank support.\n");
		goto error;
	}

	DRM_INFO("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}


void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	dc_destroy(&adev->dm.dc);
}

/* moved from amdgpu_dm_kms.c */
void amdgpu_dm_destroy(void)
{
}

static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}

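/*
 * Walk all connectors and enable MST topology management on every link
 * that was detected as an MST branch device. Called from late init,
 * after the links have been detected. If MST cannot be started, the
 * link is downgraded to a single-display (SST) connection.
 */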
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch) {
			DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type =
						dc_connection_single;
				/* don't return with connection_mutex held */
				break;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	return ret;
}

static int dm_late_init(void *handle)
{
	struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;

	return detect_mst_link_for_all_connectors(dev);
}

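/*
 * Suspend or resume the MST topology managers of all root MST
 * connectors (those without an mst_port) around an S3 cycle, so the
 * MSTM state can be restored on resume.
 */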
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
				!aconnector->mst_port) {
			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);

	return 0;
}

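/*
 * dm_suspend - put the display HW into D3.
 *
 * Suspends the MST topology managers and DM interrupt handling, caches
 * the current atomic state for restoration in amdgpu_dm_display_resume(),
 * then asks DC to enter the D3 power state.
 */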
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
	struct drm_atomic_state *state,
	struct drm_crtc *crtc,
	bool from_state_var)
{
	uint32_t i;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_connector_in_state(state, connector, conn_state, i) {
		crtc_from_state =
			from_state_var ?
				conn_state->crtc :
				connector->state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_connector(connector);
	}

	return NULL;
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	return 0;
}

int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		dc_link_detect(aconnector->dc_link, false);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Force mode set in atomic commit */
	for_each_crtc_in_state(adev->dm.cached_state, crtc, crtc_state, i)
		crtc_state->active_changed = true;

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	amdgpu_dm_irq_resume(adev);

	return ret;
}

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/* TODO: it is temporarily non-const; should be fixed later */
static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

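/*
 * Synchronize the DRM connector state with the result of a DC link
 * detection: attach or drop the dc_sink, update the EDID property and
 * the freesync module registration. MST connectors are skipped because
 * the drm_dp_mst framework manages them itself.
 */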
void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
		return;

	sink = aconnector->dc_link->local_sink;

	/*
	 * An EDID-managed connector gets its first update only in the
	 * mode_valid hook; after that, the connector sink is set to either
	 * the fake or the physical sink depending on the link status.
	 * Don't do this here during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For headless S3 resume, use the emulated sink to fake a
		 * stream, because on resume connector->sink is set to NULL.
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
								connector);
				/*
				 * The retain and release below keep the
				 * sink's refcount balanced: the link no
				 * longer points to the sink after disconnect,
				 * so the next crtc-to-connector reshuffle by
				 * the UMD would otherwise trigger an unwanted
				 * dc_sink release.
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
							connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0)
			aconnector->edid = NULL;
		else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}

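/*
 * handle_hpd_irq - low-context handler for HPD (long pulse) interrupts.
 *
 * Re-runs link detection, updates the connector state and, unless the
 * connector is forced, notifies userspace with a hotplug event.
 */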
static void handle_hpd_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/*
	 * In case of failure or MST, there is no need to update the connector
	 * status or notify the OS since (for the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);
	if (dc_link_detect(aconnector->dc_link, false)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

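/*
 * Service a DP short-pulse interrupt for an MST-capable sink: read the
 * sink count / IRQ vector registers (DPCD 0x200+ for pre-DP1.2 devices,
 * the ESI block at DPCD 0x2002+ otherwise), let the MST manager handle
 * the IRQ, ACK it back to the sink and re-read, until no new IRQ is
 * signalled or max_process_count is reached.
 */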
static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;

		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}

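/*
 * handle_hpd_rx_irq - low-context handler for HPD RX (short pulse)
 * interrupts.
 *
 * Lets DC service the IRQ first; if a downstream port status change is
 * reported on a non-MST connector, re-detects the link and sends a
 * hotplug event. MST-capable links are then passed on to
 * dm_handle_hpd_rx_irq() above.
 */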
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/*
	 * TODO: Temporarily take a mutex so the HPD interrupt does not run
	 * into a GPIO conflict. Once the i2c helper is implemented, this
	 * mutex should be retired.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, false)) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
				(dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}

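/*
 * Register HPD and HPD RX interrupt handlers for every connector whose
 * DC link exposes a valid IRQ source. Both are serviced in low IRQ
 * (deferred) context.
 */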
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned int client_id = AMDGPU_IH_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 1; i <= adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

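/*
 * Initialize the DRM mode_config for the device: hook up the DM mode
 * config funcs and helper funcs, set the limits (max resolution,
 * preferred depth, async flip support) and create the driver's
 * mode-setting properties.
 */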
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicate support of immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};

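/*
 * Register a raw backlight device ("amdgpu_bl<N>") backed by
 * dc_link_set_backlight_level() on dm->backlight_link.
 */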
void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	/* backlight_device_register() returns an ERR_PTR on failure, not NULL */
	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_connector *aconnector;
	struct amdgpu_encoder *aencoder;
	struct amdgpu_crtc *acrtc;
	uint32_t link_cnt;

	link_cnt = dm->dc->caps.max_links;

	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++) {
		acrtc = kzalloc(sizeof(*acrtc), GFP_KERNEL);
		if (!acrtc)
			goto fail;

		if (amdgpu_dm_crtc_init(
			dm,
			acrtc,
			i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			kfree(acrtc);
			goto fail;
		}
	}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail_free_connector;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail_free_encoder;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail_free_connector;
		}

		if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGA10:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			return -1;
		}
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -1;
	}

	drm_mode_config_reset(dm->ddev);

	return 0;
fail_free_encoder:
	kfree(aencoder);
fail_free_connector:
	kfree(aconnector);
fail:
	return -1;
}

void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				     u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}

/******************************************************************************
 * Page Flip functions
 ******************************************************************************/

/**
 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
 *		  via DRM IOCTL, by user mode.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (surface address update).
 */
static void dm_page_flip(struct amdgpu_device *adev,
			 int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *acrtc;
	const struct dc_stream *stream;
	struct dc_flip_addrs addr = { {0} };

	/*
	 * TODO risk of concurrency issues
	 *
	 * This should be guarded by the dal_mutex but we can't do this since
	 * the caller uses a spin_lock on event_lock.
	 *
	 * If we wait on the dal_mutex a second page flip interrupt might come,
	 * spin on the event_lock, disabling interrupts while it does so. At
	 * this point the core can no longer be pre-empted and return to the
	 * thread that waited on the dal_mutex and we're deadlocked.
	 *
	 * With multiple cores the same essentially happens but might just take
	 * a little longer to lock up all cores.
	 *
	 * The reason we should lock on dal_mutex is so that we can be sure
	 * nobody messes with acrtc->stream after we read and check its value.
	 *
	 * We might be able to fix our concurrency issues with a work queue
	 * where we schedule all work items (mode_set, page_flip, etc.) and
	 * execute them one by one. Care needs to be taken to still deal with
	 * any potential concurrency issues arising from interrupt calls.
	 */

	acrtc = adev->mode_info.crtcs[crtc_id];
	stream = acrtc->stream;

	if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_ERROR("flip queue: acrtc %d, already busy\n", acrtc->crtc_id);
		/*
		 * In the commit-tail framework this cannot happen.
		 * Note: BUG_ON(0) was a no-op; assert unconditionally instead.
		 */
		BUG();
	}

	/*
	 * Received a page flip call after the display has been reset.
	 * Just return in this case. Everything should be clean-up on reset.
	 */
	if (!stream) {
		WARN_ON(1);
		return;
	}

	addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
	addr.flip_immediate = async;

	if (acrtc->base.state->event &&
	    acrtc->base.state->event->event.base.type ==
			    DRM_EVENT_FLIP_COMPLETE) {
		acrtc->event = acrtc->base.state->event;

		/* Set the flip status */
		acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

		/* Mark this event as consumed */
		acrtc->base.state->event = NULL;
	}

	dc_flip_surface_addrs(adev->dm.dc,
			      dc_stream_get_status(stream)->surfaces,
			      &addr, 1);

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);
}

static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* zero-initialized until the enable flag is actually read from DRM */
	struct mod_freesync_params freesync_params = {0};
	uint8_t num_streams;
	uint8_t i;

	struct amdgpu_device *adev = dev->dev_private;
	int r = 0;

	/* Get freesync enable flag from DRM */

	num_streams = dc_get_current_stream_count(adev->dm.dc);

	for (i = 0; i < num_streams; i++) {
		const struct dc_stream *stream;

		stream = dc_get_stream_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
					  &stream, 1, &freesync_params);
	}

	return r;
}


static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level, /* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level, /* called unconditionally */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(
	struct device *device,
	struct device_attribute *attr,
	const char *buf,
	size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

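/*
 * dm_early_init - set up per-ASIC mode_info limits.
 *
 * Fills in the number of CRTCs, HPD pins and digital encoders for each
 * supported ASIC and installs the DM display function table.
 */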
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}


bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}

bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}