/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_types.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>

#include "modules/inc/mod_freesync.h"

static enum drm_plane_type dm_surfaces_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};

static enum drm_plane_type dm_surfaces_type_carrizo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};

static enum drm_plane_type dm_surfaces_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (!acrtc->stream) {
			DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (!acrtc->stream) {
			DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		/*
		 * Pack into the register layout the base driver expects:
		 * vertical value in the low 16 bits, horizontal value in
		 * the high 16 bits (bitwise OR, not logical OR).
		 */
		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *get_crtc_by_otg_inst(
	struct amdgpu_device *adev,
	int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * The following check is inherited from both functions where this
	 * one is now used. It still needs to be investigated why an OTG
	 * instance of -1 can happen at all.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

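/*
 * dm_pflip_high_irq - page-flip completion handler, called in interrupt
 * context for each GRPH_PFLIP interrupt. Looks up the CRTC behind the OTG
 * instance that fired, sends the pending DRM_EVENT_FLIP_COMPLETE event to
 * userspace and marks the flip as finished.
 */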
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/*
	 * An IRQ can occur during the initial stage.
	 * TODO: work and BO cleanup
	 */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* wake up userspace */
	if (amdgpu_crtc->event
			&& amdgpu_crtc->event->event.base.type
			== DRM_EVENT_FLIP_COMPLETE) {
		/* Update to correct count/ts if racing with vblank irq */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;
	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
}

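/*
 * dm_crtc_high_irq - vblank interrupt handler. Resolves the CRTC behind
 * the OTG instance that fired and forwards the event to the DRM vblank
 * machinery.
 */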
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	uint8_t crtc_index = 0;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc)
		crtc_index = acrtc->crtc_id;

	drm_handle_vblank(adev->ddev, crtc_index);
}

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(
			work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}

/* Init display KMS
 *
 * Returns 0 on success
 */
int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	DRM_INFO("DAL is enabled\n");
	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (!adev->dm.dc) {
		DRM_ERROR("amdgpu: failed to initialize Display Core.\n");
		goto error;
	}

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_INFO("amdgpu: freesync_module init done %p.\n",
			 adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual number of crtcs used */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}

	DRM_INFO("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}

void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	dc_destroy(&adev->dm.dc);
}

/* moved from amdgpu_dm_kms.c */
void amdgpu_dm_destroy(void)
{
}

static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch) {
			DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
				 aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type =
					dc_connection_single;
				/* do not return with connection_mutex held */
				break;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	return ret;
}

static int dm_late_init(void *handle)
{
	struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
	int r = detect_mst_link_for_all_connectors(dev);

	return r;
}

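/*
 * s3_handle_mst - suspend or resume the MST topology manager of every MST
 * root connector around an S3 cycle, so the MSTM control bits are restored
 * consistently when the links come back up.
 */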
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
				!aconnector->mst_port) {

			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);

	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D3
		);

	return ret;
}

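/*
 * Find the first connector whose CRTC matches @crtc, looking either at the
 * connector states in the atomic @state (from_state_var == true) or at the
 * connectors' current states.
 */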
struct amdgpu_connector *amdgpu_dm_find_first_crtc_matching_connector(
	struct drm_atomic_state *state,
	struct drm_crtc *crtc,
	bool from_state_var)
{
	uint32_t i;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_connector_in_state(state, connector, conn_state, i) {
		crtc_from_state =
			from_state_var ?
				conn_state->crtc :
				connector->state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_connector(connector);
	}

	return NULL;
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	/* power on hardware */
	dc_set_power_state(
		dm->dc,
		DC_ACPI_CM_POWER_STATE_D0
		);

	return 0;
}

int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		dc_link_detect(aconnector->dc_link, false);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force a mode set in the atomic commit */
	for_each_crtc_in_state(adev->dm.cached_state, crtc, crtc_state, i)
		crtc_state->active_changed = true;

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	amdgpu_dm_irq_resume_late(adev);

	return ret;
}

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/* TODO: it is temporarily non-const; should be fixed later */
static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

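/*
 * Synchronize the drm connector with the detected dc_link state: update the
 * cached dc_sink and EDID property, and register with or unregister from the
 * freesync module as appropriate. MST connectors are handled by the drm_mst
 * framework instead.
 */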
void amdgpu_dm_update_connector_after_detect(
	struct amdgpu_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
		return;

	sink = aconnector->dc_link->local_sink;

	/* An Edid mgmt connector gets its first update only in the mode_valid
	 * hook, and then the connector sink is set to either the fake or the
	 * physical sink depending on the link status.
	 * Don't do it here during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For headless S3 resume, use the emulated sink to fake the
		 * stream, because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
								connector);
				/* The retain and release below bump up the
				 * refcount for the sink, because the link no
				 * longer points to it after disconnect, so on
				 * the next crtc-to-connector reshuffle by UMD
				 * we would otherwise get an unwanted dc_sink
				 * release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
							connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}

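/*
 * handle_hpd_irq - HPD (long pulse) handler. Re-runs link detection and, if
 * the link status changed, updates the connector state and sends a hotplug
 * event to userspace.
 */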
static void handle_hpd_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or
	 * notify the OS since (for the MST case) MST does this in its own
	 * context.
	 */
	mutex_lock(&aconnector->hpd_lock);
	if (dc_link_detect(aconnector->dc_link, false)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

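/*
 * dm_handle_hpd_rx_irq - service a DP short pulse for an MST link. Reads the
 * sink's down-stream IRQ/ESI registers, lets the MST topology manager handle
 * them, ACKs each serviced vector back to the sink, and repeats until no new
 * IRQ is pending (bounded by max_process_count).
 */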
static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;

		process_count++;

		DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify the downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else
			break;
	}

	if (process_count == max_process_count)
		DRM_DEBUG_KMS("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	const struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/* TODO: Temporary mutex to protect the HPD interrupt against GPIO
	 * conflicts; once the i2c helper is implemented, this mutex should
	 * be retired.
	 */
	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(aconnector->dc_link, false)) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
				(dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (aconnector->dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}

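/*
 * Walk the connector list and register low-IRQ-context handlers for every
 * valid HPD (long pulse) and HPD RX (DP short pulse) interrupt source.
 */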
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned int client_id = AMDGPU_IH_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

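/*
 * Set up the drm mode_config for the device: atomic funcs and helpers, size
 * limits, async page-flip support and the standard amdgpu properties.
 */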
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicate support of immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};

void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	/* backlight_device_register() returns an ERR_PTR on failure */
	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component
 *
 * Returns 0 on success
 */
int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	for (i = 0; i < dm->dc->caps.max_surfaces; i++) {
		mode_info->planes[i] = kzalloc(sizeof(struct amdgpu_plane),
					       GFP_KERNEL);
		if (!mode_info->planes[i]) {
			DRM_ERROR("KMS: Failed to allocate surface\n");
			goto fail_free_planes;
		}
		mode_info->planes[i]->plane_type = mode_info->plane_type[i];
		if (amdgpu_dm_plane_init(dm, mode_info->planes[i], 0xff)) {
			DRM_ERROR("KMS: Failed to initialize plane\n");
			goto fail_free_planes;
		}
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++) {
		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail_free_planes;
		}
	}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail_free_planes;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail_free_connector;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail_free_encoder;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail_free_encoder;
		}

		if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGA10:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail_free_encoder;
		}
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail_free_encoder;
	}

	drm_mode_config_reset(dm->ddev);

	return 0;
fail_free_encoder:
	kfree(aencoder);
fail_free_connector:
	kfree(aconnector);
fail_free_planes:
	for (i = 0; i < dm->dc->caps.max_surfaces; i++)
		kfree(mode_info->planes[i]);
	return -1;
}

void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				     u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}

/******************************************************************************
 * Page Flip functions
 ******************************************************************************/

/**
 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
 *			via DRM IOCTL, by user mode.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: whether the flip should be applied immediately (async flip)
 *
 * Does the actual pageflip (surface address update).
 */
static void dm_page_flip(struct amdgpu_device *adev,
			 int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *acrtc;
	const struct dc_stream *stream;
	struct dc_flip_addrs addr = { {0} };

	/*
	 * TODO risk of concurrency issues
	 *
	 * This should be guarded by the dal_mutex but we can't do this since
	 * the caller uses a spin_lock on event_lock.
	 *
	 * If we wait on the dal_mutex a second page flip interrupt might come,
	 * spin on the event_lock, disabling interrupts while it does so. At
	 * this point the core can no longer be pre-empted and return to the
	 * thread that waited on the dal_mutex and we're deadlocked.
	 *
	 * With multiple cores the same essentially happens but might just take
	 * a little longer to lock up all cores.
	 *
	 * The reason we should lock on dal_mutex is so that we can be sure
	 * nobody messes with acrtc->stream after we read and check its value.
	 *
	 * We might be able to fix our concurrency issues with a work queue
	 * where we schedule all work items (mode_set, page_flip, etc.) and
	 * execute them one by one. Care needs to be taken to still deal with
	 * any potential concurrency issues arising from interrupt calls.
	 */

	acrtc = adev->mode_info.crtcs[crtc_id];
	stream = acrtc->stream;

	if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_ERROR("flip queue: acrtc %d, already busy\n", acrtc->crtc_id);
		/* In the commit-tail framework this cannot happen */
		BUG();
	}

	/*
	 * Received a page flip call after the display has been reset.
	 * Just return in this case. Everything should be clean-up on reset.
	 */
	if (!stream) {
		WARN_ON(1);
		return;
	}

	addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
	addr.flip_immediate = async;

	if (acrtc->base.state->event &&
	    acrtc->base.state->event->event.base.type ==
			    DRM_EVENT_FLIP_COMPLETE) {
		acrtc->event = acrtc->base.state->event;

		/* Set the flip status */
		acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

		/* Mark this event as consumed */
		acrtc->base.state->event = NULL;
	}

	dc_flip_surface_addrs(adev->dm.dc,
			      dc_stream_get_status(stream)->surfaces,
			      &addr, 1);

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);
}

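/*
 * amdgpu_notify_freesync - ioctl-style callback that pushes the current
 * freesync parameters to the freesync module for every active stream.
 */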
static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* Zero-initialized until the enable flag is actually read from DRM */
	struct mod_freesync_params freesync_params = {0};
	uint8_t num_streams;
	uint8_t i;

	struct amdgpu_device *adev = dev->dev_private;
	int r = 0;

	/* TODO: get the freesync enable flag from DRM */

	num_streams = dc_get_current_stream_count(adev->dm.dc);

	for (i = 0; i < num_streams; i++) {
		const struct dc_stream *stream;

		stream = dc_get_stream_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
					  &stream, 1, &freesync_params);
	}

	return r;
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level, /* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level, /* called unconditionally */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(
	struct device *device,
	struct device_attribute *attr,
	const char *buf,
	size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : ret;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_surfaces_type_carrizo;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_surfaces_type_stoney;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	case CHIP_VEGA10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_surfaces_type_default;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/* Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}

bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
{
	/* TODO */
	return true;
}