1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include "dm_services_types.h"
27 #include "dc.h"
28 #include "dc/inc/core_types.h"
29 
30 #include "vid.h"
31 #include "amdgpu.h"
32 #include "amdgpu_display.h"
33 #include "atom.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
36 
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
42 
43 #include "ivsrcid/ivsrcid_vislands30.h"
44 
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
49 
50 #include <drm/drmP.h>
51 #include <drm/drm_atomic.h>
52 #include <drm/drm_atomic_helper.h>
53 #include <drm/drm_dp_mst_helper.h>
54 #include <drm/drm_fb_helper.h>
55 #include <drm/drm_edid.h>
56 
57 #include "modules/inc/mod_freesync.h"
58 
59 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
60 #include "ivsrcid/irqsrcs_dcn_1_0.h"
61 
62 #include "raven1/DCN/dcn_1_0_offset.h"
63 #include "raven1/DCN/dcn_1_0_sh_mask.h"
64 #include "soc15ip.h"
65 
66 #include "soc15_common.h"
67 #endif
68 
69 #include "modules/inc/mod_freesync.h"
70 
71 #include "i2caux_interface.h"
72 
73 /* basic init/fini API */
74 static int amdgpu_dm_init(struct amdgpu_device *adev);
75 static void amdgpu_dm_fini(struct amdgpu_device *adev);
76 
77 /* initializes drm_device display related structures, based on the information
78  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
79  * drm_encoder, drm_mode_config
80  *
81  * Returns 0 on success
82  */
83 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
84 /* removes and deallocates the drm structures, created by the above function */
85 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
86 
87 static void
88 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
89 
90 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
91 				struct amdgpu_plane *aplane,
92 				unsigned long possible_crtcs);
93 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
94 			       struct drm_plane *plane,
95 			       uint32_t link_index);
96 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
97 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
98 				    uint32_t link_index,
99 				    struct amdgpu_encoder *amdgpu_encoder);
100 static int amdgpu_dm_encoder_init(struct drm_device *dev,
101 				  struct amdgpu_encoder *aencoder,
102 				  uint32_t link_index);
103 
104 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
105 
106 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
107 				   struct drm_atomic_state *state,
108 				   bool nonblock);
109 
110 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
111 
112 static int amdgpu_dm_atomic_check(struct drm_device *dev,
113 				  struct drm_atomic_state *state);
114 
115 
116 
117 
118 static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
119 	DRM_PLANE_TYPE_PRIMARY,
120 	DRM_PLANE_TYPE_PRIMARY,
121 	DRM_PLANE_TYPE_PRIMARY,
122 	DRM_PLANE_TYPE_PRIMARY,
123 	DRM_PLANE_TYPE_PRIMARY,
124 	DRM_PLANE_TYPE_PRIMARY,
125 };
126 
127 static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
128 	DRM_PLANE_TYPE_PRIMARY,
129 	DRM_PLANE_TYPE_PRIMARY,
130 	DRM_PLANE_TYPE_PRIMARY,
131 	DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
132 };
133 
134 static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
135 	DRM_PLANE_TYPE_PRIMARY,
136 	DRM_PLANE_TYPE_PRIMARY,
137 	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
138 };
139 
140 /*
141  * dm_vblank_get_counter
142  *
143  * @brief
144  * Get counter for number of vertical blanks
145  *
146  * @param
147  * struct amdgpu_device *adev - [in] desired amdgpu device
148  * int disp_idx - [in] which CRTC to get the counter from
149  *
150  * @return
151  * Counter for vertical blanks
152  */
153 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
154 {
155 	if (crtc >= adev->mode_info.num_crtc)
156 		return 0;
157 	else {
158 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
159 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
160 				acrtc->base.state);
161 
162 
163 		if (acrtc_state->stream == NULL) {
164 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
165 				  crtc);
166 			return 0;
167 		}
168 
169 		return dc_stream_get_vblank_counter(acrtc_state->stream);
170 	}
171 }
172 
173 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
174 				  u32 *vbl, u32 *position)
175 {
176 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
177 
178 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
179 		return -EINVAL;
180 	else {
181 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
182 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
183 						acrtc->base.state);
184 
185 		if (acrtc_state->stream ==  NULL) {
186 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
187 				  crtc);
188 			return 0;
189 		}
190 
191 		/*
192 		 * TODO rework base driver to use values directly.
193 		 * for now parse it back into reg-format
194 		 */
195 		dc_stream_get_scanoutpos(acrtc_state->stream,
196 					 &v_blank_start,
197 					 &v_blank_end,
198 					 &h_position,
199 					 &v_position);
200 
201 		*position = v_position | (h_position << 16);
202 		*vbl = v_blank_start | (v_blank_end << 16);
203 	}
204 
205 	return 0;
206 }
207 
208 static bool dm_is_idle(void *handle)
209 {
210 	/* XXX todo */
211 	return true;
212 }
213 
214 static int dm_wait_for_idle(void *handle)
215 {
216 	/* XXX todo */
217 	return 0;
218 }
219 
220 static bool dm_check_soft_reset(void *handle)
221 {
222 	return false;
223 }
224 
225 static int dm_soft_reset(void *handle)
226 {
227 	/* XXX todo */
228 	return 0;
229 }
230 
231 static struct amdgpu_crtc *
232 get_crtc_by_otg_inst(struct amdgpu_device *adev,
233 		     int otg_inst)
234 {
235 	struct drm_device *dev = adev->ddev;
236 	struct drm_crtc *crtc;
237 	struct amdgpu_crtc *amdgpu_crtc;
238 
239 	/*
240 	 * following if is check inherited from both functions where this one is
241 	 * used now. Need to be checked why it could happen.
242 	 */
243 	if (otg_inst == -1) {
244 		WARN_ON(1);
245 		return adev->mode_info.crtcs[0];
246 	}
247 
248 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
249 		amdgpu_crtc = to_amdgpu_crtc(crtc);
250 
251 		if (amdgpu_crtc->otg_inst == otg_inst)
252 			return amdgpu_crtc;
253 	}
254 
255 	return NULL;
256 }
257 
258 static void dm_pflip_high_irq(void *interrupt_params)
259 {
260 	struct amdgpu_crtc *amdgpu_crtc;
261 	struct common_irq_params *irq_params = interrupt_params;
262 	struct amdgpu_device *adev = irq_params->adev;
263 	unsigned long flags;
264 
265 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
266 
267 	/* IRQ could occur when in initial stage */
268 	/*TODO work and BO cleanup */
269 	if (amdgpu_crtc == NULL) {
270 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
271 		return;
272 	}
273 
274 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
275 
276 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
277 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
278 						 amdgpu_crtc->pflip_status,
279 						 AMDGPU_FLIP_SUBMITTED,
280 						 amdgpu_crtc->crtc_id,
281 						 amdgpu_crtc);
282 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
283 		return;
284 	}
285 
286 
287 	/* wakeup usersapce */
288 	if (amdgpu_crtc->event) {
289 		/* Update to correct count/ts if racing with vblank irq */
290 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
291 
292 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
293 
294 		/* page flip completed. clean up */
295 		amdgpu_crtc->event = NULL;
296 
297 	} else
298 		WARN_ON(1);
299 
300 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
301 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
302 
303 	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
304 					__func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
305 
306 	drm_crtc_vblank_put(&amdgpu_crtc->base);
307 }
308 
309 static void dm_crtc_high_irq(void *interrupt_params)
310 {
311 	struct common_irq_params *irq_params = interrupt_params;
312 	struct amdgpu_device *adev = irq_params->adev;
313 	uint8_t crtc_index = 0;
314 	struct amdgpu_crtc *acrtc;
315 
316 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
317 
318 	if (acrtc)
319 		crtc_index = acrtc->crtc_id;
320 
321 	drm_handle_vblank(adev->ddev, crtc_index);
322 }
323 
324 static int dm_set_clockgating_state(void *handle,
325 		  enum amd_clockgating_state state)
326 {
327 	return 0;
328 }
329 
330 static int dm_set_powergating_state(void *handle,
331 		  enum amd_powergating_state state)
332 {
333 	return 0;
334 }
335 
336 /* Prototypes of private functions */
337 static int dm_early_init(void* handle);
338 
339 static void hotplug_notify_work_func(struct work_struct *work)
340 {
341 	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
342 	struct drm_device *dev = dm->ddev;
343 
344 	drm_kms_helper_hotplug_event(dev);
345 }
346 
347 #if defined(CONFIG_DRM_AMD_DC_FBC)
348 #include "dal_asic_id.h"
349 /* Allocate memory for FBC compressed data  */
350 /* TODO: Dynamic allocation */
351 #define AMDGPU_FBC_SIZE    (3840 * 2160 * 4)
352 
353 static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
354 {
355 	int r;
356 	struct dm_comressor_info *compressor = &adev->dm.compressor;
357 
358 	if (!compressor->bo_ptr) {
359 		r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
360 				AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
361 				&compressor->gpu_addr, &compressor->cpu_addr);
362 
363 		if (r)
364 			DRM_ERROR("DM: Failed to initialize fbc\n");
365 	}
366 
367 }
368 #endif
369 
370 
371 /* Init display KMS
372  *
373  * Returns 0 on success
374  */
375 static int amdgpu_dm_init(struct amdgpu_device *adev)
376 {
377 	struct dc_init_data init_data;
378 	adev->dm.ddev = adev->ddev;
379 	adev->dm.adev = adev;
380 
381 	/* Zero all the fields */
382 	memset(&init_data, 0, sizeof(init_data));
383 
384 	/* initialize DAL's lock (for SYNC context use) */
385 	spin_lock_init(&adev->dm.dal_lock);
386 
387 	/* initialize DAL's mutex */
388 	mutex_init(&adev->dm.dal_mutex);
389 
390 	if(amdgpu_dm_irq_init(adev)) {
391 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
392 		goto error;
393 	}
394 
395 	init_data.asic_id.chip_family = adev->family;
396 
397 	init_data.asic_id.pci_revision_id = adev->rev_id;
398 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
399 
400 	init_data.asic_id.vram_width = adev->mc.vram_width;
401 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
402 	init_data.asic_id.atombios_base_address =
403 		adev->mode_info.atom_context->bios;
404 
405 	init_data.driver = adev;
406 
407 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
408 
409 	if (!adev->dm.cgs_device) {
410 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
411 		goto error;
412 	}
413 
414 	init_data.cgs_device = adev->dm.cgs_device;
415 
416 	adev->dm.dal = NULL;
417 
418 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
419 
420 	if (amdgpu_dc_log)
421 		init_data.log_mask = DC_DEFAULT_LOG_MASK;
422 	else
423 		init_data.log_mask = DC_MIN_LOG_MASK;
424 
425 #if defined(CONFIG_DRM_AMD_DC_FBC)
426 	if (adev->family == FAMILY_CZ)
427 		amdgpu_dm_initialize_fbc(adev);
428 	init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
429 #endif
430 	/* Display Core create. */
431 	adev->dm.dc = dc_create(&init_data);
432 
433 	if (adev->dm.dc) {
434 		DRM_INFO("Display Core initialized!\n");
435 	} else {
436 		DRM_INFO("Display Core failed to initialize!\n");
437 		goto error;
438 	}
439 
440 	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
441 
442 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
443 	if (!adev->dm.freesync_module) {
444 		DRM_ERROR(
445 		"amdgpu: failed to initialize freesync_module.\n");
446 	} else
447 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
448 				adev->dm.freesync_module);
449 
450 	if (amdgpu_dm_initialize_drm_device(adev)) {
451 		DRM_ERROR(
452 		"amdgpu: failed to initialize sw for display support.\n");
453 		goto error;
454 	}
455 
456 	/* Update the actual used number of crtc */
457 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
458 
459 	/* TODO: Add_display_info? */
460 
461 	/* TODO use dynamic cursor width */
462 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
463 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
464 
465 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
466 		DRM_ERROR(
467 		"amdgpu: failed to initialize sw for display support.\n");
468 		goto error;
469 	}
470 
471 	DRM_DEBUG_DRIVER("KMS initialized.\n");
472 
473 	return 0;
474 error:
475 	amdgpu_dm_fini(adev);
476 
477 	return -1;
478 }
479 
480 static void amdgpu_dm_fini(struct amdgpu_device *adev)
481 {
482 	amdgpu_dm_destroy_drm_device(&adev->dm);
483 	/*
484 	 * TODO: pageflip, vlank interrupt
485 	 *
486 	 * amdgpu_dm_irq_fini(adev);
487 	 */
488 
489 	if (adev->dm.cgs_device) {
490 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
491 		adev->dm.cgs_device = NULL;
492 	}
493 	if (adev->dm.freesync_module) {
494 		mod_freesync_destroy(adev->dm.freesync_module);
495 		adev->dm.freesync_module = NULL;
496 	}
497 	/* DC Destroy TODO: Replace destroy DAL */
498 	if (adev->dm.dc)
499 		dc_destroy(&adev->dm.dc);
500 	return;
501 }
502 
503 static int dm_sw_init(void *handle)
504 {
505 	return 0;
506 }
507 
508 static int dm_sw_fini(void *handle)
509 {
510 	return 0;
511 }
512 
513 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
514 {
515 	struct amdgpu_dm_connector *aconnector;
516 	struct drm_connector *connector;
517 	int ret = 0;
518 
519 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
520 
521 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
522 		aconnector = to_amdgpu_dm_connector(connector);
523 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
524 		    aconnector->mst_mgr.aux) {
525 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
526 					aconnector, aconnector->base.base.id);
527 
528 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
529 			if (ret < 0) {
530 				DRM_ERROR("DM_MST: Failed to start MST\n");
531 				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
532 				return ret;
533 				}
534 			}
535 	}
536 
537 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
538 	return ret;
539 }
540 
541 static int dm_late_init(void *handle)
542 {
543 	struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
544 
545 	return detect_mst_link_for_all_connectors(dev);
546 }
547 
548 static void s3_handle_mst(struct drm_device *dev, bool suspend)
549 {
550 	struct amdgpu_dm_connector *aconnector;
551 	struct drm_connector *connector;
552 
553 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
554 
555 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
556 		   aconnector = to_amdgpu_dm_connector(connector);
557 		   if (aconnector->dc_link->type == dc_connection_mst_branch &&
558 				   !aconnector->mst_port) {
559 
560 			   if (suspend)
561 				   drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
562 			   else
563 				   drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
564 		   }
565 	}
566 
567 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
568 }
569 
570 static int dm_hw_init(void *handle)
571 {
572 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
573 	/* Create DAL display manager */
574 	amdgpu_dm_init(adev);
575 	amdgpu_dm_hpd_init(adev);
576 
577 	return 0;
578 }
579 
580 static int dm_hw_fini(void *handle)
581 {
582 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
583 
584 	amdgpu_dm_hpd_fini(adev);
585 
586 	amdgpu_dm_irq_fini(adev);
587 	amdgpu_dm_fini(adev);
588 	return 0;
589 }
590 
591 static int dm_suspend(void *handle)
592 {
593 	struct amdgpu_device *adev = handle;
594 	struct amdgpu_display_manager *dm = &adev->dm;
595 	int ret = 0;
596 
597 	s3_handle_mst(adev->ddev, true);
598 
599 	amdgpu_dm_irq_suspend(adev);
600 
601 	WARN_ON(adev->dm.cached_state);
602 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
603 
604 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
605 
606 	return ret;
607 }
608 
609 static struct amdgpu_dm_connector *
610 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
611 					     struct drm_crtc *crtc)
612 {
613 	uint32_t i;
614 	struct drm_connector_state *new_con_state;
615 	struct drm_connector *connector;
616 	struct drm_crtc *crtc_from_state;
617 
618 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
619 		crtc_from_state = new_con_state->crtc;
620 
621 		if (crtc_from_state == crtc)
622 			return to_amdgpu_dm_connector(connector);
623 	}
624 
625 	return NULL;
626 }
627 
628 static int dm_resume(void *handle)
629 {
630 	struct amdgpu_device *adev = handle;
631 	struct amdgpu_display_manager *dm = &adev->dm;
632 
633 	/* power on hardware */
634 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
635 
636 	return 0;
637 }
638 
639 int amdgpu_dm_display_resume(struct amdgpu_device *adev)
640 {
641 	struct drm_device *ddev = adev->ddev;
642 	struct amdgpu_display_manager *dm = &adev->dm;
643 	struct amdgpu_dm_connector *aconnector;
644 	struct drm_connector *connector;
645 	struct drm_crtc *crtc;
646 	struct drm_crtc_state *new_crtc_state;
647 	struct dm_crtc_state *dm_new_crtc_state;
648 	struct drm_plane *plane;
649 	struct drm_plane_state *new_plane_state;
650 	struct dm_plane_state *dm_new_plane_state;
651 
652 	int ret = 0;
653 	int i;
654 
655 	/* program HPD filter */
656 	dc_resume(dm->dc);
657 
658 	/* On resume we need to  rewrite the MSTM control bits to enamble MST*/
659 	s3_handle_mst(ddev, false);
660 
661 	/*
662 	 * early enable HPD Rx IRQ, should be done before set mode as short
663 	 * pulse interrupts are used for MST
664 	 */
665 	amdgpu_dm_irq_resume_early(adev);
666 
667 	/* Do detection*/
668 	list_for_each_entry(connector,
669 			&ddev->mode_config.connector_list, head) {
670 		aconnector = to_amdgpu_dm_connector(connector);
671 
672 		/*
673 		 * this is the case when traversing through already created
674 		 * MST connectors, should be skipped
675 		 */
676 		if (aconnector->mst_port)
677 			continue;
678 
679 		mutex_lock(&aconnector->hpd_lock);
680 		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
681 
682 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
683 			aconnector->fake_enable = false;
684 
685 		aconnector->dc_sink = NULL;
686 		amdgpu_dm_update_connector_after_detect(aconnector);
687 		mutex_unlock(&aconnector->hpd_lock);
688 	}
689 
690 	/* Force mode set in atomic comit */
691 	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
692 		new_crtc_state->active_changed = true;
693 
694 	/*
695 	 * atomic_check is expected to create the dc states. We need to release
696 	 * them here, since they were duplicated as part of the suspend
697 	 * procedure.
698 	 */
699 	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
700 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
701 		if (dm_new_crtc_state->stream) {
702 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
703 			dc_stream_release(dm_new_crtc_state->stream);
704 			dm_new_crtc_state->stream = NULL;
705 		}
706 	}
707 
708 	for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
709 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
710 		if (dm_new_plane_state->dc_state) {
711 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
712 			dc_plane_state_release(dm_new_plane_state->dc_state);
713 			dm_new_plane_state->dc_state = NULL;
714 		}
715 	}
716 
717 	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
718 
719 	adev->dm.cached_state = NULL;
720 
721 	amdgpu_dm_irq_resume_late(adev);
722 
723 	return ret;
724 }
725 
726 static const struct amd_ip_funcs amdgpu_dm_funcs = {
727 	.name = "dm",
728 	.early_init = dm_early_init,
729 	.late_init = dm_late_init,
730 	.sw_init = dm_sw_init,
731 	.sw_fini = dm_sw_fini,
732 	.hw_init = dm_hw_init,
733 	.hw_fini = dm_hw_fini,
734 	.suspend = dm_suspend,
735 	.resume = dm_resume,
736 	.is_idle = dm_is_idle,
737 	.wait_for_idle = dm_wait_for_idle,
738 	.check_soft_reset = dm_check_soft_reset,
739 	.soft_reset = dm_soft_reset,
740 	.set_clockgating_state = dm_set_clockgating_state,
741 	.set_powergating_state = dm_set_powergating_state,
742 };
743 
744 const struct amdgpu_ip_block_version dm_ip_block =
745 {
746 	.type = AMD_IP_BLOCK_TYPE_DCE,
747 	.major = 1,
748 	.minor = 0,
749 	.rev = 0,
750 	.funcs = &amdgpu_dm_funcs,
751 };
752 
753 
754 static struct drm_atomic_state *
755 dm_atomic_state_alloc(struct drm_device *dev)
756 {
757 	struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
758 
759 	if (!state)
760 		return NULL;
761 
762 	if (drm_atomic_state_init(dev, &state->base) < 0)
763 		goto fail;
764 
765 	return &state->base;
766 
767 fail:
768 	kfree(state);
769 	return NULL;
770 }
771 
772 static void
773 dm_atomic_state_clear(struct drm_atomic_state *state)
774 {
775 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
776 
777 	if (dm_state->context) {
778 		dc_release_state(dm_state->context);
779 		dm_state->context = NULL;
780 	}
781 
782 	drm_atomic_state_default_clear(state);
783 }
784 
785 static void
786 dm_atomic_state_alloc_free(struct drm_atomic_state *state)
787 {
788 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
789 	drm_atomic_state_default_release(state);
790 	kfree(dm_state);
791 }
792 
793 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
794 	.fb_create = amdgpu_user_framebuffer_create,
795 	.output_poll_changed = amdgpu_output_poll_changed,
796 	.atomic_check = amdgpu_dm_atomic_check,
797 	.atomic_commit = amdgpu_dm_atomic_commit,
798 	.atomic_state_alloc = dm_atomic_state_alloc,
799 	.atomic_state_clear = dm_atomic_state_clear,
800 	.atomic_state_free = dm_atomic_state_alloc_free
801 };
802 
803 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
804 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
805 };
806 
807 static void
808 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
809 {
810 	struct drm_connector *connector = &aconnector->base;
811 	struct drm_device *dev = connector->dev;
812 	struct dc_sink *sink;
813 
814 	/* MST handled by drm_mst framework */
815 	if (aconnector->mst_mgr.mst_state == true)
816 		return;
817 
818 
819 	sink = aconnector->dc_link->local_sink;
820 
821 	/* Edid mgmt connector gets first update only in mode_valid hook and then
822 	 * the connector sink is set to either fake or physical sink depends on link status.
823 	 * don't do it here if u are during boot
824 	 */
825 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
826 			&& aconnector->dc_em_sink) {
827 
828 		/* For S3 resume with headless use eml_sink to fake stream
829 		 * because on resume connecotr->sink is set ti NULL
830 		 */
831 		mutex_lock(&dev->mode_config.mutex);
832 
833 		if (sink) {
834 			if (aconnector->dc_sink) {
835 				amdgpu_dm_remove_sink_from_freesync_module(
836 								connector);
837 				/* retain and release bellow are used for
838 				 * bump up refcount for sink because the link don't point
839 				 * to it anymore after disconnect so on next crtc to connector
840 				 * reshuffle by UMD we will get into unwanted dc_sink release
841 				 */
842 				if (aconnector->dc_sink != aconnector->dc_em_sink)
843 					dc_sink_release(aconnector->dc_sink);
844 			}
845 			aconnector->dc_sink = sink;
846 			amdgpu_dm_add_sink_to_freesync_module(
847 						connector, aconnector->edid);
848 		} else {
849 			amdgpu_dm_remove_sink_from_freesync_module(connector);
850 			if (!aconnector->dc_sink)
851 				aconnector->dc_sink = aconnector->dc_em_sink;
852 			else if (aconnector->dc_sink != aconnector->dc_em_sink)
853 				dc_sink_retain(aconnector->dc_sink);
854 		}
855 
856 		mutex_unlock(&dev->mode_config.mutex);
857 		return;
858 	}
859 
860 	/*
861 	 * TODO: temporary guard to look for proper fix
862 	 * if this sink is MST sink, we should not do anything
863 	 */
864 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
865 		return;
866 
867 	if (aconnector->dc_sink == sink) {
868 		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
869 		 * Do nothing!! */
870 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
871 				aconnector->connector_id);
872 		return;
873 	}
874 
875 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
876 		aconnector->connector_id, aconnector->dc_sink, sink);
877 
878 	mutex_lock(&dev->mode_config.mutex);
879 
880 	/* 1. Update status of the drm connector
881 	 * 2. Send an event and let userspace tell us what to do */
882 	if (sink) {
883 		/* TODO: check if we still need the S3 mode update workaround.
884 		 * If yes, put it here. */
885 		if (aconnector->dc_sink)
886 			amdgpu_dm_remove_sink_from_freesync_module(
887 							connector);
888 
889 		aconnector->dc_sink = sink;
890 		if (sink->dc_edid.length == 0) {
891 			aconnector->edid = NULL;
892 		} else {
893 			aconnector->edid =
894 				(struct edid *) sink->dc_edid.raw_edid;
895 
896 
897 			drm_mode_connector_update_edid_property(connector,
898 					aconnector->edid);
899 		}
900 		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
901 
902 	} else {
903 		amdgpu_dm_remove_sink_from_freesync_module(connector);
904 		drm_mode_connector_update_edid_property(connector, NULL);
905 		aconnector->num_modes = 0;
906 		aconnector->dc_sink = NULL;
907 	}
908 
909 	mutex_unlock(&dev->mode_config.mutex);
910 }
911 
912 static void handle_hpd_irq(void *param)
913 {
914 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
915 	struct drm_connector *connector = &aconnector->base;
916 	struct drm_device *dev = connector->dev;
917 
918 	/* In case of failure or MST no need to update connector status or notify the OS
919 	 * since (for MST case) MST does this in it's own context.
920 	 */
921 	mutex_lock(&aconnector->hpd_lock);
922 
923 	if (aconnector->fake_enable)
924 		aconnector->fake_enable = false;
925 
926 	if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
927 		amdgpu_dm_update_connector_after_detect(aconnector);
928 
929 
930 		drm_modeset_lock_all(dev);
931 		dm_restore_drm_connector_state(dev, connector);
932 		drm_modeset_unlock_all(dev);
933 
934 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
935 			drm_kms_helper_hotplug_event(dev);
936 	}
937 	mutex_unlock(&aconnector->hpd_lock);
938 
939 }
940 
941 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
942 {
943 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
944 	uint8_t dret;
945 	bool new_irq_handled = false;
946 	int dpcd_addr;
947 	int dpcd_bytes_to_read;
948 
949 	const int max_process_count = 30;
950 	int process_count = 0;
951 
952 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
953 
954 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
955 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
956 		/* DPCD 0x200 - 0x201 for downstream IRQ */
957 		dpcd_addr = DP_SINK_COUNT;
958 	} else {
959 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
960 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
961 		dpcd_addr = DP_SINK_COUNT_ESI;
962 	}
963 
964 	dret = drm_dp_dpcd_read(
965 		&aconnector->dm_dp_aux.aux,
966 		dpcd_addr,
967 		esi,
968 		dpcd_bytes_to_read);
969 
970 	while (dret == dpcd_bytes_to_read &&
971 		process_count < max_process_count) {
972 		uint8_t retry;
973 		dret = 0;
974 
975 		process_count++;
976 
977 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
978 		/* handle HPD short pulse irq */
979 		if (aconnector->mst_mgr.mst_state)
980 			drm_dp_mst_hpd_irq(
981 				&aconnector->mst_mgr,
982 				esi,
983 				&new_irq_handled);
984 
985 		if (new_irq_handled) {
986 			/* ACK at DPCD to notify down stream */
987 			const int ack_dpcd_bytes_to_write =
988 				dpcd_bytes_to_read - 1;
989 
990 			for (retry = 0; retry < 3; retry++) {
991 				uint8_t wret;
992 
993 				wret = drm_dp_dpcd_write(
994 					&aconnector->dm_dp_aux.aux,
995 					dpcd_addr + 1,
996 					&esi[1],
997 					ack_dpcd_bytes_to_write);
998 				if (wret == ack_dpcd_bytes_to_write)
999 					break;
1000 			}
1001 
1002 			/* check if there is new irq to be handle */
1003 			dret = drm_dp_dpcd_read(
1004 				&aconnector->dm_dp_aux.aux,
1005 				dpcd_addr,
1006 				esi,
1007 				dpcd_bytes_to_read);
1008 
1009 			new_irq_handled = false;
1010 		} else {
1011 			break;
1012 		}
1013 	}
1014 
1015 	if (process_count == max_process_count)
1016 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
1017 }
1018 
1019 static void handle_hpd_rx_irq(void *param)
1020 {
1021 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1022 	struct drm_connector *connector = &aconnector->base;
1023 	struct drm_device *dev = connector->dev;
1024 	struct dc_link *dc_link = aconnector->dc_link;
1025 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
1026 
1027 	/* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
1028 	 * conflict, after implement i2c helper, this mutex should be
1029 	 * retired.
1030 	 */
1031 	if (dc_link->type != dc_connection_mst_branch)
1032 		mutex_lock(&aconnector->hpd_lock);
1033 
1034 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
1035 			!is_mst_root_connector) {
1036 		/* Downstream Port status changed. */
1037 		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1038 			amdgpu_dm_update_connector_after_detect(aconnector);
1039 
1040 
1041 			drm_modeset_lock_all(dev);
1042 			dm_restore_drm_connector_state(dev, connector);
1043 			drm_modeset_unlock_all(dev);
1044 
1045 			drm_kms_helper_hotplug_event(dev);
1046 		}
1047 	}
1048 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
1049 	    (dc_link->type == dc_connection_mst_branch))
1050 		dm_handle_hpd_rx_irq(aconnector);
1051 
1052 	if (dc_link->type != dc_connection_mst_branch)
1053 		mutex_unlock(&aconnector->hpd_lock);
1054 }
1055 
1056 static void register_hpd_handlers(struct amdgpu_device *adev)
1057 {
1058 	struct drm_device *dev = adev->ddev;
1059 	struct drm_connector *connector;
1060 	struct amdgpu_dm_connector *aconnector;
1061 	const struct dc_link *dc_link;
1062 	struct dc_interrupt_params int_params = {0};
1063 
1064 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1065 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1066 
1067 	list_for_each_entry(connector,
1068 			&dev->mode_config.connector_list, head)	{
1069 
1070 		aconnector = to_amdgpu_dm_connector(connector);
1071 		dc_link = aconnector->dc_link;
1072 
1073 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1074 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1075 			int_params.irq_source = dc_link->irq_source_hpd;
1076 
1077 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
1078 					handle_hpd_irq,
1079 					(void *) aconnector);
1080 		}
1081 
1082 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1083 
1084 			/* Also register for DP short pulse (hpd_rx). */
1085 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1086 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
1087 
1088 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
1089 					handle_hpd_rx_irq,
1090 					(void *) aconnector);
1091 		}
1092 	}
1093 }
1094 
1095 /* Register IRQ sources and initialize IRQ callbacks */
1096 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1097 {
1098 	struct dc *dc = adev->dm.dc;
1099 	struct common_irq_params *c_irq_params;
1100 	struct dc_interrupt_params int_params = {0};
1101 	int r;
1102 	int i;
1103 	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
1104 
1105 	if (adev->asic_type == CHIP_VEGA10 ||
1106 	    adev->asic_type == CHIP_RAVEN)
1107 		client_id = AMDGPU_IH_CLIENTID_DCE;
1108 
1109 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1110 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1111 
1112 	/* Actions of amdgpu_irq_add_id():
1113 	 * 1. Register a set() function with base driver.
1114 	 *    Base driver will call set() function to enable/disable an
1115 	 *    interrupt in DC hardware.
1116 	 * 2. Register amdgpu_dm_irq_handler().
1117 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1118 	 *    coming from DC hardware.
1119 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1120 	 *    for acknowledging and handling. */
1121 
1122 	/* Use VBLANK interrupt */
1123 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
1124 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
1125 		if (r) {
1126 			DRM_ERROR("Failed to add crtc irq id!\n");
1127 			return r;
1128 		}
1129 
1130 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1131 		int_params.irq_source =
1132 			dc_interrupt_to_irq_source(dc, i, 0);
1133 
1134 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1135 
1136 		c_irq_params->adev = adev;
1137 		c_irq_params->irq_src = int_params.irq_source;
1138 
1139 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1140 				dm_crtc_high_irq, c_irq_params);
1141 	}
1142 
1143 	/* Use GRPH_PFLIP interrupt */
1144 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
1145 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
1146 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
1147 		if (r) {
1148 			DRM_ERROR("Failed to add page flip irq id!\n");
1149 			return r;
1150 		}
1151 
1152 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1153 		int_params.irq_source =
1154 			dc_interrupt_to_irq_source(dc, i, 0);
1155 
1156 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1157 
1158 		c_irq_params->adev = adev;
1159 		c_irq_params->irq_src = int_params.irq_source;
1160 
1161 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1162 				dm_pflip_high_irq, c_irq_params);
1163 
1164 	}
1165 
1166 	/* HPD */
1167 	r = amdgpu_irq_add_id(adev, client_id,
1168 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
1169 	if (r) {
1170 		DRM_ERROR("Failed to add hpd irq id!\n");
1171 		return r;
1172 	}
1173 
1174 	register_hpd_handlers(adev);
1175 
1176 	return 0;
1177 }
1178 
1179 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1180 /* Register IRQ sources and initialize IRQ callbacks */
1181 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1182 {
1183 	struct dc *dc = adev->dm.dc;
1184 	struct common_irq_params *c_irq_params;
1185 	struct dc_interrupt_params int_params = {0};
1186 	int r;
1187 	int i;
1188 
1189 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1190 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1191 
1192 	/* Actions of amdgpu_irq_add_id():
1193 	 * 1. Register a set() function with base driver.
1194 	 *    Base driver will call set() function to enable/disable an
1195 	 *    interrupt in DC hardware.
1196 	 * 2. Register amdgpu_dm_irq_handler().
1197 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1198 	 *    coming from DC hardware.
1199 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1200 	 *    for acknowledging and handling.
1201 	 * */
1202 
1203 	/* Use VSTARTUP interrupt */
1204 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
1205 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
1206 			i++) {
1207 		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);
1208 
1209 		if (r) {
1210 			DRM_ERROR("Failed to add crtc irq id!\n");
1211 			return r;
1212 		}
1213 
1214 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1215 		int_params.irq_source =
1216 			dc_interrupt_to_irq_source(dc, i, 0);
1217 
1218 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1219 
1220 		c_irq_params->adev = adev;
1221 		c_irq_params->irq_src = int_params.irq_source;
1222 
1223 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1224 				dm_crtc_high_irq, c_irq_params);
1225 	}
1226 
1227 	/* Use GRPH_PFLIP interrupt */
1228 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1229 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1230 			i++) {
1231 		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1232 		if (r) {
1233 			DRM_ERROR("Failed to add page flip irq id!\n");
1234 			return r;
1235 		}
1236 
1237 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1238 		int_params.irq_source =
1239 			dc_interrupt_to_irq_source(dc, i, 0);
1240 
1241 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1242 
1243 		c_irq_params->adev = adev;
1244 		c_irq_params->irq_src = int_params.irq_source;
1245 
1246 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1247 				dm_pflip_high_irq, c_irq_params);
1248 
1249 	}
1250 
1251 	/* HPD */
1252 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
1253 			&adev->hpd_irq);
1254 	if (r) {
1255 		DRM_ERROR("Failed to add hpd irq id!\n");
1256 		return r;
1257 	}
1258 
1259 	register_hpd_handlers(adev);
1260 
1261 	return 0;
1262 }
1263 #endif
1264 
1265 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1266 {
1267 	int r;
1268 
1269 	adev->mode_info.mode_config_initialized = true;
1270 
1271 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1272 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1273 
1274 	adev->ddev->mode_config.max_width = 16384;
1275 	adev->ddev->mode_config.max_height = 16384;
1276 
1277 	adev->ddev->mode_config.preferred_depth = 24;
1278 	adev->ddev->mode_config.prefer_shadow = 1;
1279 	/* indicate support of immediate flip */
1280 	adev->ddev->mode_config.async_page_flip = true;
1281 
1282 	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
1283 
1284 	r = amdgpu_modeset_create_props(adev);
1285 	if (r)
1286 		return r;
1287 
1288 	return 0;
1289 }
1290 
1291 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1292 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1293 
1294 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1295 {
1296 	struct amdgpu_display_manager *dm = bl_get_data(bd);
1297 
1298 	if (dc_link_set_backlight_level(dm->backlight_link,
1299 			bd->props.brightness, 0, 0))
1300 		return 0;
1301 	else
1302 		return 1;
1303 }
1304 
1305 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1306 {
1307 	return bd->props.brightness;
1308 }
1309 
1310 static const struct backlight_ops amdgpu_dm_backlight_ops = {
1311 	.get_brightness = amdgpu_dm_backlight_get_brightness,
1312 	.update_status	= amdgpu_dm_backlight_update_status,
1313 };
1314 
1315 static void
1316 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1317 {
1318 	char bl_name[16];
1319 	struct backlight_properties props = { 0 };
1320 
1321 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1322 	props.type = BACKLIGHT_RAW;
1323 
1324 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1325 			dm->adev->ddev->primary->index);
1326 
1327 	dm->backlight_dev = backlight_device_register(bl_name,
1328 			dm->adev->ddev->dev,
1329 			dm,
1330 			&amdgpu_dm_backlight_ops,
1331 			&props);
1332 
1333 	if (IS_ERR(dm->backlight_dev))
1334 		DRM_ERROR("DM: Backlight registration failed!\n");
1335 	else
1336 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1337 }
1338 
1339 #endif
1340 
1341 /* In this architecture, the association
1342  * connector -> encoder -> crtc
1343  * id not really requried. The crtc and connector will hold the
1344  * display_index as an abstraction to use with DAL component
1345  *
1346  * Returns 0 on success
1347  */
1348 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1349 {
1350 	struct amdgpu_display_manager *dm = &adev->dm;
1351 	uint32_t i;
1352 	struct amdgpu_dm_connector *aconnector = NULL;
1353 	struct amdgpu_encoder *aencoder = NULL;
1354 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
1355 	uint32_t link_cnt;
1356 	unsigned long possible_crtcs;
1357 
1358 	link_cnt = dm->dc->caps.max_links;
1359 	if (amdgpu_dm_mode_config_init(dm->adev)) {
1360 		DRM_ERROR("DM: Failed to initialize mode config\n");
1361 		return -1;
1362 	}
1363 
1364 	for (i = 0; i < dm->dc->caps.max_planes; i++) {
1365 		struct amdgpu_plane *plane;
1366 
1367 		plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
1368 		mode_info->planes[i] = plane;
1369 
1370 		if (!plane) {
1371 			DRM_ERROR("KMS: Failed to allocate plane\n");
1372 			goto fail;
1373 		}
1374 		plane->base.type = mode_info->plane_type[i];
1375 
1376 		/*
1377 		 * HACK: IGT tests expect that each plane can only have one
1378 		 * one possible CRTC. For now, set one CRTC for each
1379 		 * plane that is not an underlay, but still allow multiple
1380 		 * CRTCs for underlay planes.
1381 		 */
1382 		possible_crtcs = 1 << i;
1383 		if (i >= dm->dc->caps.max_streams)
1384 			possible_crtcs = 0xff;
1385 
1386 		if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
1387 			DRM_ERROR("KMS: Failed to initialize plane\n");
1388 			goto fail;
1389 		}
1390 	}
1391 
1392 	for (i = 0; i < dm->dc->caps.max_streams; i++)
1393 		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
1394 			DRM_ERROR("KMS: Failed to initialize crtc\n");
1395 			goto fail;
1396 		}
1397 
1398 	dm->display_indexes_num = dm->dc->caps.max_streams;
1399 
1400 	/* loops over all connectors on the board */
1401 	for (i = 0; i < link_cnt; i++) {
1402 
1403 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1404 			DRM_ERROR(
1405 				"KMS: Cannot support more than %d display indexes\n",
1406 					AMDGPU_DM_MAX_DISPLAY_INDEX);
1407 			continue;
1408 		}
1409 
1410 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1411 		if (!aconnector)
1412 			goto fail;
1413 
1414 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
1415 		if (!aencoder)
1416 			goto fail;
1417 
1418 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
1419 			DRM_ERROR("KMS: Failed to initialize encoder\n");
1420 			goto fail;
1421 		}
1422 
1423 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
1424 			DRM_ERROR("KMS: Failed to initialize connector\n");
1425 			goto fail;
1426 		}
1427 
1428 		if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
1429 				DETECT_REASON_BOOT))
1430 			amdgpu_dm_update_connector_after_detect(aconnector);
1431 	}
1432 
1433 	/* Software is initialized. Now we can register interrupt handlers. */
1434 	switch (adev->asic_type) {
1435 	case CHIP_BONAIRE:
1436 	case CHIP_HAWAII:
1437 	case CHIP_KAVERI:
1438 	case CHIP_KABINI:
1439 	case CHIP_MULLINS:
1440 	case CHIP_TONGA:
1441 	case CHIP_FIJI:
1442 	case CHIP_CARRIZO:
1443 	case CHIP_STONEY:
1444 	case CHIP_POLARIS11:
1445 	case CHIP_POLARIS10:
1446 	case CHIP_POLARIS12:
1447 	case CHIP_VEGA10:
1448 		if (dce110_register_irq_handlers(dm->adev)) {
1449 			DRM_ERROR("DM: Failed to initialize IRQ\n");
1450 			goto fail;
1451 		}
1452 		break;
1453 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1454 	case CHIP_RAVEN:
1455 		if (dcn10_register_irq_handlers(dm->adev)) {
1456 			DRM_ERROR("DM: Failed to initialize IRQ\n");
1457 			goto fail;
1458 		}
1459 		/*
1460 		 * Temporary disable until pplib/smu interaction is implemented
1461 		 */
1462 		dm->dc->debug.disable_stutter = true;
1463 		break;
1464 #endif
1465 	default:
1466 		DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1467 		goto fail;
1468 	}
1469 
1470 	return 0;
1471 fail:
1472 	kfree(aencoder);
1473 	kfree(aconnector);
1474 	for (i = 0; i < dm->dc->caps.max_planes; i++)
1475 		kfree(mode_info->planes[i]);
1476 	return -1;
1477 }
1478 
1479 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1480 {
1481 	drm_mode_config_cleanup(dm->ddev);
1482 	return;
1483 }
1484 
1485 /******************************************************************************
1486  * amdgpu_display_funcs functions
1487  *****************************************************************************/
1488 
1489 /**
1490  * dm_bandwidth_update - program display watermarks
1491  *
1492  * @adev: amdgpu_device pointer
1493  *
1494  * Calculate and program the display watermarks and line buffer allocation.
1495  */
1496 static void dm_bandwidth_update(struct amdgpu_device *adev)
1497 {
1498 	/* TODO: implement later */
1499 }
1500 
1501 static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
1502 				     u8 level)
1503 {
1504 	/* TODO: translate amdgpu_encoder to display_index and call DAL */
1505 }
1506 
1507 static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
1508 {
1509 	/* TODO: translate amdgpu_encoder to display_index and call DAL */
1510 	return 0;
1511 }
1512 
1513 static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1514 				struct drm_file *filp)
1515 {
1516 	struct mod_freesync_params freesync_params;
1517 	uint8_t num_streams;
1518 	uint8_t i;
1519 
1520 	struct amdgpu_device *adev = dev->dev_private;
1521 	int r = 0;
1522 
1523 	/* Get freesync enable flag from DRM */
1524 
1525 	num_streams = dc_get_current_stream_count(adev->dm.dc);
1526 
1527 	for (i = 0; i < num_streams; i++) {
1528 		struct dc_stream_state *stream;
1529 		stream = dc_get_stream_at_index(adev->dm.dc, i);
1530 
1531 		mod_freesync_update_state(adev->dm.freesync_module,
1532 					  &stream, 1, &freesync_params);
1533 	}
1534 
1535 	return r;
1536 }
1537 
1538 static const struct amdgpu_display_funcs dm_display_funcs = {
1539 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
1540 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
1541 	.vblank_wait = NULL,
1542 	.backlight_set_level =
1543 		dm_set_backlight_level,/* called unconditionally */
1544 	.backlight_get_level =
1545 		dm_get_backlight_level,/* called unconditionally */
1546 	.hpd_sense = NULL,/* called unconditionally */
1547 	.hpd_set_polarity = NULL, /* called unconditionally */
1548 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
1549 	.page_flip_get_scanoutpos =
1550 		dm_crtc_get_scanoutpos,/* called unconditionally */
1551 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
1552 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
1553 	.notify_freesync = amdgpu_notify_freesync,
1554 
1555 };
1556 
1557 #if defined(CONFIG_DEBUG_KERNEL_DC)
1558 
1559 static ssize_t s3_debug_store(struct device *device,
1560 			      struct device_attribute *attr,
1561 			      const char *buf,
1562 			      size_t count)
1563 {
1564 	int ret;
1565 	int s3_state;
1566 	struct pci_dev *pdev = to_pci_dev(device);
1567 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
1568 	struct amdgpu_device *adev = drm_dev->dev_private;
1569 
1570 	ret = kstrtoint(buf, 0, &s3_state);
1571 
1572 	if (ret == 0) {
1573 		if (s3_state) {
1574 			dm_resume(adev);
1575 			amdgpu_dm_display_resume(adev);
1576 			drm_kms_helper_hotplug_event(adev->ddev);
1577 		} else
1578 			dm_suspend(adev);
1579 	}
1580 
1581 	return ret == 0 ? count : 0;
1582 }
1583 
1584 DEVICE_ATTR_WO(s3_debug);
1585 
1586 #endif
1587 
1588 static int dm_early_init(void *handle)
1589 {
1590 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1591 
1592 	adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
1593 
1594 	switch (adev->asic_type) {
1595 	case CHIP_BONAIRE:
1596 	case CHIP_HAWAII:
1597 		adev->mode_info.num_crtc = 6;
1598 		adev->mode_info.num_hpd = 6;
1599 		adev->mode_info.num_dig = 6;
1600 		adev->mode_info.plane_type = dm_plane_type_default;
1601 		break;
1602 	case CHIP_KAVERI:
1603 		adev->mode_info.num_crtc = 4;
1604 		adev->mode_info.num_hpd = 6;
1605 		adev->mode_info.num_dig = 7;
1606 		adev->mode_info.plane_type = dm_plane_type_default;
1607 		break;
1608 	case CHIP_KABINI:
1609 	case CHIP_MULLINS:
1610 		adev->mode_info.num_crtc = 2;
1611 		adev->mode_info.num_hpd = 6;
1612 		adev->mode_info.num_dig = 6;
1613 		adev->mode_info.plane_type = dm_plane_type_default;
1614 		break;
1615 	case CHIP_FIJI:
1616 	case CHIP_TONGA:
1617 		adev->mode_info.num_crtc = 6;
1618 		adev->mode_info.num_hpd = 6;
1619 		adev->mode_info.num_dig = 7;
1620 		adev->mode_info.plane_type = dm_plane_type_default;
1621 		break;
1622 	case CHIP_CARRIZO:
1623 		adev->mode_info.num_crtc = 3;
1624 		adev->mode_info.num_hpd = 6;
1625 		adev->mode_info.num_dig = 9;
1626 		adev->mode_info.plane_type = dm_plane_type_carizzo;
1627 		break;
1628 	case CHIP_STONEY:
1629 		adev->mode_info.num_crtc = 2;
1630 		adev->mode_info.num_hpd = 6;
1631 		adev->mode_info.num_dig = 9;
1632 		adev->mode_info.plane_type = dm_plane_type_stoney;
1633 		break;
1634 	case CHIP_POLARIS11:
1635 	case CHIP_POLARIS12:
1636 		adev->mode_info.num_crtc = 5;
1637 		adev->mode_info.num_hpd = 5;
1638 		adev->mode_info.num_dig = 5;
1639 		adev->mode_info.plane_type = dm_plane_type_default;
1640 		break;
1641 	case CHIP_POLARIS10:
1642 		adev->mode_info.num_crtc = 6;
1643 		adev->mode_info.num_hpd = 6;
1644 		adev->mode_info.num_dig = 6;
1645 		adev->mode_info.plane_type = dm_plane_type_default;
1646 		break;
1647 	case CHIP_VEGA10:
1648 		adev->mode_info.num_crtc = 6;
1649 		adev->mode_info.num_hpd = 6;
1650 		adev->mode_info.num_dig = 6;
1651 		adev->mode_info.plane_type = dm_plane_type_default;
1652 		break;
1653 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1654 	case CHIP_RAVEN:
1655 		adev->mode_info.num_crtc = 4;
1656 		adev->mode_info.num_hpd = 4;
1657 		adev->mode_info.num_dig = 4;
1658 		adev->mode_info.plane_type = dm_plane_type_default;
1659 		break;
1660 #endif
1661 	default:
1662 		DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1663 		return -EINVAL;
1664 	}
1665 
1666 	amdgpu_dm_set_irq_funcs(adev);
1667 
1668 	if (adev->mode_info.funcs == NULL)
1669 		adev->mode_info.funcs = &dm_display_funcs;
1670 
1671 	/* Note: Do NOT change adev->audio_endpt_rreg and
1672 	 * adev->audio_endpt_wreg because they are initialised in
1673 	 * amdgpu_device_init() */
1674 #if defined(CONFIG_DEBUG_KERNEL_DC)
1675 	device_create_file(
1676 		adev->ddev->dev,
1677 		&dev_attr_s3_debug);
1678 #endif
1679 
1680 	return 0;
1681 }
1682 
1683 static bool modeset_required(struct drm_crtc_state *crtc_state,
1684 			     struct dc_stream_state *new_stream,
1685 			     struct dc_stream_state *old_stream)
1686 {
1687 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
1688 		return false;
1689 
1690 	if (!crtc_state->enable)
1691 		return false;
1692 
1693 	return crtc_state->active;
1694 }
1695 
1696 static bool modereset_required(struct drm_crtc_state *crtc_state)
1697 {
1698 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
1699 		return false;
1700 
1701 	return !crtc_state->enable || !crtc_state->active;
1702 }
1703 
1704 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
1705 {
1706 	drm_encoder_cleanup(encoder);
1707 	kfree(encoder);
1708 }
1709 
1710 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
1711 	.destroy = amdgpu_dm_encoder_destroy,
1712 };
1713 
1714 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
1715 					struct dc_plane_state *plane_state)
1716 {
1717 	plane_state->src_rect.x = state->src_x >> 16;
1718 	plane_state->src_rect.y = state->src_y >> 16;
1719 	/*we ignore for now mantissa and do not to deal with floating pixels :(*/
1720 	plane_state->src_rect.width = state->src_w >> 16;
1721 
1722 	if (plane_state->src_rect.width == 0)
1723 		return false;
1724 
1725 	plane_state->src_rect.height = state->src_h >> 16;
1726 	if (plane_state->src_rect.height == 0)
1727 		return false;
1728 
1729 	plane_state->dst_rect.x = state->crtc_x;
1730 	plane_state->dst_rect.y = state->crtc_y;
1731 
1732 	if (state->crtc_w == 0)
1733 		return false;
1734 
1735 	plane_state->dst_rect.width = state->crtc_w;
1736 
1737 	if (state->crtc_h == 0)
1738 		return false;
1739 
1740 	plane_state->dst_rect.height = state->crtc_h;
1741 
1742 	plane_state->clip_rect = plane_state->dst_rect;
1743 
1744 	switch (state->rotation & DRM_MODE_ROTATE_MASK) {
1745 	case DRM_MODE_ROTATE_0:
1746 		plane_state->rotation = ROTATION_ANGLE_0;
1747 		break;
1748 	case DRM_MODE_ROTATE_90:
1749 		plane_state->rotation = ROTATION_ANGLE_90;
1750 		break;
1751 	case DRM_MODE_ROTATE_180:
1752 		plane_state->rotation = ROTATION_ANGLE_180;
1753 		break;
1754 	case DRM_MODE_ROTATE_270:
1755 		plane_state->rotation = ROTATION_ANGLE_270;
1756 		break;
1757 	default:
1758 		plane_state->rotation = ROTATION_ANGLE_0;
1759 		break;
1760 	}
1761 
1762 	return true;
1763 }
1764 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1765 		       uint64_t *tiling_flags)
1766 {
1767 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1768 	int r = amdgpu_bo_reserve(rbo, false);
1769 
1770 	if (unlikely(r)) {
1771 		// Don't show error msg. when return -ERESTARTSYS
1772 		if (r != -ERESTARTSYS)
1773 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
1774 		return r;
1775 	}
1776 
1777 	if (tiling_flags)
1778 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
1779 
1780 	amdgpu_bo_unreserve(rbo);
1781 
1782 	return r;
1783 }
1784 
1785 static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
1786 					 struct dc_plane_state *plane_state,
1787 					 const struct amdgpu_framebuffer *amdgpu_fb)
1788 {
1789 	uint64_t tiling_flags;
1790 	unsigned int awidth;
1791 	const struct drm_framebuffer *fb = &amdgpu_fb->base;
1792 	int ret = 0;
1793 	struct drm_format_name_buf format_name;
1794 
1795 	ret = get_fb_info(
1796 		amdgpu_fb,
1797 		&tiling_flags);
1798 
1799 	if (ret)
1800 		return ret;
1801 
1802 	switch (fb->format->format) {
1803 	case DRM_FORMAT_C8:
1804 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
1805 		break;
1806 	case DRM_FORMAT_RGB565:
1807 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
1808 		break;
1809 	case DRM_FORMAT_XRGB8888:
1810 	case DRM_FORMAT_ARGB8888:
1811 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
1812 		break;
1813 	case DRM_FORMAT_XRGB2101010:
1814 	case DRM_FORMAT_ARGB2101010:
1815 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
1816 		break;
1817 	case DRM_FORMAT_XBGR2101010:
1818 	case DRM_FORMAT_ABGR2101010:
1819 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
1820 		break;
1821 	case DRM_FORMAT_NV21:
1822 		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
1823 		break;
1824 	case DRM_FORMAT_NV12:
1825 		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
1826 		break;
1827 	default:
1828 		DRM_ERROR("Unsupported screen format %s\n",
1829 			  drm_get_format_name(fb->format->format, &format_name));
1830 		return -EINVAL;
1831 	}
1832 
1833 	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
1834 		plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
1835 		plane_state->plane_size.grph.surface_size.x = 0;
1836 		plane_state->plane_size.grph.surface_size.y = 0;
1837 		plane_state->plane_size.grph.surface_size.width = fb->width;
1838 		plane_state->plane_size.grph.surface_size.height = fb->height;
1839 		plane_state->plane_size.grph.surface_pitch =
1840 				fb->pitches[0] / fb->format->cpp[0];
1841 		/* TODO: unhardcode */
1842 		plane_state->color_space = COLOR_SPACE_SRGB;
1843 
1844 	} else {
1845 		awidth = ALIGN(fb->width, 64);
1846 		plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
1847 		plane_state->plane_size.video.luma_size.x = 0;
1848 		plane_state->plane_size.video.luma_size.y = 0;
1849 		plane_state->plane_size.video.luma_size.width = awidth;
1850 		plane_state->plane_size.video.luma_size.height = fb->height;
1851 		/* TODO: unhardcode */
1852 		plane_state->plane_size.video.luma_pitch = awidth;
1853 
1854 		plane_state->plane_size.video.chroma_size.x = 0;
1855 		plane_state->plane_size.video.chroma_size.y = 0;
1856 		plane_state->plane_size.video.chroma_size.width = awidth;
1857 		plane_state->plane_size.video.chroma_size.height = fb->height;
1858 		plane_state->plane_size.video.chroma_pitch = awidth / 2;
1859 
1860 		/* TODO: unhardcode */
1861 		plane_state->color_space = COLOR_SPACE_YCBCR709;
1862 	}
1863 
1864 	memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
1865 
1866 	/* Fill GFX8 params */
1867 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
1868 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
1869 
1870 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1871 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1872 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1873 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1874 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1875 
1876 		/* XXX fix me for VI */
1877 		plane_state->tiling_info.gfx8.num_banks = num_banks;
1878 		plane_state->tiling_info.gfx8.array_mode =
1879 				DC_ARRAY_2D_TILED_THIN1;
1880 		plane_state->tiling_info.gfx8.tile_split = tile_split;
1881 		plane_state->tiling_info.gfx8.bank_width = bankw;
1882 		plane_state->tiling_info.gfx8.bank_height = bankh;
1883 		plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
1884 		plane_state->tiling_info.gfx8.tile_mode =
1885 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
1886 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
1887 			== DC_ARRAY_1D_TILED_THIN1) {
1888 		plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
1889 	}
1890 
1891 	plane_state->tiling_info.gfx8.pipe_config =
1892 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1893 
1894 	if (adev->asic_type == CHIP_VEGA10 ||
1895 	    adev->asic_type == CHIP_RAVEN) {
1896 		/* Fill GFX9 params */
1897 		plane_state->tiling_info.gfx9.num_pipes =
1898 			adev->gfx.config.gb_addr_config_fields.num_pipes;
1899 		plane_state->tiling_info.gfx9.num_banks =
1900 			adev->gfx.config.gb_addr_config_fields.num_banks;
1901 		plane_state->tiling_info.gfx9.pipe_interleave =
1902 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
1903 		plane_state->tiling_info.gfx9.num_shader_engines =
1904 			adev->gfx.config.gb_addr_config_fields.num_se;
1905 		plane_state->tiling_info.gfx9.max_compressed_frags =
1906 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
1907 		plane_state->tiling_info.gfx9.num_rb_per_se =
1908 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
1909 		plane_state->tiling_info.gfx9.swizzle =
1910 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
1911 		plane_state->tiling_info.gfx9.shaderEnable = 1;
1912 	}
1913 
1914 	plane_state->visible = true;
1915 	plane_state->scaling_quality.h_taps_c = 0;
1916 	plane_state->scaling_quality.v_taps_c = 0;
1917 
1918 	/* is this needed? is plane_state zeroed at allocation? */
1919 	plane_state->scaling_quality.h_taps = 0;
1920 	plane_state->scaling_quality.v_taps = 0;
1921 	plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
1922 
1923 	return ret;
1924 
1925 }
1926 
1927 static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
1928 				       struct dc_plane_state *plane_state)
1929 {
1930 	int i;
1931 	struct dc_gamma *gamma;
1932 	struct drm_color_lut *lut =
1933 			(struct drm_color_lut *) crtc_state->gamma_lut->data;
1934 
1935 	gamma = dc_create_gamma();
1936 
1937 	if (gamma == NULL) {
1938 		WARN_ON(1);
1939 		return;
1940 	}
1941 
1942 	gamma->type = GAMMA_RGB_256;
1943 	gamma->num_entries = GAMMA_RGB_256_ENTRIES;
1944 	for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
1945 		gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
1946 		gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
1947 		gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
1948 	}
1949 
1950 	plane_state->gamma_correction = gamma;
1951 }
1952 
1953 static int fill_plane_attributes(struct amdgpu_device *adev,
1954 				 struct dc_plane_state *dc_plane_state,
1955 				 struct drm_plane_state *plane_state,
1956 				 struct drm_crtc_state *crtc_state)
1957 {
1958 	const struct amdgpu_framebuffer *amdgpu_fb =
1959 		to_amdgpu_framebuffer(plane_state->fb);
1960 	const struct drm_crtc *crtc = plane_state->crtc;
1961 	struct dc_transfer_func *input_tf;
1962 	int ret = 0;
1963 
1964 	if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
1965 		return -EINVAL;
1966 
1967 	ret = fill_plane_attributes_from_fb(
1968 		crtc->dev->dev_private,
1969 		dc_plane_state,
1970 		amdgpu_fb);
1971 
1972 	if (ret)
1973 		return ret;
1974 
1975 	input_tf = dc_create_transfer_func();
1976 
1977 	if (input_tf == NULL)
1978 		return -ENOMEM;
1979 
1980 	input_tf->type = TF_TYPE_PREDEFINED;
1981 	input_tf->tf = TRANSFER_FUNCTION_SRGB;
1982 
1983 	dc_plane_state->in_transfer_func = input_tf;
1984 
1985 	/* In case of gamma set, update gamma value */
1986 	if (crtc_state->gamma_lut)
1987 		fill_gamma_from_crtc_state(crtc_state, dc_plane_state);
1988 
1989 	return ret;
1990 }
1991 
1992 /*****************************************************************************/
1993 
1994 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
1995 					   const struct dm_connector_state *dm_state,
1996 					   struct dc_stream_state *stream)
1997 {
1998 	enum amdgpu_rmx_type rmx_type;
1999 
2000 	struct rect src = { 0 }; /* viewport in composition space*/
2001 	struct rect dst = { 0 }; /* stream addressable area */
2002 
2003 	/* no mode. nothing to be done */
2004 	if (!mode)
2005 		return;
2006 
2007 	/* Full screen scaling by default */
2008 	src.width = mode->hdisplay;
2009 	src.height = mode->vdisplay;
2010 	dst.width = stream->timing.h_addressable;
2011 	dst.height = stream->timing.v_addressable;
2012 
2013 	rmx_type = dm_state->scaling;
2014 	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2015 		if (src.width * dst.height <
2016 				src.height * dst.width) {
2017 			/* height needs less upscaling/more downscaling */
2018 			dst.width = src.width *
2019 					dst.height / src.height;
2020 		} else {
2021 			/* width needs less upscaling/more downscaling */
2022 			dst.height = src.height *
2023 					dst.width / src.width;
2024 		}
2025 	} else if (rmx_type == RMX_CENTER) {
2026 		dst = src;
2027 	}
2028 
2029 	dst.x = (stream->timing.h_addressable - dst.width) / 2;
2030 	dst.y = (stream->timing.v_addressable - dst.height) / 2;
2031 
2032 	if (dm_state->underscan_enable) {
2033 		dst.x += dm_state->underscan_hborder / 2;
2034 		dst.y += dm_state->underscan_vborder / 2;
2035 		dst.width -= dm_state->underscan_hborder;
2036 		dst.height -= dm_state->underscan_vborder;
2037 	}
2038 
2039 	stream->src = src;
2040 	stream->dst = dst;
2041 
2042 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
2043 			dst.x, dst.y, dst.width, dst.height);
2044 
2045 }
2046 
2047 static enum dc_color_depth
2048 convert_color_depth_from_display_info(const struct drm_connector *connector)
2049 {
2050 	uint32_t bpc = connector->display_info.bpc;
2051 
2052 	/* Limited color depth to 8bit
2053 	 * TODO: Still need to handle deep color
2054 	 */
2055 	if (bpc > 8)
2056 		bpc = 8;
2057 
2058 	switch (bpc) {
2059 	case 0:
2060 		/* Temporary Work around, DRM don't parse color depth for
2061 		 * EDID revision before 1.4
2062 		 * TODO: Fix edid parsing
2063 		 */
2064 		return COLOR_DEPTH_888;
2065 	case 6:
2066 		return COLOR_DEPTH_666;
2067 	case 8:
2068 		return COLOR_DEPTH_888;
2069 	case 10:
2070 		return COLOR_DEPTH_101010;
2071 	case 12:
2072 		return COLOR_DEPTH_121212;
2073 	case 14:
2074 		return COLOR_DEPTH_141414;
2075 	case 16:
2076 		return COLOR_DEPTH_161616;
2077 	default:
2078 		return COLOR_DEPTH_UNDEFINED;
2079 	}
2080 }
2081 
2082 static enum dc_aspect_ratio
2083 get_aspect_ratio(const struct drm_display_mode *mode_in)
2084 {
2085 	int32_t width = mode_in->crtc_hdisplay * 9;
2086 	int32_t height = mode_in->crtc_vdisplay * 16;
2087 
2088 	if ((width - height) < 10 && (width - height) > -10)
2089 		return ASPECT_RATIO_16_9;
2090 	else
2091 		return ASPECT_RATIO_4_3;
2092 }
2093 
2094 static enum dc_color_space
2095 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2096 {
2097 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
2098 
2099 	switch (dc_crtc_timing->pixel_encoding)	{
2100 	case PIXEL_ENCODING_YCBCR422:
2101 	case PIXEL_ENCODING_YCBCR444:
2102 	case PIXEL_ENCODING_YCBCR420:
2103 	{
2104 		/*
2105 		 * 27030khz is the separation point between HDTV and SDTV
2106 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
2107 		 * respectively
2108 		 */
2109 		if (dc_crtc_timing->pix_clk_khz > 27030) {
2110 			if (dc_crtc_timing->flags.Y_ONLY)
2111 				color_space =
2112 					COLOR_SPACE_YCBCR709_LIMITED;
2113 			else
2114 				color_space = COLOR_SPACE_YCBCR709;
2115 		} else {
2116 			if (dc_crtc_timing->flags.Y_ONLY)
2117 				color_space =
2118 					COLOR_SPACE_YCBCR601_LIMITED;
2119 			else
2120 				color_space = COLOR_SPACE_YCBCR601;
2121 		}
2122 
2123 	}
2124 	break;
2125 	case PIXEL_ENCODING_RGB:
2126 		color_space = COLOR_SPACE_SRGB;
2127 		break;
2128 
2129 	default:
2130 		WARN_ON(1);
2131 		break;
2132 	}
2133 
2134 	return color_space;
2135 }
2136 
2137 /*****************************************************************************/
2138 
2139 static void
2140 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2141 					     const struct drm_display_mode *mode_in,
2142 					     const struct drm_connector *connector)
2143 {
2144 	struct dc_crtc_timing *timing_out = &stream->timing;
2145 	struct dc_transfer_func *tf = dc_create_transfer_func();
2146 
2147 	memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2148 
2149 	timing_out->h_border_left = 0;
2150 	timing_out->h_border_right = 0;
2151 	timing_out->v_border_top = 0;
2152 	timing_out->v_border_bottom = 0;
2153 	/* TODO: un-hardcode */
2154 
2155 	if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2156 			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2157 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2158 	else
2159 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2160 
2161 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2162 	timing_out->display_color_depth = convert_color_depth_from_display_info(
2163 			connector);
2164 	timing_out->scan_type = SCANNING_TYPE_NODATA;
2165 	timing_out->hdmi_vic = 0;
2166 	timing_out->vic = drm_match_cea_mode(mode_in);
2167 
2168 	timing_out->h_addressable = mode_in->crtc_hdisplay;
2169 	timing_out->h_total = mode_in->crtc_htotal;
2170 	timing_out->h_sync_width =
2171 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
2172 	timing_out->h_front_porch =
2173 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
2174 	timing_out->v_total = mode_in->crtc_vtotal;
2175 	timing_out->v_addressable = mode_in->crtc_vdisplay;
2176 	timing_out->v_front_porch =
2177 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2178 	timing_out->v_sync_width =
2179 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2180 	timing_out->pix_clk_khz = mode_in->crtc_clock;
2181 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2182 	if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2183 		timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2184 	if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2185 		timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2186 
2187 	stream->output_color_space = get_output_color_space(timing_out);
2188 
2189 	tf->type = TF_TYPE_PREDEFINED;
2190 	tf->tf = TRANSFER_FUNCTION_SRGB;
2191 	stream->out_transfer_func = tf;
2192 }
2193 
2194 static void fill_audio_info(struct audio_info *audio_info,
2195 			    const struct drm_connector *drm_connector,
2196 			    const struct dc_sink *dc_sink)
2197 {
2198 	int i = 0;
2199 	int cea_revision = 0;
2200 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
2201 
2202 	audio_info->manufacture_id = edid_caps->manufacturer_id;
2203 	audio_info->product_id = edid_caps->product_id;
2204 
2205 	cea_revision = drm_connector->display_info.cea_rev;
2206 
2207 	strncpy(audio_info->display_name,
2208 		edid_caps->display_name,
2209 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
2210 
2211 	if (cea_revision >= 3) {
2212 		audio_info->mode_count = edid_caps->audio_mode_count;
2213 
2214 		for (i = 0; i < audio_info->mode_count; ++i) {
2215 			audio_info->modes[i].format_code =
2216 					(enum audio_format_code)
2217 					(edid_caps->audio_modes[i].format_code);
2218 			audio_info->modes[i].channel_count =
2219 					edid_caps->audio_modes[i].channel_count;
2220 			audio_info->modes[i].sample_rates.all =
2221 					edid_caps->audio_modes[i].sample_rate;
2222 			audio_info->modes[i].sample_size =
2223 					edid_caps->audio_modes[i].sample_size;
2224 		}
2225 	}
2226 
2227 	audio_info->flags.all = edid_caps->speaker_flags;
2228 
2229 	/* TODO: We only check for the progressive mode, check for interlace mode too */
2230 	if (drm_connector->latency_present[0]) {
2231 		audio_info->video_latency = drm_connector->video_latency[0];
2232 		audio_info->audio_latency = drm_connector->audio_latency[0];
2233 	}
2234 
2235 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2236 
2237 }
2238 
2239 static void
2240 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
2241 				      struct drm_display_mode *dst_mode)
2242 {
2243 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
2244 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
2245 	dst_mode->crtc_clock = src_mode->crtc_clock;
2246 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
2247 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
2248 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
2249 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
2250 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
2251 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
2252 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
2253 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
2254 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
2255 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
2256 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
2257 }
2258 
2259 static void
2260 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2261 					const struct drm_display_mode *native_mode,
2262 					bool scale_enabled)
2263 {
2264 	if (scale_enabled) {
2265 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2266 	} else if (native_mode->clock == drm_mode->clock &&
2267 			native_mode->htotal == drm_mode->htotal &&
2268 			native_mode->vtotal == drm_mode->vtotal) {
2269 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2270 	} else {
2271 		/* no scaling nor amdgpu inserted, no need to patch */
2272 	}
2273 }
2274 
2275 static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
2276 {
2277 	struct dc_sink *sink = NULL;
2278 	struct dc_sink_init_data sink_init_data = { 0 };
2279 
2280 	sink_init_data.link = aconnector->dc_link;
2281 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2282 
2283 	sink = dc_sink_create(&sink_init_data);
2284 	if (!sink) {
2285 		DRM_ERROR("Failed to create sink!\n");
2286 		return -ENOMEM;
2287 	}
2288 
2289 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2290 	aconnector->fake_enable = true;
2291 
2292 	aconnector->dc_sink = sink;
2293 	aconnector->dc_link->local_sink = sink;
2294 
2295 	return 0;
2296 }
2297 
2298 static void set_multisync_trigger_params(
2299 		struct dc_stream_state *stream)
2300 {
2301 	if (stream->triggered_crtc_reset.enabled) {
2302 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
2303 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
2304 	}
2305 }
2306 
2307 static void set_master_stream(struct dc_stream_state *stream_set[],
2308 			      int stream_count)
2309 {
2310 	int j, highest_rfr = 0, master_stream = 0;
2311 
2312 	for (j = 0;  j < stream_count; j++) {
2313 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
2314 			int refresh_rate = 0;
2315 
2316 			refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
2317 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
2318 			if (refresh_rate > highest_rfr) {
2319 				highest_rfr = refresh_rate;
2320 				master_stream = j;
2321 			}
2322 		}
2323 	}
2324 	for (j = 0;  j < stream_count; j++) {
2325 		if (stream_set[j] && j != master_stream)
2326 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
2327 	}
2328 }
2329 
2330 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
2331 {
2332 	int i = 0;
2333 
2334 	if (context->stream_count < 2)
2335 		return;
2336 	for (i = 0; i < context->stream_count ; i++) {
2337 		if (!context->streams[i])
2338 			continue;
2339 		/* TODO: add a function to read AMD VSDB bits and will set
2340 		 * crtc_sync_master.multi_sync_enabled flag
2341 		 * For now its set to false
2342 		 */
2343 		set_multisync_trigger_params(context->streams[i]);
2344 	}
2345 	set_master_stream(context->streams, context->stream_count);
2346 }
2347 
2348 static struct dc_stream_state *
2349 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2350 		       const struct drm_display_mode *drm_mode,
2351 		       const struct dm_connector_state *dm_state)
2352 {
2353 	struct drm_display_mode *preferred_mode = NULL;
2354 	const struct drm_connector *drm_connector;
2355 	struct dc_stream_state *stream = NULL;
2356 	struct drm_display_mode mode = *drm_mode;
2357 	bool native_mode_found = false;
2358 
2359 	if (aconnector == NULL) {
2360 		DRM_ERROR("aconnector is NULL!\n");
2361 		goto drm_connector_null;
2362 	}
2363 
2364 	if (dm_state == NULL) {
2365 		DRM_ERROR("dm_state is NULL!\n");
2366 		goto dm_state_null;
2367 	}
2368 
2369 	drm_connector = &aconnector->base;
2370 
2371 	if (!aconnector->dc_sink) {
2372 		/*
2373 		 * Exclude MST from creating fake_sink
2374 		 * TODO: need to enable MST into fake_sink feature
2375 		 */
2376 		if (aconnector->mst_port)
2377 			goto stream_create_fail;
2378 
2379 		if (create_fake_sink(aconnector))
2380 			goto stream_create_fail;
2381 	}
2382 
2383 	stream = dc_create_stream_for_sink(aconnector->dc_sink);
2384 
2385 	if (stream == NULL) {
2386 		DRM_ERROR("Failed to create stream for sink!\n");
2387 		goto stream_create_fail;
2388 	}
2389 
2390 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
2391 		/* Search for preferred mode */
2392 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
2393 			native_mode_found = true;
2394 			break;
2395 		}
2396 	}
2397 	if (!native_mode_found)
2398 		preferred_mode = list_first_entry_or_null(
2399 				&aconnector->base.modes,
2400 				struct drm_display_mode,
2401 				head);
2402 
2403 	if (preferred_mode == NULL) {
2404 		/* This may not be an error, the use case is when we we have no
2405 		 * usermode calls to reset and set mode upon hotplug. In this
2406 		 * case, we call set mode ourselves to restore the previous mode
2407 		 * and the modelist may not be filled in in time.
2408 		 */
2409 		DRM_DEBUG_DRIVER("No preferred mode found\n");
2410 	} else {
2411 		decide_crtc_timing_for_drm_display_mode(
2412 				&mode, preferred_mode,
2413 				dm_state->scaling != RMX_OFF);
2414 	}
2415 
2416 	fill_stream_properties_from_drm_display_mode(stream,
2417 			&mode, &aconnector->base);
2418 	update_stream_scaling_settings(&mode, dm_state, stream);
2419 
2420 	fill_audio_info(
2421 		&stream->audio_info,
2422 		drm_connector,
2423 		aconnector->dc_sink);
2424 
2425 stream_create_fail:
2426 dm_state_null:
2427 drm_connector_null:
2428 	return stream;
2429 }
2430 
2431 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
2432 {
2433 	drm_crtc_cleanup(crtc);
2434 	kfree(crtc);
2435 }
2436 
2437 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
2438 				  struct drm_crtc_state *state)
2439 {
2440 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
2441 
2442 	/* TODO Destroy dc_stream objects are stream object is flattened */
2443 	if (cur->stream)
2444 		dc_stream_release(cur->stream);
2445 
2446 
2447 	__drm_atomic_helper_crtc_destroy_state(state);
2448 
2449 
2450 	kfree(state);
2451 }
2452 
2453 static void dm_crtc_reset_state(struct drm_crtc *crtc)
2454 {
2455 	struct dm_crtc_state *state;
2456 
2457 	if (crtc->state)
2458 		dm_crtc_destroy_state(crtc, crtc->state);
2459 
2460 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2461 	if (WARN_ON(!state))
2462 		return;
2463 
2464 	crtc->state = &state->base;
2465 	crtc->state->crtc = crtc;
2466 
2467 }
2468 
2469 static struct drm_crtc_state *
2470 dm_crtc_duplicate_state(struct drm_crtc *crtc)
2471 {
2472 	struct dm_crtc_state *state, *cur;
2473 
2474 	cur = to_dm_crtc_state(crtc->state);
2475 
2476 	if (WARN_ON(!crtc->state))
2477 		return NULL;
2478 
2479 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2480 	if (!state)
2481 		return NULL;
2482 
2483 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
2484 
2485 	if (cur->stream) {
2486 		state->stream = cur->stream;
2487 		dc_stream_retain(state->stream);
2488 	}
2489 
2490 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
2491 
2492 	return &state->base;
2493 }
2494 
2495 /* Implemented only the options currently availible for the driver */
2496 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
2497 	.reset = dm_crtc_reset_state,
2498 	.destroy = amdgpu_dm_crtc_destroy,
2499 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
2500 	.set_config = drm_atomic_helper_set_config,
2501 	.page_flip = drm_atomic_helper_page_flip,
2502 	.atomic_duplicate_state = dm_crtc_duplicate_state,
2503 	.atomic_destroy_state = dm_crtc_destroy_state,
2504 };
2505 
2506 static enum drm_connector_status
2507 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
2508 {
2509 	bool connected;
2510 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2511 
2512 	/* Notes:
2513 	 * 1. This interface is NOT called in context of HPD irq.
2514 	 * 2. This interface *is called* in context of user-mode ioctl. Which
2515 	 * makes it a bad place for *any* MST-related activit. */
2516 
2517 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
2518 	    !aconnector->fake_enable)
2519 		connected = (aconnector->dc_sink != NULL);
2520 	else
2521 		connected = (aconnector->base.force == DRM_FORCE_ON);
2522 
2523 	return (connected ? connector_status_connected :
2524 			connector_status_disconnected);
2525 }
2526 
2527 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
2528 					    struct drm_connector_state *connector_state,
2529 					    struct drm_property *property,
2530 					    uint64_t val)
2531 {
2532 	struct drm_device *dev = connector->dev;
2533 	struct amdgpu_device *adev = dev->dev_private;
2534 	struct dm_connector_state *dm_old_state =
2535 		to_dm_connector_state(connector->state);
2536 	struct dm_connector_state *dm_new_state =
2537 		to_dm_connector_state(connector_state);
2538 
2539 	int ret = -EINVAL;
2540 
2541 	if (property == dev->mode_config.scaling_mode_property) {
2542 		enum amdgpu_rmx_type rmx_type;
2543 
2544 		switch (val) {
2545 		case DRM_MODE_SCALE_CENTER:
2546 			rmx_type = RMX_CENTER;
2547 			break;
2548 		case DRM_MODE_SCALE_ASPECT:
2549 			rmx_type = RMX_ASPECT;
2550 			break;
2551 		case DRM_MODE_SCALE_FULLSCREEN:
2552 			rmx_type = RMX_FULL;
2553 			break;
2554 		case DRM_MODE_SCALE_NONE:
2555 		default:
2556 			rmx_type = RMX_OFF;
2557 			break;
2558 		}
2559 
2560 		if (dm_old_state->scaling == rmx_type)
2561 			return 0;
2562 
2563 		dm_new_state->scaling = rmx_type;
2564 		ret = 0;
2565 	} else if (property == adev->mode_info.underscan_hborder_property) {
2566 		dm_new_state->underscan_hborder = val;
2567 		ret = 0;
2568 	} else if (property == adev->mode_info.underscan_vborder_property) {
2569 		dm_new_state->underscan_vborder = val;
2570 		ret = 0;
2571 	} else if (property == adev->mode_info.underscan_property) {
2572 		dm_new_state->underscan_enable = val;
2573 		ret = 0;
2574 	}
2575 
2576 	return ret;
2577 }
2578 
2579 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
2580 					    const struct drm_connector_state *state,
2581 					    struct drm_property *property,
2582 					    uint64_t *val)
2583 {
2584 	struct drm_device *dev = connector->dev;
2585 	struct amdgpu_device *adev = dev->dev_private;
2586 	struct dm_connector_state *dm_state =
2587 		to_dm_connector_state(state);
2588 	int ret = -EINVAL;
2589 
2590 	if (property == dev->mode_config.scaling_mode_property) {
2591 		switch (dm_state->scaling) {
2592 		case RMX_CENTER:
2593 			*val = DRM_MODE_SCALE_CENTER;
2594 			break;
2595 		case RMX_ASPECT:
2596 			*val = DRM_MODE_SCALE_ASPECT;
2597 			break;
2598 		case RMX_FULL:
2599 			*val = DRM_MODE_SCALE_FULLSCREEN;
2600 			break;
2601 		case RMX_OFF:
2602 		default:
2603 			*val = DRM_MODE_SCALE_NONE;
2604 			break;
2605 		}
2606 		ret = 0;
2607 	} else if (property == adev->mode_info.underscan_hborder_property) {
2608 		*val = dm_state->underscan_hborder;
2609 		ret = 0;
2610 	} else if (property == adev->mode_info.underscan_vborder_property) {
2611 		*val = dm_state->underscan_vborder;
2612 		ret = 0;
2613 	} else if (property == adev->mode_info.underscan_property) {
2614 		*val = dm_state->underscan_enable;
2615 		ret = 0;
2616 	}
2617 	return ret;
2618 }
2619 
2620 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
2621 {
2622 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2623 	const struct dc_link *link = aconnector->dc_link;
2624 	struct amdgpu_device *adev = connector->dev->dev_private;
2625 	struct amdgpu_display_manager *dm = &adev->dm;
2626 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2627 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2628 
2629 	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
2630 		amdgpu_dm_register_backlight_device(dm);
2631 
2632 		if (dm->backlight_dev) {
2633 			backlight_device_unregister(dm->backlight_dev);
2634 			dm->backlight_dev = NULL;
2635 		}
2636 
2637 	}
2638 #endif
2639 	drm_connector_unregister(connector);
2640 	drm_connector_cleanup(connector);
2641 	kfree(connector);
2642 }
2643 
2644 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
2645 {
2646 	struct dm_connector_state *state =
2647 		to_dm_connector_state(connector->state);
2648 
2649 	kfree(state);
2650 
2651 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2652 
2653 	if (state) {
2654 		state->scaling = RMX_OFF;
2655 		state->underscan_enable = false;
2656 		state->underscan_hborder = 0;
2657 		state->underscan_vborder = 0;
2658 
2659 		connector->state = &state->base;
2660 		connector->state->connector = connector;
2661 	}
2662 }
2663 
2664 struct drm_connector_state *
2665 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
2666 {
2667 	struct dm_connector_state *state =
2668 		to_dm_connector_state(connector->state);
2669 
2670 	struct dm_connector_state *new_state =
2671 			kmemdup(state, sizeof(*state), GFP_KERNEL);
2672 
2673 	if (new_state) {
2674 		__drm_atomic_helper_connector_duplicate_state(connector,
2675 							      &new_state->base);
2676 		return &new_state->base;
2677 	}
2678 
2679 	return NULL;
2680 }
2681 
2682 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
2683 	.reset = amdgpu_dm_connector_funcs_reset,
2684 	.detect = amdgpu_dm_connector_detect,
2685 	.fill_modes = drm_helper_probe_single_connector_modes,
2686 	.destroy = amdgpu_dm_connector_destroy,
2687 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
2688 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
2689 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
2690 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
2691 };
2692 
2693 static struct drm_encoder *best_encoder(struct drm_connector *connector)
2694 {
2695 	int enc_id = connector->encoder_ids[0];
2696 	struct drm_mode_object *obj;
2697 	struct drm_encoder *encoder;
2698 
2699 	DRM_DEBUG_DRIVER("Finding the best encoder\n");
2700 
2701 	/* pick the encoder ids */
2702 	if (enc_id) {
2703 		obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
2704 		if (!obj) {
2705 			DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2706 			return NULL;
2707 		}
2708 		encoder = obj_to_encoder(obj);
2709 		return encoder;
2710 	}
2711 	DRM_ERROR("No encoder id\n");
2712 	return NULL;
2713 }
2714 
2715 static int get_modes(struct drm_connector *connector)
2716 {
2717 	return amdgpu_dm_connector_get_modes(connector);
2718 }
2719 
2720 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
2721 {
2722 	struct dc_sink_init_data init_params = {
2723 			.link = aconnector->dc_link,
2724 			.sink_signal = SIGNAL_TYPE_VIRTUAL
2725 	};
2726 	struct edid *edid;
2727 
2728 	if (!aconnector->base.edid_blob_ptr ||
2729 		!aconnector->base.edid_blob_ptr->data) {
2730 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2731 				aconnector->base.name);
2732 
2733 		aconnector->base.force = DRM_FORCE_OFF;
2734 		aconnector->base.override_edid = false;
2735 		return;
2736 	}
2737 
2738 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
2739 
2740 	aconnector->edid = edid;
2741 
2742 	aconnector->dc_em_sink = dc_link_add_remote_sink(
2743 		aconnector->dc_link,
2744 		(uint8_t *)edid,
2745 		(edid->extensions + 1) * EDID_LENGTH,
2746 		&init_params);
2747 
2748 	if (aconnector->base.force == DRM_FORCE_ON)
2749 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
2750 		aconnector->dc_link->local_sink :
2751 		aconnector->dc_em_sink;
2752 }
2753 
2754 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
2755 {
2756 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
2757 
2758 	/* In case of headless boot with force on for DP managed connector
2759 	 * Those settings have to be != 0 to get initial modeset
2760 	 */
2761 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
2762 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
2763 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
2764 	}
2765 
2766 
2767 	aconnector->base.override_edid = true;
2768 	create_eml_sink(aconnector);
2769 }
2770 
2771 int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
2772 				   struct drm_display_mode *mode)
2773 {
2774 	int result = MODE_ERROR;
2775 	struct dc_sink *dc_sink;
2776 	struct amdgpu_device *adev = connector->dev->dev_private;
2777 	/* TODO: Unhardcode stream count */
2778 	struct dc_stream_state *stream;
2779 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2780 
2781 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
2782 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
2783 		return result;
2784 
2785 	/* Only run this the first time mode_valid is called to initilialize
2786 	 * EDID mgmt
2787 	 */
2788 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
2789 		!aconnector->dc_em_sink)
2790 		handle_edid_mgmt(aconnector);
2791 
2792 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
2793 
2794 	if (dc_sink == NULL) {
2795 		DRM_ERROR("dc_sink is NULL!\n");
2796 		goto fail;
2797 	}
2798 
2799 	stream = dc_create_stream_for_sink(dc_sink);
2800 	if (stream == NULL) {
2801 		DRM_ERROR("Failed to create stream for sink!\n");
2802 		goto fail;
2803 	}
2804 
2805 	drm_mode_set_crtcinfo(mode, 0);
2806 	fill_stream_properties_from_drm_display_mode(stream, mode, connector);
2807 
2808 	stream->src.width = mode->hdisplay;
2809 	stream->src.height = mode->vdisplay;
2810 	stream->dst = stream->src;
2811 
2812 	if (dc_validate_stream(adev->dm.dc, stream) == DC_OK)
2813 		result = MODE_OK;
2814 
2815 	dc_stream_release(stream);
2816 
2817 fail:
2818 	/* TODO: error handling*/
2819 	return result;
2820 }
2821 
2822 static const struct drm_connector_helper_funcs
2823 amdgpu_dm_connector_helper_funcs = {
2824 	/*
2825 	 * If hotplug a second bigger display in FB Con mode, bigger resolution
2826 	 * modes will be filtered by drm_mode_validate_size(), and those modes
2827 	 * is missing after user start lightdm. So we need to renew modes list.
2828 	 * in get_modes call back, not just return the modes count
2829 	 */
2830 	.get_modes = get_modes,
2831 	.mode_valid = amdgpu_dm_connector_mode_valid,
2832 	.best_encoder = best_encoder
2833 };
2834 
2835 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
2836 {
2837 }
2838 
2839 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
2840 				       struct drm_crtc_state *state)
2841 {
2842 	struct amdgpu_device *adev = crtc->dev->dev_private;
2843 	struct dc *dc = adev->dm.dc;
2844 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
2845 	int ret = -EINVAL;
2846 
2847 	if (unlikely(!dm_crtc_state->stream &&
2848 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
2849 		WARN_ON(1);
2850 		return ret;
2851 	}
2852 
2853 	/* In some use cases, like reset, no stream  is attached */
2854 	if (!dm_crtc_state->stream)
2855 		return 0;
2856 
2857 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
2858 		return 0;
2859 
2860 	return ret;
2861 }
2862 
2863 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
2864 				      const struct drm_display_mode *mode,
2865 				      struct drm_display_mode *adjusted_mode)
2866 {
2867 	return true;
2868 }
2869 
2870 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
2871 	.disable = dm_crtc_helper_disable,
2872 	.atomic_check = dm_crtc_helper_atomic_check,
2873 	.mode_fixup = dm_crtc_helper_mode_fixup
2874 };
2875 
2876 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
2877 {
2878 
2879 }
2880 
2881 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
2882 					  struct drm_crtc_state *crtc_state,
2883 					  struct drm_connector_state *conn_state)
2884 {
2885 	return 0;
2886 }
2887 
2888 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
2889 	.disable = dm_encoder_helper_disable,
2890 	.atomic_check = dm_encoder_helper_atomic_check
2891 };
2892 
2893 static void dm_drm_plane_reset(struct drm_plane *plane)
2894 {
2895 	struct dm_plane_state *amdgpu_state = NULL;
2896 
2897 	if (plane->state)
2898 		plane->funcs->atomic_destroy_state(plane, plane->state);
2899 
2900 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
2901 	WARN_ON(amdgpu_state == NULL);
2902 
2903 	if (amdgpu_state) {
2904 		plane->state = &amdgpu_state->base;
2905 		plane->state->plane = plane;
2906 		plane->state->rotation = DRM_MODE_ROTATE_0;
2907 	}
2908 }
2909 
2910 static struct drm_plane_state *
2911 dm_drm_plane_duplicate_state(struct drm_plane *plane)
2912 {
2913 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
2914 
2915 	old_dm_plane_state = to_dm_plane_state(plane->state);
2916 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
2917 	if (!dm_plane_state)
2918 		return NULL;
2919 
2920 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
2921 
2922 	if (old_dm_plane_state->dc_state) {
2923 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
2924 		dc_plane_state_retain(dm_plane_state->dc_state);
2925 	}
2926 
2927 	return &dm_plane_state->base;
2928 }
2929 
2930 void dm_drm_plane_destroy_state(struct drm_plane *plane,
2931 				struct drm_plane_state *state)
2932 {
2933 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
2934 
2935 	if (dm_plane_state->dc_state)
2936 		dc_plane_state_release(dm_plane_state->dc_state);
2937 
2938 	drm_atomic_helper_plane_destroy_state(plane, state);
2939 }
2940 
2941 static const struct drm_plane_funcs dm_plane_funcs = {
2942 	.update_plane	= drm_atomic_helper_update_plane,
2943 	.disable_plane	= drm_atomic_helper_disable_plane,
2944 	.destroy	= drm_plane_cleanup,
2945 	.reset = dm_drm_plane_reset,
2946 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
2947 	.atomic_destroy_state = dm_drm_plane_destroy_state,
2948 };
2949 
2950 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
2951 				      struct drm_plane_state *new_state)
2952 {
2953 	struct amdgpu_framebuffer *afb;
2954 	struct drm_gem_object *obj;
2955 	struct amdgpu_bo *rbo;
2956 	uint64_t chroma_addr = 0;
2957 	int r;
2958 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
2959 	unsigned int awidth;
2960 
2961 	dm_plane_state_old = to_dm_plane_state(plane->state);
2962 	dm_plane_state_new = to_dm_plane_state(new_state);
2963 
2964 	if (!new_state->fb) {
2965 		DRM_DEBUG_DRIVER("No FB bound\n");
2966 		return 0;
2967 	}
2968 
2969 	afb = to_amdgpu_framebuffer(new_state->fb);
2970 
2971 	obj = afb->obj;
2972 	rbo = gem_to_amdgpu_bo(obj);
2973 	r = amdgpu_bo_reserve(rbo, false);
2974 	if (unlikely(r != 0))
2975 		return r;
2976 
2977 	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
2978 
2979 
2980 	amdgpu_bo_unreserve(rbo);
2981 
2982 	if (unlikely(r != 0)) {
2983 		if (r != -ERESTARTSYS)
2984 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
2985 		return r;
2986 	}
2987 
2988 	amdgpu_bo_ref(rbo);
2989 
2990 	if (dm_plane_state_new->dc_state &&
2991 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
2992 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
2993 
2994 		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2995 			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
2996 			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
2997 		} else {
2998 			awidth = ALIGN(new_state->fb->width, 64);
2999 			plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3000 			plane_state->address.video_progressive.luma_addr.low_part
3001 							= lower_32_bits(afb->address);
3002 			plane_state->address.video_progressive.luma_addr.high_part
3003 							= upper_32_bits(afb->address);
3004 			chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
3005 			plane_state->address.video_progressive.chroma_addr.low_part
3006 							= lower_32_bits(chroma_addr);
3007 			plane_state->address.video_progressive.chroma_addr.high_part
3008 							= upper_32_bits(chroma_addr);
3009 		}
3010 	}
3011 
3012 	/* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
3013 	 * prepare and cleanup in drm_atomic_helper_prepare_planes
3014 	 * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
3015 	 * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
3016 	 * code touching fram buffers should be avoided for DC.
3017 	 */
3018 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
3019 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);
3020 
3021 		acrtc->cursor_bo = obj;
3022 	}
3023 	return 0;
3024 }
3025 
3026 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
3027 				       struct drm_plane_state *old_state)
3028 {
3029 	struct amdgpu_bo *rbo;
3030 	struct amdgpu_framebuffer *afb;
3031 	int r;
3032 
3033 	if (!old_state->fb)
3034 		return;
3035 
3036 	afb = to_amdgpu_framebuffer(old_state->fb);
3037 	rbo = gem_to_amdgpu_bo(afb->obj);
3038 	r = amdgpu_bo_reserve(rbo, false);
3039 	if (unlikely(r)) {
3040 		DRM_ERROR("failed to reserve rbo before unpin\n");
3041 		return;
3042 	}
3043 
3044 	amdgpu_bo_unpin(rbo);
3045 	amdgpu_bo_unreserve(rbo);
3046 	amdgpu_bo_unref(&rbo);
3047 }
3048 
3049 static int dm_plane_atomic_check(struct drm_plane *plane,
3050 				 struct drm_plane_state *state)
3051 {
3052 	struct amdgpu_device *adev = plane->dev->dev_private;
3053 	struct dc *dc = adev->dm.dc;
3054 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3055 
3056 	if (!dm_plane_state->dc_state)
3057 		return 0;
3058 
3059 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3060 		return 0;
3061 
3062 	return -EINVAL;
3063 }
3064 
3065 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
3066 	.prepare_fb = dm_plane_helper_prepare_fb,
3067 	.cleanup_fb = dm_plane_helper_cleanup_fb,
3068 	.atomic_check = dm_plane_atomic_check,
3069 };
3070 
3071 /*
3072  * TODO: these are currently initialized to rgb formats only.
3073  * For future use cases we should either initialize them dynamically based on
3074  * plane capabilities, or initialize this array to all formats, so internal drm
3075  * check will succeed, and let DC to implement proper check
3076  */
3077 static const uint32_t rgb_formats[] = {
3078 	DRM_FORMAT_RGB888,
3079 	DRM_FORMAT_XRGB8888,
3080 	DRM_FORMAT_ARGB8888,
3081 	DRM_FORMAT_RGBA8888,
3082 	DRM_FORMAT_XRGB2101010,
3083 	DRM_FORMAT_XBGR2101010,
3084 	DRM_FORMAT_ARGB2101010,
3085 	DRM_FORMAT_ABGR2101010,
3086 };
3087 
3088 static const uint32_t yuv_formats[] = {
3089 	DRM_FORMAT_NV12,
3090 	DRM_FORMAT_NV21,
3091 };
3092 
3093 static const u32 cursor_formats[] = {
3094 	DRM_FORMAT_ARGB8888
3095 };
3096 
3097 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3098 				struct amdgpu_plane *aplane,
3099 				unsigned long possible_crtcs)
3100 {
3101 	int res = -EPERM;
3102 
3103 	switch (aplane->base.type) {
3104 	case DRM_PLANE_TYPE_PRIMARY:
3105 		aplane->base.format_default = true;
3106 
3107 		res = drm_universal_plane_init(
3108 				dm->adev->ddev,
3109 				&aplane->base,
3110 				possible_crtcs,
3111 				&dm_plane_funcs,
3112 				rgb_formats,
3113 				ARRAY_SIZE(rgb_formats),
3114 				NULL, aplane->base.type, NULL);
3115 		break;
3116 	case DRM_PLANE_TYPE_OVERLAY:
3117 		res = drm_universal_plane_init(
3118 				dm->adev->ddev,
3119 				&aplane->base,
3120 				possible_crtcs,
3121 				&dm_plane_funcs,
3122 				yuv_formats,
3123 				ARRAY_SIZE(yuv_formats),
3124 				NULL, aplane->base.type, NULL);
3125 		break;
3126 	case DRM_PLANE_TYPE_CURSOR:
3127 		res = drm_universal_plane_init(
3128 				dm->adev->ddev,
3129 				&aplane->base,
3130 				possible_crtcs,
3131 				&dm_plane_funcs,
3132 				cursor_formats,
3133 				ARRAY_SIZE(cursor_formats),
3134 				NULL, aplane->base.type, NULL);
3135 		break;
3136 	}
3137 
3138 	drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
3139 
3140 	/* Create (reset) the plane state */
3141 	if (aplane->base.funcs->reset)
3142 		aplane->base.funcs->reset(&aplane->base);
3143 
3144 
3145 	return res;
3146 }
3147 
3148 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3149 			       struct drm_plane *plane,
3150 			       uint32_t crtc_index)
3151 {
3152 	struct amdgpu_crtc *acrtc = NULL;
3153 	struct amdgpu_plane *cursor_plane;
3154 
3155 	int res = -ENOMEM;
3156 
3157 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3158 	if (!cursor_plane)
3159 		goto fail;
3160 
3161 	cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
3162 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3163 
3164 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3165 	if (!acrtc)
3166 		goto fail;
3167 
3168 	res = drm_crtc_init_with_planes(
3169 			dm->ddev,
3170 			&acrtc->base,
3171 			plane,
3172 			&cursor_plane->base,
3173 			&amdgpu_dm_crtc_funcs, NULL);
3174 
3175 	if (res)
3176 		goto fail;
3177 
3178 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3179 
3180 	/* Create (reset) the plane state */
3181 	if (acrtc->base.funcs->reset)
3182 		acrtc->base.funcs->reset(&acrtc->base);
3183 
3184 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3185 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3186 
3187 	acrtc->crtc_id = crtc_index;
3188 	acrtc->base.enabled = false;
3189 
3190 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3191 	drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
3192 
3193 	return 0;
3194 
3195 fail:
3196 	kfree(acrtc);
3197 	kfree(cursor_plane);
3198 	return res;
3199 }
3200 
3201 
3202 static int to_drm_connector_type(enum signal_type st)
3203 {
3204 	switch (st) {
3205 	case SIGNAL_TYPE_HDMI_TYPE_A:
3206 		return DRM_MODE_CONNECTOR_HDMIA;
3207 	case SIGNAL_TYPE_EDP:
3208 		return DRM_MODE_CONNECTOR_eDP;
3209 	case SIGNAL_TYPE_RGB:
3210 		return DRM_MODE_CONNECTOR_VGA;
3211 	case SIGNAL_TYPE_DISPLAY_PORT:
3212 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
3213 		return DRM_MODE_CONNECTOR_DisplayPort;
3214 	case SIGNAL_TYPE_DVI_DUAL_LINK:
3215 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
3216 		return DRM_MODE_CONNECTOR_DVID;
3217 	case SIGNAL_TYPE_VIRTUAL:
3218 		return DRM_MODE_CONNECTOR_VIRTUAL;
3219 
3220 	default:
3221 		return DRM_MODE_CONNECTOR_Unknown;
3222 	}
3223 }
3224 
3225 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
3226 {
3227 	const struct drm_connector_helper_funcs *helper =
3228 		connector->helper_private;
3229 	struct drm_encoder *encoder;
3230 	struct amdgpu_encoder *amdgpu_encoder;
3231 
3232 	encoder = helper->best_encoder(connector);
3233 
3234 	if (encoder == NULL)
3235 		return;
3236 
3237 	amdgpu_encoder = to_amdgpu_encoder(encoder);
3238 
3239 	amdgpu_encoder->native_mode.clock = 0;
3240 
3241 	if (!list_empty(&connector->probed_modes)) {
3242 		struct drm_display_mode *preferred_mode = NULL;
3243 
3244 		list_for_each_entry(preferred_mode,
3245 				    &connector->probed_modes,
3246 				    head) {
3247 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
3248 				amdgpu_encoder->native_mode = *preferred_mode;
3249 
3250 			break;
3251 		}
3252 
3253 	}
3254 }
3255 
3256 static struct drm_display_mode *
3257 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
3258 			     char *name,
3259 			     int hdisplay, int vdisplay)
3260 {
3261 	struct drm_device *dev = encoder->dev;
3262 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3263 	struct drm_display_mode *mode = NULL;
3264 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3265 
3266 	mode = drm_mode_duplicate(dev, native_mode);
3267 
3268 	if (mode == NULL)
3269 		return NULL;
3270 
3271 	mode->hdisplay = hdisplay;
3272 	mode->vdisplay = vdisplay;
3273 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3274 	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
3275 
3276 	return mode;
3277 
3278 }
3279 
3280 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3281 						 struct drm_connector *connector)
3282 {
3283 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3284 	struct drm_display_mode *mode = NULL;
3285 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3286 	struct amdgpu_dm_connector *amdgpu_dm_connector =
3287 				to_amdgpu_dm_connector(connector);
3288 	int i;
3289 	int n;
3290 	struct mode_size {
3291 		char name[DRM_DISPLAY_MODE_LEN];
3292 		int w;
3293 		int h;
3294 	} common_modes[] = {
3295 		{  "640x480",  640,  480},
3296 		{  "800x600",  800,  600},
3297 		{ "1024x768", 1024,  768},
3298 		{ "1280x720", 1280,  720},
3299 		{ "1280x800", 1280,  800},
3300 		{"1280x1024", 1280, 1024},
3301 		{ "1440x900", 1440,  900},
3302 		{"1680x1050", 1680, 1050},
3303 		{"1600x1200", 1600, 1200},
3304 		{"1920x1080", 1920, 1080},
3305 		{"1920x1200", 1920, 1200}
3306 	};
3307 
3308 	n = ARRAY_SIZE(common_modes);
3309 
3310 	for (i = 0; i < n; i++) {
3311 		struct drm_display_mode *curmode = NULL;
3312 		bool mode_existed = false;
3313 
3314 		if (common_modes[i].w > native_mode->hdisplay ||
3315 		    common_modes[i].h > native_mode->vdisplay ||
3316 		   (common_modes[i].w == native_mode->hdisplay &&
3317 		    common_modes[i].h == native_mode->vdisplay))
3318 			continue;
3319 
3320 		list_for_each_entry(curmode, &connector->probed_modes, head) {
3321 			if (common_modes[i].w == curmode->hdisplay &&
3322 			    common_modes[i].h == curmode->vdisplay) {
3323 				mode_existed = true;
3324 				break;
3325 			}
3326 		}
3327 
3328 		if (mode_existed)
3329 			continue;
3330 
3331 		mode = amdgpu_dm_create_common_mode(encoder,
3332 				common_modes[i].name, common_modes[i].w,
3333 				common_modes[i].h);
3334 		drm_mode_probed_add(connector, mode);
3335 		amdgpu_dm_connector->num_modes++;
3336 	}
3337 }
3338 
3339 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
3340 					      struct edid *edid)
3341 {
3342 	struct amdgpu_dm_connector *amdgpu_dm_connector =
3343 			to_amdgpu_dm_connector(connector);
3344 
3345 	if (edid) {
3346 		/* empty probed_modes */
3347 		INIT_LIST_HEAD(&connector->probed_modes);
3348 		amdgpu_dm_connector->num_modes =
3349 				drm_add_edid_modes(connector, edid);
3350 
3351 		amdgpu_dm_get_native_mode(connector);
3352 	} else {
3353 		amdgpu_dm_connector->num_modes = 0;
3354 	}
3355 }
3356 
3357 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
3358 {
3359 	const struct drm_connector_helper_funcs *helper =
3360 			connector->helper_private;
3361 	struct amdgpu_dm_connector *amdgpu_dm_connector =
3362 			to_amdgpu_dm_connector(connector);
3363 	struct drm_encoder *encoder;
3364 	struct edid *edid = amdgpu_dm_connector->edid;
3365 
3366 	encoder = helper->best_encoder(connector);
3367 
3368 	amdgpu_dm_connector_ddc_get_modes(connector, edid);
3369 	amdgpu_dm_connector_add_common_modes(encoder, connector);
3370 	return amdgpu_dm_connector->num_modes;
3371 }
3372 
3373 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3374 				     struct amdgpu_dm_connector *aconnector,
3375 				     int connector_type,
3376 				     struct dc_link *link,
3377 				     int link_index)
3378 {
3379 	struct amdgpu_device *adev = dm->ddev->dev_private;
3380 
3381 	aconnector->connector_id = link_index;
3382 	aconnector->dc_link = link;
3383 	aconnector->base.interlace_allowed = false;
3384 	aconnector->base.doublescan_allowed = false;
3385 	aconnector->base.stereo_allowed = false;
3386 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
3387 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
3388 
3389 	mutex_init(&aconnector->hpd_lock);
3390 
3391 	/* configure support HPD hot plug connector_>polled default value is 0
3392 	 * which means HPD hot plug not supported
3393 	 */
3394 	switch (connector_type) {
3395 	case DRM_MODE_CONNECTOR_HDMIA:
3396 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3397 		break;
3398 	case DRM_MODE_CONNECTOR_DisplayPort:
3399 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3400 		break;
3401 	case DRM_MODE_CONNECTOR_DVID:
3402 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3403 		break;
3404 	default:
3405 		break;
3406 	}
3407 
3408 	drm_object_attach_property(&aconnector->base.base,
3409 				dm->ddev->mode_config.scaling_mode_property,
3410 				DRM_MODE_SCALE_NONE);
3411 
3412 	drm_object_attach_property(&aconnector->base.base,
3413 				adev->mode_info.underscan_property,
3414 				UNDERSCAN_OFF);
3415 	drm_object_attach_property(&aconnector->base.base,
3416 				adev->mode_info.underscan_hborder_property,
3417 				0);
3418 	drm_object_attach_property(&aconnector->base.base,
3419 				adev->mode_info.underscan_vborder_property,
3420 				0);
3421 
3422 }
3423 
3424 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
3425 			      struct i2c_msg *msgs, int num)
3426 {
3427 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
3428 	struct ddc_service *ddc_service = i2c->ddc_service;
3429 	struct i2c_command cmd;
3430 	int i;
3431 	int result = -EIO;
3432 
3433 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
3434 
3435 	if (!cmd.payloads)
3436 		return result;
3437 
3438 	cmd.number_of_payloads = num;
3439 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
3440 	cmd.speed = 100;
3441 
3442 	for (i = 0; i < num; i++) {
3443 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
3444 		cmd.payloads[i].address = msgs[i].addr;
3445 		cmd.payloads[i].length = msgs[i].len;
3446 		cmd.payloads[i].data = msgs[i].buf;
3447 	}
3448 
3449 	if (dal_i2caux_submit_i2c_command(
3450 			ddc_service->ctx->i2caux,
3451 			ddc_service->ddc_pin,
3452 			&cmd))
3453 		result = num;
3454 
3455 	kfree(cmd.payloads);
3456 	return result;
3457 }
3458 
3459 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
3460 {
3461 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
3462 }
3463 
3464 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
3465 	.master_xfer = amdgpu_dm_i2c_xfer,
3466 	.functionality = amdgpu_dm_i2c_func,
3467 };
3468 
3469 static struct amdgpu_i2c_adapter *
3470 create_i2c(struct ddc_service *ddc_service,
3471 	   int link_index,
3472 	   int *res)
3473 {
3474 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
3475 	struct amdgpu_i2c_adapter *i2c;
3476 
3477 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
3478 	if (!i2c)
3479 		return NULL;
3480 	i2c->base.owner = THIS_MODULE;
3481 	i2c->base.class = I2C_CLASS_DDC;
3482 	i2c->base.dev.parent = &adev->pdev->dev;
3483 	i2c->base.algo = &amdgpu_dm_i2c_algo;
3484 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
3485 	i2c_set_adapdata(&i2c->base, i2c);
3486 	i2c->ddc_service = ddc_service;
3487 
3488 	return i2c;
3489 }
3490 
3491 /* Note: this function assumes that dc_link_detect() was called for the
3492  * dc_link which will be represented by this aconnector.
3493  */
3494 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
3495 				    struct amdgpu_dm_connector *aconnector,
3496 				    uint32_t link_index,
3497 				    struct amdgpu_encoder *aencoder)
3498 {
3499 	int res = 0;
3500 	int connector_type;
3501 	struct dc *dc = dm->dc;
3502 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
3503 	struct amdgpu_i2c_adapter *i2c;
3504 
3505 	link->priv = aconnector;
3506 
3507 	DRM_DEBUG_DRIVER("%s()\n", __func__);
3508 
3509 	i2c = create_i2c(link->ddc, link->link_index, &res);
3510 	if (!i2c) {
3511 		DRM_ERROR("Failed to create i2c adapter data\n");
3512 		return -ENOMEM;
3513 	}
3514 
3515 	aconnector->i2c = i2c;
3516 	res = i2c_add_adapter(&i2c->base);
3517 
3518 	if (res) {
3519 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
3520 		goto out_free;
3521 	}
3522 
3523 	connector_type = to_drm_connector_type(link->connector_signal);
3524 
3525 	res = drm_connector_init(
3526 			dm->ddev,
3527 			&aconnector->base,
3528 			&amdgpu_dm_connector_funcs,
3529 			connector_type);
3530 
3531 	if (res) {
3532 		DRM_ERROR("connector_init failed\n");
3533 		aconnector->connector_id = -1;
3534 		goto out_free;
3535 	}
3536 
3537 	drm_connector_helper_add(
3538 			&aconnector->base,
3539 			&amdgpu_dm_connector_helper_funcs);
3540 
3541 	if (aconnector->base.funcs->reset)
3542 		aconnector->base.funcs->reset(&aconnector->base);
3543 
3544 	amdgpu_dm_connector_init_helper(
3545 		dm,
3546 		aconnector,
3547 		connector_type,
3548 		link,
3549 		link_index);
3550 
3551 	drm_mode_connector_attach_encoder(
3552 		&aconnector->base, &aencoder->base);
3553 
3554 	drm_connector_register(&aconnector->base);
3555 
3556 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
3557 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
3558 		amdgpu_dm_initialize_dp_connector(dm, aconnector);
3559 
3560 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3561 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3562 
3563 	/* NOTE: this currently will create backlight device even if a panel
3564 	 * is not connected to the eDP/LVDS connector.
3565 	 *
3566 	 * This is less than ideal but we don't have sink information at this
3567 	 * stage since detection happens after. We can't do detection earlier
3568 	 * since MST detection needs connectors to be created first.
3569 	 */
3570 	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
3571 		/* Event if registration failed, we should continue with
3572 		 * DM initialization because not having a backlight control
3573 		 * is better then a black screen.
3574 		 */
3575 		amdgpu_dm_register_backlight_device(dm);
3576 
3577 		if (dm->backlight_dev)
3578 			dm->backlight_link = link;
3579 	}
3580 #endif
3581 
3582 out_free:
3583 	if (res) {
3584 		kfree(i2c);
3585 		aconnector->i2c = NULL;
3586 	}
3587 	return res;
3588 }
3589 
3590 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
3591 {
3592 	switch (adev->mode_info.num_crtc) {
3593 	case 1:
3594 		return 0x1;
3595 	case 2:
3596 		return 0x3;
3597 	case 3:
3598 		return 0x7;
3599 	case 4:
3600 		return 0xf;
3601 	case 5:
3602 		return 0x1f;
3603 	case 6:
3604 	default:
3605 		return 0x3f;
3606 	}
3607 }
3608 
3609 static int amdgpu_dm_encoder_init(struct drm_device *dev,
3610 				  struct amdgpu_encoder *aencoder,
3611 				  uint32_t link_index)
3612 {
3613 	struct amdgpu_device *adev = dev->dev_private;
3614 
3615 	int res = drm_encoder_init(dev,
3616 				   &aencoder->base,
3617 				   &amdgpu_dm_encoder_funcs,
3618 				   DRM_MODE_ENCODER_TMDS,
3619 				   NULL);
3620 
3621 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
3622 
3623 	if (!res)
3624 		aencoder->encoder_id = link_index;
3625 	else
3626 		aencoder->encoder_id = -1;
3627 
3628 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
3629 
3630 	return res;
3631 }
3632 
3633 static void manage_dm_interrupts(struct amdgpu_device *adev,
3634 				 struct amdgpu_crtc *acrtc,
3635 				 bool enable)
3636 {
3637 	/*
3638 	 * this is not correct translation but will work as soon as VBLANK
3639 	 * constant is the same as PFLIP
3640 	 */
3641 	int irq_type =
3642 		amdgpu_crtc_idx_to_irq_type(
3643 			adev,
3644 			acrtc->crtc_id);
3645 
3646 	if (enable) {
3647 		drm_crtc_vblank_on(&acrtc->base);
3648 		amdgpu_irq_get(
3649 			adev,
3650 			&adev->pageflip_irq,
3651 			irq_type);
3652 	} else {
3653 
3654 		amdgpu_irq_put(
3655 			adev,
3656 			&adev->pageflip_irq,
3657 			irq_type);
3658 		drm_crtc_vblank_off(&acrtc->base);
3659 	}
3660 }
3661 
3662 static bool
3663 is_scaling_state_different(const struct dm_connector_state *dm_state,
3664 			   const struct dm_connector_state *old_dm_state)
3665 {
3666 	if (dm_state->scaling != old_dm_state->scaling)
3667 		return true;
3668 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
3669 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
3670 			return true;
3671 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
3672 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
3673 			return true;
3674 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
3675 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
3676 		return true;
3677 	return false;
3678 }
3679 
3680 static void remove_stream(struct amdgpu_device *adev,
3681 			  struct amdgpu_crtc *acrtc,
3682 			  struct dc_stream_state *stream)
3683 {
3684 	/* this is the update mode case */
3685 	if (adev->dm.freesync_module)
3686 		mod_freesync_remove_stream(adev->dm.freesync_module, stream);
3687 
3688 	acrtc->otg_inst = -1;
3689 	acrtc->enabled = false;
3690 }
3691 
3692 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
3693 			       struct dc_cursor_position *position)
3694 {
3695 	struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc);
3696 	int x, y;
3697 	int xorigin = 0, yorigin = 0;
3698 
3699 	if (!crtc || !plane->state->fb) {
3700 		position->enable = false;
3701 		position->x = 0;
3702 		position->y = 0;
3703 		return 0;
3704 	}
3705 
3706 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
3707 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
3708 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3709 			  __func__,
3710 			  plane->state->crtc_w,
3711 			  plane->state->crtc_h);
3712 		return -EINVAL;
3713 	}
3714 
3715 	x = plane->state->crtc_x;
3716 	y = plane->state->crtc_y;
3717 	/* avivo cursor are offset into the total surface */
3718 	x += crtc->primary->state->src_x >> 16;
3719 	y += crtc->primary->state->src_y >> 16;
3720 	if (x < 0) {
3721 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
3722 		x = 0;
3723 	}
3724 	if (y < 0) {
3725 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
3726 		y = 0;
3727 	}
3728 	position->enable = true;
3729 	position->x = x;
3730 	position->y = y;
3731 	position->x_hotspot = xorigin;
3732 	position->y_hotspot = yorigin;
3733 
3734 	return 0;
3735 }
3736 
3737 static void handle_cursor_update(struct drm_plane *plane,
3738 				 struct drm_plane_state *old_plane_state)
3739 {
3740 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
3741 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
3742 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
3743 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3744 	uint64_t address = afb ? afb->address : 0;
3745 	struct dc_cursor_position position;
3746 	struct dc_cursor_attributes attributes;
3747 	int ret;
3748 
3749 	if (!plane->state->fb && !old_plane_state->fb)
3750 		return;
3751 
3752 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
3753 			 __func__,
3754 			 amdgpu_crtc->crtc_id,
3755 			 plane->state->crtc_w,
3756 			 plane->state->crtc_h);
3757 
3758 	ret = get_cursor_position(plane, crtc, &position);
3759 	if (ret)
3760 		return;
3761 
3762 	if (!position.enable) {
3763 		/* turn off cursor */
3764 		if (crtc_state && crtc_state->stream)
3765 			dc_stream_set_cursor_position(crtc_state->stream,
3766 						      &position);
3767 		return;
3768 	}
3769 
3770 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
3771 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
3772 
3773 	attributes.address.high_part = upper_32_bits(address);
3774 	attributes.address.low_part  = lower_32_bits(address);
3775 	attributes.width             = plane->state->crtc_w;
3776 	attributes.height            = plane->state->crtc_h;
3777 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
3778 	attributes.rotation_angle    = 0;
3779 	attributes.attribute_flags.value = 0;
3780 
3781 	attributes.pitch = attributes.width;
3782 
3783 	if (crtc_state->stream) {
3784 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
3785 							 &attributes))
3786 			DRM_ERROR("DC failed to set cursor attributes\n");
3787 
3788 		if (!dc_stream_set_cursor_position(crtc_state->stream,
3789 						   &position))
3790 			DRM_ERROR("DC failed to set cursor position\n");
3791 	}
3792 }
3793 
3794 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
3795 {
3796 
3797 	assert_spin_locked(&acrtc->base.dev->event_lock);
3798 	WARN_ON(acrtc->event);
3799 
3800 	acrtc->event = acrtc->base.state->event;
3801 
3802 	/* Set the flip status */
3803 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
3804 
3805 	/* Mark this event as consumed */
3806 	acrtc->base.state->event = NULL;
3807 
3808 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
3809 						 acrtc->crtc_id);
3810 }
3811 
3812 /*
3813  * Executes flip
3814  *
3815  * Waits on all BO's fences and for proper vblank count
3816  */
3817 static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
3818 			      struct drm_framebuffer *fb,
3819 			      uint32_t target,
3820 			      struct dc_state *state)
3821 {
3822 	unsigned long flags;
3823 	uint32_t target_vblank;
3824 	int r, vpos, hpos;
3825 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3826 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
3827 	struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
3828 	struct amdgpu_device *adev = crtc->dev->dev_private;
3829 	bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
3830 	struct dc_flip_addrs addr = { {0} };
3831 	/* TODO eliminate or rename surface_update */
3832 	struct dc_surface_update surface_updates[1] = { {0} };
3833 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3834 
3835 
3836 	/* Prepare wait for target vblank early - before the fence-waits */
3837 	target_vblank = target - drm_crtc_vblank_count(crtc) +
3838 			amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
3839 
3840 	/* TODO This might fail and hence better not used, wait
3841 	 * explicitly on fences instead
3842 	 * and in general should be called for
3843 	 * blocking commit to as per framework helpers
3844 	 */
3845 	r = amdgpu_bo_reserve(abo, true);
3846 	if (unlikely(r != 0)) {
3847 		DRM_ERROR("failed to reserve buffer before flip\n");
3848 		WARN_ON(1);
3849 	}
3850 
3851 	/* Wait for all fences on this FB */
3852 	WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
3853 								    MAX_SCHEDULE_TIMEOUT) < 0);
3854 
3855 	amdgpu_bo_unreserve(abo);
3856 
3857 	/* Wait until we're out of the vertical blank period before the one
3858 	 * targeted by the flip
3859 	 */
3860 	while ((acrtc->enabled &&
3861 		(amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
3862 					&vpos, &hpos, NULL, NULL,
3863 					&crtc->hwmode)
3864 		 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
3865 		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
3866 		(int)(target_vblank -
3867 		  amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
3868 		usleep_range(1000, 1100);
3869 	}
3870 
3871 	/* Flip */
3872 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
3873 	/* update crtc fb */
3874 	crtc->primary->fb = fb;
3875 
3876 	WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
3877 	WARN_ON(!acrtc_state->stream);
3878 
3879 	addr.address.grph.addr.low_part = lower_32_bits(afb->address);
3880 	addr.address.grph.addr.high_part = upper_32_bits(afb->address);
3881 	addr.flip_immediate = async_flip;
3882 
3883 
3884 	if (acrtc->base.state->event)
3885 		prepare_flip_isr(acrtc);
3886 
3887 	surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
3888 	surface_updates->flip_addr = &addr;
3889 
3890 
3891 	dc_commit_updates_for_stream(adev->dm.dc,
3892 					     surface_updates,
3893 					     1,
3894 					     acrtc_state->stream,
3895 					     NULL,
3896 					     &surface_updates->surface,
3897 					     state);
3898 
3899 	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
3900 			 __func__,
3901 			 addr.address.grph.addr.high_part,
3902 			 addr.address.grph.addr.low_part);
3903 
3904 
3905 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3906 }
3907 
3908 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
3909 				    struct drm_device *dev,
3910 				    struct amdgpu_display_manager *dm,
3911 				    struct drm_crtc *pcrtc,
3912 				    bool *wait_for_vblank)
3913 {
3914 	uint32_t i;
3915 	struct drm_plane *plane;
3916 	struct drm_plane_state *old_plane_state, *new_plane_state;
3917 	struct dc_stream_state *dc_stream_attach;
3918 	struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
3919 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
3920 	struct drm_crtc_state *new_pcrtc_state =
3921 			drm_atomic_get_new_crtc_state(state, pcrtc);
3922 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
3923 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3924 	int planes_count = 0;
3925 	unsigned long flags;
3926 
3927 	/* update planes when needed */
3928 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
3929 		struct drm_crtc *crtc = new_plane_state->crtc;
3930 		struct drm_crtc_state *new_crtc_state;
3931 		struct drm_framebuffer *fb = new_plane_state->fb;
3932 		bool pflip_needed;
3933 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
3934 
3935 		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
3936 			handle_cursor_update(plane, old_plane_state);
3937 			continue;
3938 		}
3939 
3940 		if (!fb || !crtc || pcrtc != crtc)
3941 			continue;
3942 
3943 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
3944 		if (!new_crtc_state->active)
3945 			continue;
3946 
3947 		pflip_needed = !state->allow_modeset;
3948 
3949 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
3950 		if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
3951 			DRM_ERROR("%s: acrtc %d, already busy\n",
3952 				  __func__,
3953 				  acrtc_attach->crtc_id);
3954 			/* In commit tail framework this cannot happen */
3955 			WARN_ON(1);
3956 		}
3957 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3958 
3959 		if (!pflip_needed) {
3960 			WARN_ON(!dm_new_plane_state->dc_state);
3961 
3962 			plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
3963 
3964 			dc_stream_attach = acrtc_state->stream;
3965 			planes_count++;
3966 
3967 		} else if (new_crtc_state->planes_changed) {
3968 			/* Assume even ONE crtc with immediate flip means
3969 			 * entire can't wait for VBLANK
3970 			 * TODO Check if it's correct
3971 			 */
3972 			*wait_for_vblank =
3973 					new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
3974 				false : true;
3975 
3976 			/* TODO: Needs rework for multiplane flip */
3977 			if (plane->type == DRM_PLANE_TYPE_PRIMARY)
3978 				drm_crtc_vblank_get(crtc);
3979 
3980 			amdgpu_dm_do_flip(
3981 				crtc,
3982 				fb,
3983 				drm_crtc_vblank_count(crtc) + *wait_for_vblank,
3984 				dm_state->context);
3985 		}
3986 
3987 	}
3988 
3989 	if (planes_count) {
3990 		unsigned long flags;
3991 
3992 		if (new_pcrtc_state->event) {
3993 
3994 			drm_crtc_vblank_get(pcrtc);
3995 
3996 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
3997 			prepare_flip_isr(acrtc_attach);
3998 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
3999 		}
4000 
4001 		if (false == dc_commit_planes_to_stream(dm->dc,
4002 							plane_states_constructed,
4003 							planes_count,
4004 							dc_stream_attach,
4005 							dm_state->context))
4006 			dm_error("%s: Failed to attach plane!\n", __func__);
4007 	} else {
4008 		/*TODO BUG Here should go disable planes on CRTC. */
4009 	}
4010 }
4011 
4012 /**
4013  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4014  * @crtc_state: the DRM CRTC state
4015  * @stream_state: the DC stream state.
4016  *
4017  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4018  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
4019  */
4020 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
4021 						struct dc_stream_state *stream_state)
4022 {
4023 	stream_state->mode_changed = crtc_state->mode_changed;
4024 }
4025 
4026 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
4027 				   struct drm_atomic_state *state,
4028 				   bool nonblock)
4029 {
4030 	struct drm_crtc *crtc;
4031 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4032 	struct amdgpu_device *adev = dev->dev_private;
4033 	int i;
4034 
4035 	/*
4036 	 * We evade vblanks and pflips on crtc that
4037 	 * should be changed. We do it here to flush & disable
4038 	 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4039 	 * it will update crtc->dm_crtc_state->stream pointer which is used in
4040 	 * the ISRs.
4041 	 */
4042 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4043 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4044 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4045 
4046 		if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
4047 			manage_dm_interrupts(adev, acrtc, false);
4048 	}
4049 	/* Add check here for SoC's that support hardware cursor plane, to
4050 	 * unset legacy_cursor_update */
4051 
4052 	return drm_atomic_helper_commit(dev, state, nonblock);
4053 
4054 	/*TODO Handle EINTR, reenable IRQ*/
4055 }
4056 
4057 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
4058 {
4059 	struct drm_device *dev = state->dev;
4060 	struct amdgpu_device *adev = dev->dev_private;
4061 	struct amdgpu_display_manager *dm = &adev->dm;
4062 	struct dm_atomic_state *dm_state;
4063 	uint32_t i, j;
4064 	struct drm_crtc *crtc;
4065 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4066 	unsigned long flags;
4067 	bool wait_for_vblank = true;
4068 	struct drm_connector *connector;
4069 	struct drm_connector_state *old_con_state, *new_con_state;
4070 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4071 
4072 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
4073 
4074 	dm_state = to_dm_atomic_state(state);
4075 
4076 	/* update changed items */
4077 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4078 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4079 
4080 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4081 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4082 
4083 		DRM_DEBUG_DRIVER(
4084 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4085 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
4086 			"connectors_changed:%d\n",
4087 			acrtc->crtc_id,
4088 			new_crtc_state->enable,
4089 			new_crtc_state->active,
4090 			new_crtc_state->planes_changed,
4091 			new_crtc_state->mode_changed,
4092 			new_crtc_state->active_changed,
4093 			new_crtc_state->connectors_changed);
4094 
4095 		/* Copy all transient state flags into dc state */
4096 		if (dm_new_crtc_state->stream) {
4097 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
4098 							    dm_new_crtc_state->stream);
4099 		}
4100 
4101 		/* handles headless hotplug case, updating new_state and
4102 		 * aconnector as needed
4103 		 */
4104 
4105 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
4106 
4107 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
4108 
4109 			if (!dm_new_crtc_state->stream) {
4110 				/*
4111 				 * this could happen because of issues with
4112 				 * userspace notifications delivery.
4113 				 * In this case userspace tries to set mode on
4114 				 * display which is disconnect in fact.
4115 				 * dc_sink in NULL in this case on aconnector.
4116 				 * We expect reset mode will come soon.
4117 				 *
4118 				 * This can also happen when unplug is done
4119 				 * during resume sequence ended
4120 				 *
4121 				 * In this case, we want to pretend we still
4122 				 * have a sink to keep the pipe running so that
4123 				 * hw state is consistent with the sw state
4124 				 */
4125 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4126 						__func__, acrtc->base.base.id);
4127 				continue;
4128 			}
4129 
4130 			if (dm_old_crtc_state->stream)
4131 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4132 
4133 			acrtc->enabled = true;
4134 			acrtc->hw_mode = new_crtc_state->mode;
4135 			crtc->hwmode = new_crtc_state->mode;
4136 		} else if (modereset_required(new_crtc_state)) {
4137 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
4138 
4139 			/* i.e. reset mode */
4140 			if (dm_old_crtc_state->stream)
4141 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4142 		}
4143 	} /* for_each_crtc_in_state() */
4144 
4145 	/*
4146 	 * Add streams after required streams from new and replaced streams
4147 	 * are removed from freesync module
4148 	 */
4149 	if (adev->dm.freesync_module) {
4150 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
4151 					      new_crtc_state, i) {
4152 			struct amdgpu_dm_connector *aconnector = NULL;
4153 			struct dm_connector_state *dm_new_con_state = NULL;
4154 			struct amdgpu_crtc *acrtc = NULL;
4155 			bool modeset_needed;
4156 
4157 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4158 			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4159 			modeset_needed = modeset_required(
4160 					new_crtc_state,
4161 					dm_new_crtc_state->stream,
4162 					dm_old_crtc_state->stream);
4163 			/* We add stream to freesync if:
4164 			 * 1. Said stream is not null, and
4165 			 * 2. A modeset is requested. This means that the
4166 			 *    stream was removed previously, and needs to be
4167 			 *    replaced.
4168 			 */
4169 			if (dm_new_crtc_state->stream == NULL ||
4170 					!modeset_needed)
4171 				continue;
4172 
4173 			acrtc = to_amdgpu_crtc(crtc);
4174 
4175 			aconnector =
4176 				amdgpu_dm_find_first_crtc_matching_connector(
4177 					state, crtc);
4178 			if (!aconnector) {
4179 				DRM_DEBUG_DRIVER("Atomic commit: Failed to "
4180 						 "find connector for acrtc "
4181 						 "id:%d skipping freesync "
4182 						 "init\n",
4183 						 acrtc->crtc_id);
4184 				continue;
4185 			}
4186 
4187 			mod_freesync_add_stream(adev->dm.freesync_module,
4188 						dm_new_crtc_state->stream,
4189 						&aconnector->caps);
4190 			new_con_state = drm_atomic_get_new_connector_state(
4191 					state, &aconnector->base);
4192 			dm_new_con_state = to_dm_connector_state(new_con_state);
4193 
4194 			mod_freesync_set_user_enable(adev->dm.freesync_module,
4195 						     &dm_new_crtc_state->stream,
4196 						     1,
4197 						     &dm_new_con_state->user_enable);
4198 		}
4199 	}
4200 
4201 	if (dm_state->context) {
4202 		dm_enable_per_frame_crtc_master_sync(dm_state->context);
4203 		WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
4204 	}
4205 
4206 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4207 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4208 
4209 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4210 
4211 		if (dm_new_crtc_state->stream != NULL) {
4212 			const struct dc_stream_status *status =
4213 					dc_stream_get_status(dm_new_crtc_state->stream);
4214 
4215 			if (!status)
4216 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
4217 			else
4218 				acrtc->otg_inst = status->primary_otg_inst;
4219 		}
4220 	}
4221 
4222 	/* Handle scaling and underscan changes*/
4223 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4224 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4225 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4226 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4227 		struct dc_stream_status *status = NULL;
4228 
4229 		if (acrtc)
4230 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
4231 
4232 		/* Skip any modesets/resets */
4233 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
4234 			continue;
4235 
4236 		/* Skip any thing not scale or underscan changes */
4237 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4238 			continue;
4239 
4240 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4241 
4242 		update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
4243 				dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
4244 
4245 		if (!dm_new_crtc_state->stream)
4246 			continue;
4247 
4248 		status = dc_stream_get_status(dm_new_crtc_state->stream);
4249 		WARN_ON(!status);
4250 		WARN_ON(!status->plane_count);
4251 
4252 		/*TODO How it works with MPO ?*/
4253 		if (!dc_commit_planes_to_stream(
4254 				dm->dc,
4255 				status->plane_states,
4256 				status->plane_count,
4257 				dm_new_crtc_state->stream,
4258 				dm_state->context))
4259 			dm_error("%s: Failed to update stream scaling!\n", __func__);
4260 	}
4261 
4262 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
4263 			new_crtc_state, i) {
4264 		/*
4265 		 * loop to enable interrupts on newly arrived crtc
4266 		 */
4267 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4268 		bool modeset_needed;
4269 
4270 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4271 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4272 		modeset_needed = modeset_required(
4273 				new_crtc_state,
4274 				dm_new_crtc_state->stream,
4275 				dm_old_crtc_state->stream);
4276 
4277 		if (dm_new_crtc_state->stream == NULL || !modeset_needed)
4278 			continue;
4279 
4280 		if (adev->dm.freesync_module)
4281 			mod_freesync_notify_mode_change(
4282 				adev->dm.freesync_module,
4283 				&dm_new_crtc_state->stream, 1);
4284 
4285 		manage_dm_interrupts(adev, acrtc, true);
4286 	}
4287 
4288 	/* update planes when needed per crtc*/
4289 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
4290 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4291 
4292 		if (dm_new_crtc_state->stream)
4293 			amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
4294 	}
4295 
4296 
4297 	/*
4298 	 * send vblank event on all events not handled in flip and
4299 	 * mark consumed event for drm_atomic_helper_commit_hw_done
4300 	 */
4301 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
4302 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4303 
4304 		if (new_crtc_state->event)
4305 			drm_send_event_locked(dev, &new_crtc_state->event->base);
4306 
4307 		new_crtc_state->event = NULL;
4308 	}
4309 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
4310 
4311 	/* Signal HW programming completion */
4312 	drm_atomic_helper_commit_hw_done(state);
4313 
4314 	if (wait_for_vblank)
4315 		drm_atomic_helper_wait_for_flip_done(dev, state);
4316 
4317 	drm_atomic_helper_cleanup_planes(dev, state);
4318 }
4319 
4320 
4321 static int dm_force_atomic_commit(struct drm_connector *connector)
4322 {
4323 	int ret = 0;
4324 	struct drm_device *ddev = connector->dev;
4325 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
4326 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4327 	struct drm_plane *plane = disconnected_acrtc->base.primary;
4328 	struct drm_connector_state *conn_state;
4329 	struct drm_crtc_state *crtc_state;
4330 	struct drm_plane_state *plane_state;
4331 
4332 	if (!state)
4333 		return -ENOMEM;
4334 
4335 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
4336 
4337 	/* Construct an atomic state to restore previous display setting */
4338 
4339 	/*
4340 	 * Attach connectors to drm_atomic_state
4341 	 */
4342 	conn_state = drm_atomic_get_connector_state(state, connector);
4343 
4344 	ret = PTR_ERR_OR_ZERO(conn_state);
4345 	if (ret)
4346 		goto err;
4347 
4348 	/* Attach crtc to drm_atomic_state*/
4349 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
4350 
4351 	ret = PTR_ERR_OR_ZERO(crtc_state);
4352 	if (ret)
4353 		goto err;
4354 
4355 	/* force a restore */
4356 	crtc_state->mode_changed = true;
4357 
4358 	/* Attach plane to drm_atomic_state */
4359 	plane_state = drm_atomic_get_plane_state(state, plane);
4360 
4361 	ret = PTR_ERR_OR_ZERO(plane_state);
4362 	if (ret)
4363 		goto err;
4364 
4365 
4366 	/* Call commit internally with the state we just constructed */
4367 	ret = drm_atomic_commit(state);
4368 	if (!ret)
4369 		return 0;
4370 
4371 err:
4372 	DRM_ERROR("Restoring old state failed with %i\n", ret);
4373 	drm_atomic_state_put(state);
4374 
4375 	return ret;
4376 }
4377 
4378 /*
4379  * This functions handle all cases when set mode does not come upon hotplug.
4380  * This include when the same display is unplugged then plugged back into the
4381  * same port and when we are running without usermode desktop manager supprot
4382  */
4383 void dm_restore_drm_connector_state(struct drm_device *dev,
4384 				    struct drm_connector *connector)
4385 {
4386 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4387 	struct amdgpu_crtc *disconnected_acrtc;
4388 	struct dm_crtc_state *acrtc_state;
4389 
4390 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
4391 		return;
4392 
4393 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4394 	if (!disconnected_acrtc)
4395 		return;
4396 
4397 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
4398 	if (!acrtc_state->stream)
4399 		return;
4400 
4401 	/*
4402 	 * If the previous sink is not released and different from the current,
4403 	 * we deduce we are in a state where we can not rely on usermode call
4404 	 * to turn on the display, so we do it here
4405 	 */
4406 	if (acrtc_state->stream->sink != aconnector->dc_sink)
4407 		dm_force_atomic_commit(&aconnector->base);
4408 }
4409 
4410 /*`
4411  * Grabs all modesetting locks to serialize against any blocking commits,
4412  * Waits for completion of all non blocking commits.
4413  */
4414 static int do_aquire_global_lock(struct drm_device *dev,
4415 				 struct drm_atomic_state *state)
4416 {
4417 	struct drm_crtc *crtc;
4418 	struct drm_crtc_commit *commit;
4419 	long ret;
4420 
4421 	/* Adding all modeset locks to aquire_ctx will
4422 	 * ensure that when the framework release it the
4423 	 * extra locks we are locking here will get released to
4424 	 */
4425 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
4426 	if (ret)
4427 		return ret;
4428 
4429 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4430 		spin_lock(&crtc->commit_lock);
4431 		commit = list_first_entry_or_null(&crtc->commit_list,
4432 				struct drm_crtc_commit, commit_entry);
4433 		if (commit)
4434 			drm_crtc_commit_get(commit);
4435 		spin_unlock(&crtc->commit_lock);
4436 
4437 		if (!commit)
4438 			continue;
4439 
4440 		/* Make sure all pending HW programming completed and
4441 		 * page flips done
4442 		 */
4443 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
4444 
4445 		if (ret > 0)
4446 			ret = wait_for_completion_interruptible_timeout(
4447 					&commit->flip_done, 10*HZ);
4448 
4449 		if (ret == 0)
4450 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4451 				  "timed out\n", crtc->base.id, crtc->name);
4452 
4453 		drm_crtc_commit_put(commit);
4454 	}
4455 
4456 	return ret < 0 ? ret : 0;
4457 }
4458 
4459 static int dm_update_crtcs_state(struct dc *dc,
4460 				 struct drm_atomic_state *state,
4461 				 bool enable,
4462 				 bool *lock_and_validation_needed)
4463 {
4464 	struct drm_crtc *crtc;
4465 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4466 	int i;
4467 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4468 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4469 	struct dc_stream_state *new_stream;
4470 	int ret = 0;
4471 
4472 	/*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4473 	/* update changed items */
4474 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4475 		struct amdgpu_crtc *acrtc = NULL;
4476 		struct amdgpu_dm_connector *aconnector = NULL;
4477 		struct drm_connector_state *new_con_state = NULL;
4478 		struct dm_connector_state *dm_conn_state = NULL;
4479 
4480 		new_stream = NULL;
4481 
4482 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4483 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4484 		acrtc = to_amdgpu_crtc(crtc);
4485 
4486 		aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
4487 
4488 		/* TODO This hack should go away */
4489 		if (aconnector && enable) {
4490 			// Make sure fake sink is created in plug-in scenario
4491 			new_con_state = drm_atomic_get_connector_state(state,
4492  								    &aconnector->base);
4493 
4494 			if (IS_ERR(new_con_state)) {
4495 				ret = PTR_ERR_OR_ZERO(new_con_state);
4496 				break;
4497 			}
4498 
4499 			dm_conn_state = to_dm_connector_state(new_con_state);
4500 
4501 			new_stream = create_stream_for_sink(aconnector,
4502 							     &new_crtc_state->mode,
4503 							    dm_conn_state);
4504 
4505 			/*
4506 			 * we can have no stream on ACTION_SET if a display
4507 			 * was disconnected during S3, in this case it not and
4508 			 * error, the OS will be updated after detection, and
4509 			 * do the right thing on next atomic commit
4510 			 */
4511 
4512 			if (!new_stream) {
4513 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4514 						__func__, acrtc->base.base.id);
4515 				break;
4516 			}
4517 		}
4518 
4519 		if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4520 				dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
4521 
4522 			new_crtc_state->mode_changed = false;
4523 
4524 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
4525 				         new_crtc_state->mode_changed);
4526 		}
4527 
4528 
4529 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
4530 			goto next_crtc;
4531 
4532 		DRM_DEBUG_DRIVER(
4533 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4534 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
4535 			"connectors_changed:%d\n",
4536 			acrtc->crtc_id,
4537 			new_crtc_state->enable,
4538 			new_crtc_state->active,
4539 			new_crtc_state->planes_changed,
4540 			new_crtc_state->mode_changed,
4541 			new_crtc_state->active_changed,
4542 			new_crtc_state->connectors_changed);
4543 
4544 		/* Remove stream for any changed/disabled CRTC */
4545 		if (!enable) {
4546 
4547 			if (!dm_old_crtc_state->stream)
4548 				goto next_crtc;
4549 
4550 			DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
4551 					crtc->base.id);
4552 
4553 			/* i.e. reset mode */
4554 			if (dc_remove_stream_from_ctx(
4555 					dc,
4556 					dm_state->context,
4557 					dm_old_crtc_state->stream) != DC_OK) {
4558 				ret = -EINVAL;
4559 				goto fail;
4560 			}
4561 
4562 			dc_stream_release(dm_old_crtc_state->stream);
4563 			dm_new_crtc_state->stream = NULL;
4564 
4565 			*lock_and_validation_needed = true;
4566 
4567 		} else {/* Add stream for any updated/enabled CRTC */
4568 			/*
4569 			 * Quick fix to prevent NULL pointer on new_stream when
4570 			 * added MST connectors not found in existing crtc_state in the chained mode
4571 			 * TODO: need to dig out the root cause of that
4572 			 */
4573 			if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
4574 				goto next_crtc;
4575 
4576 			if (modereset_required(new_crtc_state))
4577 				goto next_crtc;
4578 
4579 			if (modeset_required(new_crtc_state, new_stream,
4580 					     dm_old_crtc_state->stream)) {
4581 
4582 				WARN_ON(dm_new_crtc_state->stream);
4583 
4584 				dm_new_crtc_state->stream = new_stream;
4585 
4586 				dc_stream_retain(new_stream);
4587 
4588 				DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
4589 							crtc->base.id);
4590 
4591 				if (dc_add_stream_to_ctx(
4592 						dc,
4593 						dm_state->context,
4594 						dm_new_crtc_state->stream) != DC_OK) {
4595 					ret = -EINVAL;
4596 					goto fail;
4597 				}
4598 
4599 				*lock_and_validation_needed = true;
4600 			}
4601 		}
4602 
4603 next_crtc:
4604 		/* Release extra reference */
4605 		if (new_stream)
4606 			 dc_stream_release(new_stream);
4607 	}
4608 
4609 	return ret;
4610 
4611 fail:
4612 	if (new_stream)
4613 		dc_stream_release(new_stream);
4614 	return ret;
4615 }
4616 
4617 static int dm_update_planes_state(struct dc *dc,
4618 				  struct drm_atomic_state *state,
4619 				  bool enable,
4620 				  bool *lock_and_validation_needed)
4621 {
4622 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
4623 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4624 	struct drm_plane *plane;
4625 	struct drm_plane_state *old_plane_state, *new_plane_state;
4626 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
4627 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4628 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
4629 	int i ;
4630 	/* TODO return page_flip_needed() function */
4631 	bool pflip_needed  = !state->allow_modeset;
4632 	int ret = 0;
4633 
4634 	if (pflip_needed)
4635 		return ret;
4636 
4637 	/* Add new planes */
4638 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4639 		new_plane_crtc = new_plane_state->crtc;
4640 		old_plane_crtc = old_plane_state->crtc;
4641 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
4642 		dm_old_plane_state = to_dm_plane_state(old_plane_state);
4643 
4644 		/*TODO Implement atomic check for cursor plane */
4645 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
4646 			continue;
4647 
4648 		/* Remove any changed/removed planes */
4649 		if (!enable) {
4650 
4651 			if (!old_plane_crtc)
4652 				continue;
4653 
4654 			old_crtc_state = drm_atomic_get_old_crtc_state(
4655 					state, old_plane_crtc);
4656 			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4657 
4658 			if (!dm_old_crtc_state->stream)
4659 				continue;
4660 
4661 			DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
4662 					plane->base.id, old_plane_crtc->base.id);
4663 
4664 			if (!dc_remove_plane_from_context(
4665 					dc,
4666 					dm_old_crtc_state->stream,
4667 					dm_old_plane_state->dc_state,
4668 					dm_state->context)) {
4669 
4670 				ret = EINVAL;
4671 				return ret;
4672 			}
4673 
4674 
4675 			dc_plane_state_release(dm_old_plane_state->dc_state);
4676 			dm_new_plane_state->dc_state = NULL;
4677 
4678 			*lock_and_validation_needed = true;
4679 
4680 		} else { /* Add new planes */
4681 
4682 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4683 				continue;
4684 
4685 			if (!new_plane_crtc)
4686 				continue;
4687 
4688 			new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
4689 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4690 
4691 			if (!dm_new_crtc_state->stream)
4692 				continue;
4693 
4694 
4695 			WARN_ON(dm_new_plane_state->dc_state);
4696 
4697 			dm_new_plane_state->dc_state = dc_create_plane_state(dc);
4698 
4699 			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4700 					plane->base.id, new_plane_crtc->base.id);
4701 
4702 			if (!dm_new_plane_state->dc_state) {
4703 				ret = -EINVAL;
4704 				return ret;
4705 			}
4706 
4707 			ret = fill_plane_attributes(
4708 				new_plane_crtc->dev->dev_private,
4709 				dm_new_plane_state->dc_state,
4710 				new_plane_state,
4711 				new_crtc_state);
4712 			if (ret)
4713 				return ret;
4714 
4715 
4716 			if (!dc_add_plane_to_context(
4717 					dc,
4718 					dm_new_crtc_state->stream,
4719 					dm_new_plane_state->dc_state,
4720 					dm_state->context)) {
4721 
4722 				ret = -EINVAL;
4723 				return ret;
4724 			}
4725 
4726 			/* Tell DC to do a full surface update every time there
4727 			 * is a plane change. Inefficient, but works for now.
4728 			 */
4729 			dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
4730 
4731 			*lock_and_validation_needed = true;
4732 		}
4733 	}
4734 
4735 
4736 	return ret;
4737 }
4738 
4739 static int amdgpu_dm_atomic_check(struct drm_device *dev,
4740 				  struct drm_atomic_state *state)
4741 {
4742 	int i;
4743 	int ret;
4744 	struct amdgpu_device *adev = dev->dev_private;
4745 	struct dc *dc = adev->dm.dc;
4746 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4747 	struct drm_connector *connector;
4748 	struct drm_connector_state *old_con_state, *new_con_state;
4749 	struct drm_crtc *crtc;
4750 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4751 
4752 	/*
4753 	 * This bool will be set for true for any modeset/reset
4754 	 * or plane update which implies non fast surface update.
4755 	 */
4756 	bool lock_and_validation_needed = false;
4757 
4758 	ret = drm_atomic_helper_check_modeset(dev, state);
4759 	if (ret)
4760 		goto fail;
4761 
4762 	/*
4763 	 * legacy_cursor_update should be made false for SoC's having
4764 	 * a dedicated hardware plane for cursor in amdgpu_dm_atomic_commit(),
4765 	 * otherwise for software cursor plane,
4766 	 * we should not add it to list of affected planes.
4767 	 */
4768 	if (state->legacy_cursor_update) {
4769 		for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4770 			if (new_crtc_state->color_mgmt_changed) {
4771 				ret = drm_atomic_add_affected_planes(state, crtc);
4772 				if (ret)
4773 					goto fail;
4774 			}
4775 		}
4776 	} else {
4777 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4778 			if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
4779 					!new_crtc_state->color_mgmt_changed)
4780 				continue;
4781 
4782 			if (!new_crtc_state->enable)
4783 				continue;
4784 
4785 			ret = drm_atomic_add_affected_connectors(state, crtc);
4786 			if (ret)
4787 				return ret;
4788 
4789 			ret = drm_atomic_add_affected_planes(state, crtc);
4790 			if (ret)
4791 				goto fail;
4792 		}
4793 	}
4794 
4795 	dm_state->context = dc_create_state();
4796 	ASSERT(dm_state->context);
4797 	dc_resource_state_copy_construct_current(dc, dm_state->context);
4798 
4799 	/* Remove exiting planes if they are modified */
4800 	ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
4801 	if (ret) {
4802 		goto fail;
4803 	}
4804 
4805 	/* Disable all crtcs which require disable */
4806 	ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
4807 	if (ret) {
4808 		goto fail;
4809 	}
4810 
4811 	/* Enable all crtcs which require enable */
4812 	ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
4813 	if (ret) {
4814 		goto fail;
4815 	}
4816 
4817 	/* Add new/modified planes */
4818 	ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
4819 	if (ret) {
4820 		goto fail;
4821 	}
4822 
4823 	/* Run this here since we want to validate the streams we created */
4824 	ret = drm_atomic_helper_check_planes(dev, state);
4825 	if (ret)
4826 		goto fail;
4827 
4828 	/* Check scaling and underscan changes*/
4829 	/*TODO Removed scaling changes validation due to inability to commit
4830 	 * new stream into context w\o causing full reset. Need to
4831 	 * decide how to handle.
4832 	 */
4833 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4834 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4835 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4836 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4837 
4838 		/* Skip any modesets/resets */
4839 		if (!acrtc || drm_atomic_crtc_needs_modeset(
4840 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
4841 			continue;
4842 
4843 		/* Skip any thing not scale or underscan changes */
4844 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4845 			continue;
4846 
4847 		lock_and_validation_needed = true;
4848 	}
4849 
4850 	/*
4851 	 * For full updates case when
4852 	 * removing/adding/updating  streams on once CRTC while flipping
4853 	 * on another CRTC,
4854 	 * acquiring global lock  will guarantee that any such full
4855 	 * update commit
4856 	 * will wait for completion of any outstanding flip using DRMs
4857 	 * synchronization events.
4858 	 */
4859 
4860 	if (lock_and_validation_needed) {
4861 
4862 		ret = do_aquire_global_lock(dev, state);
4863 		if (ret)
4864 			goto fail;
4865 
4866 		if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
4867 			ret = -EINVAL;
4868 			goto fail;
4869 		}
4870 	}
4871 
4872 	/* Must be success */
4873 	WARN_ON(ret);
4874 	return ret;
4875 
4876 fail:
4877 	if (ret == -EDEADLK)
4878 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
4879 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
4880 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
4881 	else
4882 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
4883 
4884 	return ret;
4885 }
4886 
4887 static bool is_dp_capable_without_timing_msa(struct dc *dc,
4888 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
4889 {
4890 	uint8_t dpcd_data;
4891 	bool capable = false;
4892 
4893 	if (amdgpu_dm_connector->dc_link &&
4894 		dm_helpers_dp_read_dpcd(
4895 				NULL,
4896 				amdgpu_dm_connector->dc_link,
4897 				DP_DOWN_STREAM_PORT_COUNT,
4898 				&dpcd_data,
4899 				sizeof(dpcd_data))) {
4900 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
4901 	}
4902 
4903 	return capable;
4904 }
4905 void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
4906 					   struct edid *edid)
4907 {
4908 	int i;
4909 	uint64_t val_capable;
4910 	bool edid_check_required;
4911 	struct detailed_timing *timing;
4912 	struct detailed_non_pixel *data;
4913 	struct detailed_data_monitor_range *range;
4914 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4915 			to_amdgpu_dm_connector(connector);
4916 
4917 	struct drm_device *dev = connector->dev;
4918 	struct amdgpu_device *adev = dev->dev_private;
4919 
4920 	edid_check_required = false;
4921 	if (!amdgpu_dm_connector->dc_sink) {
4922 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
4923 		return;
4924 	}
4925 	if (!adev->dm.freesync_module)
4926 		return;
4927 	/*
4928 	 * if edid non zero restrict freesync only for dp and edp
4929 	 */
4930 	if (edid) {
4931 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
4932 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
4933 			edid_check_required = is_dp_capable_without_timing_msa(
4934 						adev->dm.dc,
4935 						amdgpu_dm_connector);
4936 		}
4937 	}
4938 	val_capable = 0;
4939 	if (edid_check_required == true && (edid->version > 1 ||
4940 	   (edid->version == 1 && edid->revision > 1))) {
4941 		for (i = 0; i < 4; i++) {
4942 
4943 			timing	= &edid->detailed_timings[i];
4944 			data	= &timing->data.other_data;
4945 			range	= &data->data.range;
4946 			/*
4947 			 * Check if monitor has continuous frequency mode
4948 			 */
4949 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
4950 				continue;
4951 			/*
4952 			 * Check for flag range limits only. If flag == 1 then
4953 			 * no additional timing information provided.
4954 			 * Default GTF, GTF Secondary curve and CVT are not
4955 			 * supported
4956 			 */
4957 			if (range->flags != 1)
4958 				continue;
4959 
4960 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
4961 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
4962 			amdgpu_dm_connector->pixel_clock_mhz =
4963 				range->pixel_clock_mhz * 10;
4964 			break;
4965 		}
4966 
4967 		if (amdgpu_dm_connector->max_vfreq -
4968 				amdgpu_dm_connector->min_vfreq > 10) {
4969 			amdgpu_dm_connector->caps.supported = true;
4970 			amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
4971 					amdgpu_dm_connector->min_vfreq * 1000000;
4972 			amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
4973 					amdgpu_dm_connector->max_vfreq * 1000000;
4974 				val_capable = 1;
4975 		}
4976 	}
4977 
4978 	/*
4979 	 * TODO figure out how to notify user-mode or DRM of freesync caps
4980 	 * once we figure out how to deal with freesync in an upstreamable
4981 	 * fashion
4982 	 */
4983 
4984 }
4985 
4986 void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
4987 {
4988 	/*
4989 	 * TODO fill in once we figure out how to deal with freesync in
4990 	 * an upstreamable fashion
4991 	 */
4992 }
4993