xref: /openbmc/linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision de167752a889d19b9bb018f8eecbc1ebbfe07b2f)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include "dm_services_types.h"
27 #include "dc.h"
28 #include "dc/inc/core_types.h"
29 
30 #include "vid.h"
31 #include "amdgpu.h"
32 #include "amdgpu_display.h"
33 #include "atom.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
36 
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
41 #include "amdgpu_dm_mst_types.h"
42 
43 #include "ivsrcid/ivsrcid_vislands30.h"
44 
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
49 #include <linux/pm_runtime.h>
50 
51 #include <drm/drmP.h>
52 #include <drm/drm_atomic.h>
53 #include <drm/drm_atomic_helper.h>
54 #include <drm/drm_dp_mst_helper.h>
55 #include <drm/drm_fb_helper.h>
56 #include <drm/drm_edid.h>
57 
58 #include "modules/inc/mod_freesync.h"
59 
60 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
61 #include "ivsrcid/irqsrcs_dcn_1_0.h"
62 
63 #include "dcn/dcn_1_0_offset.h"
64 #include "dcn/dcn_1_0_sh_mask.h"
65 #include "soc15_hw_ip.h"
66 #include "vega10_ip_offset.h"
67 
68 #include "soc15_common.h"
69 #endif
70 
73 #include "i2caux_interface.h"
74 
75 /* basic init/fini API */
76 static int amdgpu_dm_init(struct amdgpu_device *adev);
77 static void amdgpu_dm_fini(struct amdgpu_device *adev);
78 
79 /* initializes drm_device display related structures, based on the information
80  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
81  * drm_encoder, drm_mode_config
82  *
83  * Returns 0 on success
84  */
85 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
86 /* removes and deallocates the drm structures, created by the above function */
87 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
88 
89 static void
90 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
91 
92 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
93 				struct amdgpu_plane *aplane,
94 				unsigned long possible_crtcs);
95 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
96 			       struct drm_plane *plane,
97 			       uint32_t link_index);
98 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
99 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
100 				    uint32_t link_index,
101 				    struct amdgpu_encoder *amdgpu_encoder);
102 static int amdgpu_dm_encoder_init(struct drm_device *dev,
103 				  struct amdgpu_encoder *aencoder,
104 				  uint32_t link_index);
105 
106 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
107 
108 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
109 				   struct drm_atomic_state *state,
110 				   bool nonblock);
111 
112 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
113 
114 static int amdgpu_dm_atomic_check(struct drm_device *dev,
115 				  struct drm_atomic_state *state);
116 
117 
118 
119 
120 static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
121 	DRM_PLANE_TYPE_PRIMARY,
122 	DRM_PLANE_TYPE_PRIMARY,
123 	DRM_PLANE_TYPE_PRIMARY,
124 	DRM_PLANE_TYPE_PRIMARY,
125 	DRM_PLANE_TYPE_PRIMARY,
126 	DRM_PLANE_TYPE_PRIMARY,
127 };
128 
129 static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
130 	DRM_PLANE_TYPE_PRIMARY,
131 	DRM_PLANE_TYPE_PRIMARY,
132 	DRM_PLANE_TYPE_PRIMARY,
133 	DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
134 };
135 
136 static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
137 	DRM_PLANE_TYPE_PRIMARY,
138 	DRM_PLANE_TYPE_PRIMARY,
139 	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
140 };
141 
142 /*
143  * dm_vblank_get_counter
144  *
145  * @brief
146  * Get counter for number of vertical blanks
147  *
148  * @param
149  * struct amdgpu_device *adev - [in] desired amdgpu device
150  * int crtc - [in] which CRTC to get the counter from
151  *
152  * @return
153  * Counter for vertical blanks
154  */
155 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
156 {
157 	if (crtc >= adev->mode_info.num_crtc)
158 		return 0;
159 	else {
160 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
161 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
162 				acrtc->base.state);
163 
164 
165 		if (acrtc_state->stream == NULL) {
166 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
167 				  crtc);
168 			return 0;
169 		}
170 
171 		return dc_stream_get_vblank_counter(acrtc_state->stream);
172 	}
173 }
174 
175 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
176 				  u32 *vbl, u32 *position)
177 {
178 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
179 
180 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
181 		return -EINVAL;
182 	else {
183 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
184 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
185 						acrtc->base.state);
186 
187 		if (acrtc_state->stream ==  NULL) {
188 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
189 				  crtc);
190 			return 0;
191 		}
192 
193 		/*
194 		 * TODO rework base driver to use values directly.
195 		 * for now parse it back into reg-format
196 		 */
197 		dc_stream_get_scanoutpos(acrtc_state->stream,
198 					 &v_blank_start,
199 					 &v_blank_end,
200 					 &h_position,
201 					 &v_position);
202 
203 		*position = v_position | (h_position << 16);
204 		*vbl = v_blank_start | (v_blank_end << 16);
205 	}
206 
207 	return 0;
208 }
209 
210 static bool dm_is_idle(void *handle)
211 {
212 	/* XXX todo */
213 	return true;
214 }
215 
216 static int dm_wait_for_idle(void *handle)
217 {
218 	/* XXX todo */
219 	return 0;
220 }
221 
222 static bool dm_check_soft_reset(void *handle)
223 {
224 	return false;
225 }
226 
227 static int dm_soft_reset(void *handle)
228 {
229 	/* XXX todo */
230 	return 0;
231 }
232 
233 static struct amdgpu_crtc *
234 get_crtc_by_otg_inst(struct amdgpu_device *adev,
235 		     int otg_inst)
236 {
237 	struct drm_device *dev = adev->ddev;
238 	struct drm_crtc *crtc;
239 	struct amdgpu_crtc *amdgpu_crtc;
240 
241 	/*
242 	 * The following check is inherited from both functions where this one
243 	 * is now used. It still needs to be investigated why this can happen.
244 	 */
245 	if (otg_inst == -1) {
246 		WARN_ON(1);
247 		return adev->mode_info.crtcs[0];
248 	}
249 
250 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
251 		amdgpu_crtc = to_amdgpu_crtc(crtc);
252 
253 		if (amdgpu_crtc->otg_inst == otg_inst)
254 			return amdgpu_crtc;
255 	}
256 
257 	return NULL;
258 }
259 
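/*
 * Page flip completion handler: runs in high IRQ context when the hardware
 * signals that a submitted flip has taken effect. It sends the pending
 * vblank event to userspace and drops the vblank reference taken when the
 * flip was queued.
 */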
260 static void dm_pflip_high_irq(void *interrupt_params)
261 {
262 	struct amdgpu_crtc *amdgpu_crtc;
263 	struct common_irq_params *irq_params = interrupt_params;
264 	struct amdgpu_device *adev = irq_params->adev;
265 	unsigned long flags;
266 
267 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
268 
269 	/* IRQ could occur when in the initial stage */
270 	/* TODO: work and BO cleanup */
271 	if (amdgpu_crtc == NULL) {
272 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
273 		return;
274 	}
275 
276 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
277 
278 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
279 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
280 						 amdgpu_crtc->pflip_status,
281 						 AMDGPU_FLIP_SUBMITTED,
282 						 amdgpu_crtc->crtc_id,
283 						 amdgpu_crtc);
284 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
285 		return;
286 	}
287 
288 
289 	/* wake up userspace */
290 	if (amdgpu_crtc->event) {
291 		/* Update to correct count/ts if racing with vblank irq */
292 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
293 
294 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
295 
296 		/* page flip completed. clean up */
297 		amdgpu_crtc->event = NULL;
298 
299 	} else
300 		WARN_ON(1);
301 
302 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
303 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
304 
305 	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
306 					__func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
307 
308 	drm_crtc_vblank_put(&amdgpu_crtc->base);
309 }
310 
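/*
 * Vertical blank interrupt handler: forwards the event to the DRM vblank
 * machinery and to the CRC capture code for the affected CRTC.
 */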
311 static void dm_crtc_high_irq(void *interrupt_params)
312 {
313 	struct common_irq_params *irq_params = interrupt_params;
314 	struct amdgpu_device *adev = irq_params->adev;
315 	uint8_t crtc_index = 0;
316 	struct amdgpu_crtc *acrtc;
317 
318 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
319 
320 	if (acrtc)
321 		crtc_index = acrtc->crtc_id;
322 
323 	drm_handle_vblank(adev->ddev, crtc_index);
324 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
325 }
326 
327 static int dm_set_clockgating_state(void *handle,
328 		  enum amd_clockgating_state state)
329 {
330 	return 0;
331 }
332 
333 static int dm_set_powergating_state(void *handle,
334 		  enum amd_powergating_state state)
335 {
336 	return 0;
337 }
338 
339 /* Prototypes of private functions */
340 static int dm_early_init(void* handle);
341 
342 static void hotplug_notify_work_func(struct work_struct *work)
343 {
344 	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
345 	struct drm_device *dev = dm->ddev;
346 
347 	drm_kms_helper_hotplug_event(dev);
348 }
349 
350 #if defined(CONFIG_DRM_AMD_DC_FBC)
351 /* Allocate memory for FBC compressed data */
352 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
353 {
354 	struct drm_device *dev = connector->dev;
355 	struct amdgpu_device *adev = dev->dev_private;
356 	struct dm_comressor_info *compressor = &adev->dm.compressor;
357 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
358 	struct drm_display_mode *mode;
359 	unsigned long max_size = 0;
360 
361 	if (adev->dm.dc->fbc_compressor == NULL)
362 		return;
363 
364 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
365 		return;
366 
367 	if (compressor->bo_ptr)
368 		return;
369 
370 
371 	list_for_each_entry(mode, &connector->modes, head) {
372 		if (max_size < mode->htotal * mode->vtotal)
373 			max_size = mode->htotal * mode->vtotal;
374 	}
375 
376 	if (max_size) {
377 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
378 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
379 			    &compressor->gpu_addr, &compressor->cpu_addr);
380 
381 		if (r)
382 			DRM_ERROR("DM: Failed to initialize FBC\n");
383 		else {
384 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
385 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
386 		}
387 
388 	}
389 
390 }
391 #endif
392 
393 
394 /* Init display KMS
395  *
396  * Returns 0 on success
397  */
398 static int amdgpu_dm_init(struct amdgpu_device *adev)
399 {
400 	struct dc_init_data init_data;
401 	adev->dm.ddev = adev->ddev;
402 	adev->dm.adev = adev;
403 
404 	/* Zero all the fields */
405 	memset(&init_data, 0, sizeof(init_data));
406 
407 	if(amdgpu_dm_irq_init(adev)) {
408 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
409 		goto error;
410 	}
411 
412 	init_data.asic_id.chip_family = adev->family;
413 
414 	init_data.asic_id.pci_revision_id = adev->rev_id;
415 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
416 
417 	init_data.asic_id.vram_width = adev->gmc.vram_width;
418 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
419 	init_data.asic_id.atombios_base_address =
420 		adev->mode_info.atom_context->bios;
421 
422 	init_data.driver = adev;
423 
424 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
425 
426 	if (!adev->dm.cgs_device) {
427 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
428 		goto error;
429 	}
430 
431 	init_data.cgs_device = adev->dm.cgs_device;
432 
433 	adev->dm.dal = NULL;
434 
435 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
436 
437 	/*
438 	 * TODO debug why this doesn't work on Raven
439 	 */
440 	if (adev->flags & AMD_IS_APU &&
441 	    adev->asic_type >= CHIP_CARRIZO &&
442 	    adev->asic_type < CHIP_RAVEN)
443 		init_data.flags.gpu_vm_support = true;
444 
445 	/* Display Core create. */
446 	adev->dm.dc = dc_create(&init_data);
447 
448 	if (adev->dm.dc) {
449 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
450 	} else {
451 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
452 		goto error;
453 	}
454 
455 	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
456 
457 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
458 	if (!adev->dm.freesync_module) {
459 		DRM_ERROR(
460 		"amdgpu: failed to initialize freesync_module.\n");
461 	} else
462 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
463 				adev->dm.freesync_module);
464 
465 	amdgpu_dm_init_color_mod();
466 
467 	if (amdgpu_dm_initialize_drm_device(adev)) {
468 		DRM_ERROR(
469 		"amdgpu: failed to initialize sw for display support.\n");
470 		goto error;
471 	}
472 
473 	/* Update the actual used number of crtc */
474 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
475 
476 	/* TODO: Add_display_info? */
477 
478 	/* TODO use dynamic cursor width */
479 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
480 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
481 
482 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
483 		DRM_ERROR(
484 		"amdgpu: failed to initialize vblank support.\n");
485 		goto error;
486 	}
487 
488 	DRM_DEBUG_DRIVER("KMS initialized.\n");
489 
490 	return 0;
491 error:
492 	amdgpu_dm_fini(adev);
493 
494 	return -1;
495 }
496 
497 static void amdgpu_dm_fini(struct amdgpu_device *adev)
498 {
499 	amdgpu_dm_destroy_drm_device(&adev->dm);
500 	/*
501 	 * TODO: pageflip, vblank interrupt
502 	 *
503 	 * amdgpu_dm_irq_fini(adev);
504 	 */
505 
506 	if (adev->dm.cgs_device) {
507 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
508 		adev->dm.cgs_device = NULL;
509 	}
510 	if (adev->dm.freesync_module) {
511 		mod_freesync_destroy(adev->dm.freesync_module);
512 		adev->dm.freesync_module = NULL;
513 	}
514 	/* DC Destroy TODO: Replace destroy DAL */
515 	if (adev->dm.dc)
516 		dc_destroy(&adev->dm.dc);
517 	return;
518 }
519 
520 static int dm_sw_init(void *handle)
521 {
522 	return 0;
523 }
524 
525 static int dm_sw_fini(void *handle)
526 {
527 	return 0;
528 }
529 
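/*
 * Walk all connectors and start topology management on every link that was
 * detected as an MST branch device.
 */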
530 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
531 {
532 	struct amdgpu_dm_connector *aconnector;
533 	struct drm_connector *connector;
534 	int ret = 0;
535 
536 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
537 
538 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
539 		aconnector = to_amdgpu_dm_connector(connector);
540 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
541 		    aconnector->mst_mgr.aux) {
542 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
543 					aconnector, aconnector->base.base.id);
544 
545 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
546 			if (ret < 0) {
547 				DRM_ERROR("DM_MST: Failed to start MST\n");
548 				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
549 				return ret;
550 				}
551 			}
552 		}
553 
554 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
555 	return ret;
556 }
557 
558 static int dm_late_init(void *handle)
559 {
560 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
561 
562 	return detect_mst_link_for_all_connectors(adev->ddev);
563 }
564 
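/* Suspend or resume the MST topology managers across an S3 transition. */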
565 static void s3_handle_mst(struct drm_device *dev, bool suspend)
566 {
567 	struct amdgpu_dm_connector *aconnector;
568 	struct drm_connector *connector;
569 
570 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
571 
572 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
573 		aconnector = to_amdgpu_dm_connector(connector);
574 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
575 		    !aconnector->mst_port) {
576 
577 			if (suspend)
578 				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
579 			else
580 				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
581 		}
582 	}
583 
584 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
585 }
586 
587 static int dm_hw_init(void *handle)
588 {
589 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
590 	/* Create DAL display manager */
591 	amdgpu_dm_init(adev);
592 	amdgpu_dm_hpd_init(adev);
593 
594 	return 0;
595 }
596 
597 static int dm_hw_fini(void *handle)
598 {
599 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
600 
601 	amdgpu_dm_hpd_fini(adev);
602 
603 	amdgpu_dm_irq_fini(adev);
604 	amdgpu_dm_fini(adev);
605 	return 0;
606 }
607 
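/*
 * Suspend path: quiesce MST and DM interrupts, cache the current atomic
 * state so it can be restored on resume, then put DC into D3.
 */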
608 static int dm_suspend(void *handle)
609 {
610 	struct amdgpu_device *adev = handle;
611 	struct amdgpu_display_manager *dm = &adev->dm;
612 	int ret = 0;
613 
614 	s3_handle_mst(adev->ddev, true);
615 
616 	amdgpu_dm_irq_suspend(adev);
617 
618 	WARN_ON(adev->dm.cached_state);
619 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
620 
621 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
622 
623 	return ret;
624 }
625 
626 static struct amdgpu_dm_connector *
627 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
628 					     struct drm_crtc *crtc)
629 {
630 	uint32_t i;
631 	struct drm_connector_state *new_con_state;
632 	struct drm_connector *connector;
633 	struct drm_crtc *crtc_from_state;
634 
635 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
636 		crtc_from_state = new_con_state->crtc;
637 
638 		if (crtc_from_state == crtc)
639 			return to_amdgpu_dm_connector(connector);
640 	}
641 
642 	return NULL;
643 }
644 
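/*
 * Resume path: power DC back up, re-enable MST and HPD interrupts,
 * re-detect all links and then replay the atomic state that was cached
 * at suspend time.
 */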
645 static int dm_resume(void *handle)
646 {
647 	struct amdgpu_device *adev = handle;
648 	struct drm_device *ddev = adev->ddev;
649 	struct amdgpu_display_manager *dm = &adev->dm;
650 	struct amdgpu_dm_connector *aconnector;
651 	struct drm_connector *connector;
652 	struct drm_crtc *crtc;
653 	struct drm_crtc_state *new_crtc_state;
654 	struct dm_crtc_state *dm_new_crtc_state;
655 	struct drm_plane *plane;
656 	struct drm_plane_state *new_plane_state;
657 	struct dm_plane_state *dm_new_plane_state;
658 	int ret;
659 	int i;
660 
661 	/* power on hardware */
662 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
663 
664 	/* program HPD filter */
665 	dc_resume(dm->dc);
666 
667 	/* On resume we need to rewrite the MSTM control bits to enable MST */
668 	s3_handle_mst(ddev, false);
669 
670 	/*
671 	 * Enable HPD Rx IRQ early; it should be done before a mode is set, as
672 	 * short pulse interrupts are used for MST.
673 	 */
674 	amdgpu_dm_irq_resume_early(adev);
675 
676 	/* Do detection */
677 	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
678 		aconnector = to_amdgpu_dm_connector(connector);
679 
680 		/*
681 		 * This is the case when traversing through already created MST
682 		 * connectors; they should be skipped.
683 		 */
684 		if (aconnector->mst_port)
685 			continue;
686 
687 		mutex_lock(&aconnector->hpd_lock);
688 		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
689 
690 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
691 			aconnector->fake_enable = false;
692 
693 		aconnector->dc_sink = NULL;
694 		amdgpu_dm_update_connector_after_detect(aconnector);
695 		mutex_unlock(&aconnector->hpd_lock);
696 	}
697 
698 	/* Force mode set in atomic commit */
699 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
700 		new_crtc_state->active_changed = true;
701 
702 	/*
703 	 * atomic_check is expected to create the dc states. We need to release
704 	 * them here, since they were duplicated as part of the suspend
705 	 * procedure.
706 	 */
707 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
708 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
709 		if (dm_new_crtc_state->stream) {
710 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
711 			dc_stream_release(dm_new_crtc_state->stream);
712 			dm_new_crtc_state->stream = NULL;
713 		}
714 	}
715 
716 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
717 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
718 		if (dm_new_plane_state->dc_state) {
719 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
720 			dc_plane_state_release(dm_new_plane_state->dc_state);
721 			dm_new_plane_state->dc_state = NULL;
722 		}
723 	}
724 
725 	ret = drm_atomic_helper_resume(ddev, dm->cached_state);
726 
727 	dm->cached_state = NULL;
728 
729 	amdgpu_dm_irq_resume_late(adev);
730 
731 	return ret;
732 }
733 
734 static const struct amd_ip_funcs amdgpu_dm_funcs = {
735 	.name = "dm",
736 	.early_init = dm_early_init,
737 	.late_init = dm_late_init,
738 	.sw_init = dm_sw_init,
739 	.sw_fini = dm_sw_fini,
740 	.hw_init = dm_hw_init,
741 	.hw_fini = dm_hw_fini,
742 	.suspend = dm_suspend,
743 	.resume = dm_resume,
744 	.is_idle = dm_is_idle,
745 	.wait_for_idle = dm_wait_for_idle,
746 	.check_soft_reset = dm_check_soft_reset,
747 	.soft_reset = dm_soft_reset,
748 	.set_clockgating_state = dm_set_clockgating_state,
749 	.set_powergating_state = dm_set_powergating_state,
750 };
751 
752 const struct amdgpu_ip_block_version dm_ip_block =
753 {
754 	.type = AMD_IP_BLOCK_TYPE_DCE,
755 	.major = 1,
756 	.minor = 0,
757 	.rev = 0,
758 	.funcs = &amdgpu_dm_funcs,
759 };
760 
761 
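/*
 * Private atomic state handling: dm_atomic_state wraps drm_atomic_state and
 * additionally carries a DC context, which is released when the state is
 * cleared.
 */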
762 static struct drm_atomic_state *
763 dm_atomic_state_alloc(struct drm_device *dev)
764 {
765 	struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
766 
767 	if (!state)
768 		return NULL;
769 
770 	if (drm_atomic_state_init(dev, &state->base) < 0)
771 		goto fail;
772 
773 	return &state->base;
774 
775 fail:
776 	kfree(state);
777 	return NULL;
778 }
779 
780 static void
781 dm_atomic_state_clear(struct drm_atomic_state *state)
782 {
783 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
784 
785 	if (dm_state->context) {
786 		dc_release_state(dm_state->context);
787 		dm_state->context = NULL;
788 	}
789 
790 	drm_atomic_state_default_clear(state);
791 }
792 
793 static void
794 dm_atomic_state_alloc_free(struct drm_atomic_state *state)
795 {
796 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
797 	drm_atomic_state_default_release(state);
798 	kfree(dm_state);
799 }
800 
801 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
802 	.fb_create = amdgpu_display_user_framebuffer_create,
803 	.output_poll_changed = drm_fb_helper_output_poll_changed,
804 	.atomic_check = amdgpu_dm_atomic_check,
805 	.atomic_commit = amdgpu_dm_atomic_commit,
806 	.atomic_state_alloc = dm_atomic_state_alloc,
807 	.atomic_state_clear = dm_atomic_state_clear,
808 	.atomic_state_free = dm_atomic_state_alloc_free
809 };
810 
811 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
812 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
813 };
814 
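/*
 * Synchronize the DRM connector (dc_sink, EDID property, freesync
 * registration) with the result of the most recent dc link detection.
 */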
815 static void
816 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
817 {
818 	struct drm_connector *connector = &aconnector->base;
819 	struct drm_device *dev = connector->dev;
820 	struct dc_sink *sink;
821 
822 	/* MST handled by drm_mst framework */
823 	if (aconnector->mst_mgr.mst_state == true)
824 		return;
825 
826 
827 	sink = aconnector->dc_link->local_sink;
828 
829 	/* EDID mgmt connectors get their first update only in the mode_valid hook;
830 	 * the connector sink is then set to either a fake or a physical sink,
831 	 * depending on link status. Don't do it here during boot.
832 	 */
833 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
834 			&& aconnector->dc_em_sink) {
835 
836 		/* For headless S3 resume, use the emulated sink to fake a stream
837 		 * because on resume connector->sink is set to NULL
838 		 */
839 		mutex_lock(&dev->mode_config.mutex);
840 
841 		if (sink) {
842 			if (aconnector->dc_sink) {
843 				amdgpu_dm_remove_sink_from_freesync_module(
844 								connector);
845 				/* retain and release below bump up the refcount for the
846 				 * sink because the link doesn't point to it anymore after
847 				 * disconnect, so on the next crtc to connector reshuffle
848 				 * by UMD we would get an unwanted dc_sink release
849 				 */
850 				if (aconnector->dc_sink != aconnector->dc_em_sink)
851 					dc_sink_release(aconnector->dc_sink);
852 			}
853 			aconnector->dc_sink = sink;
854 			amdgpu_dm_add_sink_to_freesync_module(
855 						connector, aconnector->edid);
856 		} else {
857 			amdgpu_dm_remove_sink_from_freesync_module(connector);
858 			if (!aconnector->dc_sink)
859 				aconnector->dc_sink = aconnector->dc_em_sink;
860 			else if (aconnector->dc_sink != aconnector->dc_em_sink)
861 				dc_sink_retain(aconnector->dc_sink);
862 		}
863 
864 		mutex_unlock(&dev->mode_config.mutex);
865 		return;
866 	}
867 
868 	/*
869 	 * TODO: temporary guard while looking for a proper fix.
870 	 * If this sink is an MST sink, we should not do anything.
871 	 */
872 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
873 		return;
874 
875 	if (aconnector->dc_sink == sink) {
876 		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
877 		 * Do nothing!! */
878 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
879 				aconnector->connector_id);
880 		return;
881 	}
882 
883 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
884 		aconnector->connector_id, aconnector->dc_sink, sink);
885 
886 	mutex_lock(&dev->mode_config.mutex);
887 
888 	/* 1. Update status of the drm connector
889 	 * 2. Send an event and let userspace tell us what to do */
890 	if (sink) {
891 		/* TODO: check if we still need the S3 mode update workaround.
892 		 * If yes, put it here. */
893 		if (aconnector->dc_sink)
894 			amdgpu_dm_remove_sink_from_freesync_module(
895 							connector);
896 
897 		aconnector->dc_sink = sink;
898 		if (sink->dc_edid.length == 0) {
899 			aconnector->edid = NULL;
900 		} else {
901 			aconnector->edid =
902 				(struct edid *) sink->dc_edid.raw_edid;
903 
904 
905 			drm_mode_connector_update_edid_property(connector,
906 					aconnector->edid);
907 		}
908 		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
909 
910 	} else {
911 		amdgpu_dm_remove_sink_from_freesync_module(connector);
912 		drm_mode_connector_update_edid_property(connector, NULL);
913 		aconnector->num_modes = 0;
914 		aconnector->dc_sink = NULL;
915 		aconnector->edid = NULL;
916 	}
917 
918 	mutex_unlock(&dev->mode_config.mutex);
919 }
920 
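/*
 * Long HPD pulse handler: re-run link detection, update the connector and
 * notify userspace via a hotplug event.
 */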
921 static void handle_hpd_irq(void *param)
922 {
923 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
924 	struct drm_connector *connector = &aconnector->base;
925 	struct drm_device *dev = connector->dev;
926 
927 	/* In case of failure or MST, no need to update the connector status or
928 	 * notify the OS since (for the MST case) MST does this in its own context.
929 	 */
930 	mutex_lock(&aconnector->hpd_lock);
931 
932 	if (aconnector->fake_enable)
933 		aconnector->fake_enable = false;
934 
935 	if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
936 		amdgpu_dm_update_connector_after_detect(aconnector);
937 
938 
939 		drm_modeset_lock_all(dev);
940 		dm_restore_drm_connector_state(dev, connector);
941 		drm_modeset_unlock_all(dev);
942 
943 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
944 			drm_kms_helper_hotplug_event(dev);
945 	}
946 	mutex_unlock(&aconnector->hpd_lock);
947 
948 }
949 
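/*
 * Service DP short pulse interrupts for MST: read the sink count/ESI DPCD
 * range, let the MST manager handle the IRQ, ACK it back to the sink and
 * loop until no new IRQ is pending or the retry limit is reached.
 */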
950 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
951 {
952 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
953 	uint8_t dret;
954 	bool new_irq_handled = false;
955 	int dpcd_addr;
956 	int dpcd_bytes_to_read;
957 
958 	const int max_process_count = 30;
959 	int process_count = 0;
960 
961 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
962 
963 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
964 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
965 		/* DPCD 0x200 - 0x201 for downstream IRQ */
966 		dpcd_addr = DP_SINK_COUNT;
967 	} else {
968 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
969 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
970 		dpcd_addr = DP_SINK_COUNT_ESI;
971 	}
972 
973 	dret = drm_dp_dpcd_read(
974 		&aconnector->dm_dp_aux.aux,
975 		dpcd_addr,
976 		esi,
977 		dpcd_bytes_to_read);
978 
979 	while (dret == dpcd_bytes_to_read &&
980 		process_count < max_process_count) {
981 		uint8_t retry;
982 		dret = 0;
983 
984 		process_count++;
985 
986 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
987 		/* handle HPD short pulse irq */
988 		if (aconnector->mst_mgr.mst_state)
989 			drm_dp_mst_hpd_irq(
990 				&aconnector->mst_mgr,
991 				esi,
992 				&new_irq_handled);
993 
994 		if (new_irq_handled) {
995 			/* ACK at DPCD to notify downstream */
996 			const int ack_dpcd_bytes_to_write =
997 				dpcd_bytes_to_read - 1;
998 
999 			for (retry = 0; retry < 3; retry++) {
1000 				uint8_t wret;
1001 
1002 				wret = drm_dp_dpcd_write(
1003 					&aconnector->dm_dp_aux.aux,
1004 					dpcd_addr + 1,
1005 					&esi[1],
1006 					ack_dpcd_bytes_to_write);
1007 				if (wret == ack_dpcd_bytes_to_write)
1008 					break;
1009 			}
1010 
1011 			/* check if there is a new irq to be handled */
1012 			dret = drm_dp_dpcd_read(
1013 				&aconnector->dm_dp_aux.aux,
1014 				dpcd_addr,
1015 				esi,
1016 				dpcd_bytes_to_read);
1017 
1018 			new_irq_handled = false;
1019 		} else {
1020 			break;
1021 		}
1022 	}
1023 
1024 	if (process_count == max_process_count)
1025 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
1026 }
1027 
1028 static void handle_hpd_rx_irq(void *param)
1029 {
1030 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1031 	struct drm_connector *connector = &aconnector->base;
1032 	struct drm_device *dev = connector->dev;
1033 	struct dc_link *dc_link = aconnector->dc_link;
1034 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
1035 
1036 	/* TODO: Temporarily add a mutex so the hpd interrupt does not have a
1037 	 * gpio conflict; once the i2c helper is implemented, this mutex should
1038 	 * be retired.
1039 	 */
1040 	if (dc_link->type != dc_connection_mst_branch)
1041 		mutex_lock(&aconnector->hpd_lock);
1042 
1043 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
1044 			!is_mst_root_connector) {
1045 		/* Downstream Port status changed. */
1046 		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1047 
1048 			if (aconnector->fake_enable)
1049 				aconnector->fake_enable = false;
1050 
1051 			amdgpu_dm_update_connector_after_detect(aconnector);
1052 
1053 
1054 			drm_modeset_lock_all(dev);
1055 			dm_restore_drm_connector_state(dev, connector);
1056 			drm_modeset_unlock_all(dev);
1057 
1058 			drm_kms_helper_hotplug_event(dev);
1059 		}
1060 	}
1061 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
1062 	    (dc_link->type == dc_connection_mst_branch))
1063 		dm_handle_hpd_rx_irq(aconnector);
1064 
1065 	if (dc_link->type != dc_connection_mst_branch)
1066 		mutex_unlock(&aconnector->hpd_lock);
1067 }
1068 
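/*
 * Register per-connector HPD and HPD RX (short pulse) interrupt sources
 * with the DM interrupt service, pointing them at the handlers above.
 */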
1069 static void register_hpd_handlers(struct amdgpu_device *adev)
1070 {
1071 	struct drm_device *dev = adev->ddev;
1072 	struct drm_connector *connector;
1073 	struct amdgpu_dm_connector *aconnector;
1074 	const struct dc_link *dc_link;
1075 	struct dc_interrupt_params int_params = {0};
1076 
1077 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1078 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1079 
1080 	list_for_each_entry(connector,
1081 			&dev->mode_config.connector_list, head)	{
1082 
1083 		aconnector = to_amdgpu_dm_connector(connector);
1084 		dc_link = aconnector->dc_link;
1085 
1086 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1087 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1088 			int_params.irq_source = dc_link->irq_source_hpd;
1089 
1090 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
1091 					handle_hpd_irq,
1092 					(void *) aconnector);
1093 		}
1094 
1095 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1096 
1097 			/* Also register for DP short pulse (hpd_rx). */
1098 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1099 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
1100 
1101 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
1102 					handle_hpd_rx_irq,
1103 					(void *) aconnector);
1104 		}
1105 	}
1106 }
1107 
1108 /* Register IRQ sources and initialize IRQ callbacks */
1109 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1110 {
1111 	struct dc *dc = adev->dm.dc;
1112 	struct common_irq_params *c_irq_params;
1113 	struct dc_interrupt_params int_params = {0};
1114 	int r;
1115 	int i;
1116 	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
1117 
1118 	if (adev->asic_type == CHIP_VEGA10 ||
1119 	    adev->asic_type == CHIP_VEGA12 ||
1120 	    adev->asic_type == CHIP_VEGA20 ||
1121 	    adev->asic_type == CHIP_RAVEN)
1122 		client_id = SOC15_IH_CLIENTID_DCE;
1123 
1124 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1125 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1126 
1127 	/* Actions of amdgpu_irq_add_id():
1128 	 * 1. Register a set() function with base driver.
1129 	 *    Base driver will call set() function to enable/disable an
1130 	 *    interrupt in DC hardware.
1131 	 * 2. Register amdgpu_dm_irq_handler().
1132 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1133 	 *    coming from DC hardware.
1134 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1135 	 *    for acknowledging and handling. */
1136 
1137 	/* Use VBLANK interrupt */
1138 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
1139 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
1140 		if (r) {
1141 			DRM_ERROR("Failed to add crtc irq id!\n");
1142 			return r;
1143 		}
1144 
1145 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1146 		int_params.irq_source =
1147 			dc_interrupt_to_irq_source(dc, i, 0);
1148 
1149 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1150 
1151 		c_irq_params->adev = adev;
1152 		c_irq_params->irq_src = int_params.irq_source;
1153 
1154 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1155 				dm_crtc_high_irq, c_irq_params);
1156 	}
1157 
1158 	/* Use GRPH_PFLIP interrupt */
1159 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
1160 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
1161 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
1162 		if (r) {
1163 			DRM_ERROR("Failed to add page flip irq id!\n");
1164 			return r;
1165 		}
1166 
1167 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1168 		int_params.irq_source =
1169 			dc_interrupt_to_irq_source(dc, i, 0);
1170 
1171 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1172 
1173 		c_irq_params->adev = adev;
1174 		c_irq_params->irq_src = int_params.irq_source;
1175 
1176 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1177 				dm_pflip_high_irq, c_irq_params);
1178 
1179 	}
1180 
1181 	/* HPD */
1182 	r = amdgpu_irq_add_id(adev, client_id,
1183 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
1184 	if (r) {
1185 		DRM_ERROR("Failed to add hpd irq id!\n");
1186 		return r;
1187 	}
1188 
1189 	register_hpd_handlers(adev);
1190 
1191 	return 0;
1192 }
1193 
1194 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1195 /* Register IRQ sources and initialize IRQ callbacks */
1196 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1197 {
1198 	struct dc *dc = adev->dm.dc;
1199 	struct common_irq_params *c_irq_params;
1200 	struct dc_interrupt_params int_params = {0};
1201 	int r;
1202 	int i;
1203 
1204 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1205 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1206 
1207 	/* Actions of amdgpu_irq_add_id():
1208 	 * 1. Register a set() function with base driver.
1209 	 *    Base driver will call set() function to enable/disable an
1210 	 *    interrupt in DC hardware.
1211 	 * 2. Register amdgpu_dm_irq_handler().
1212 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1213 	 *    coming from DC hardware.
1214 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1215 	 *    for acknowledging and handling.
1216 	 * */
1217 
1218 	/* Use VSTARTUP interrupt */
1219 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
1220 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
1221 			i++) {
1222 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
1223 
1224 		if (r) {
1225 			DRM_ERROR("Failed to add crtc irq id!\n");
1226 			return r;
1227 		}
1228 
1229 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1230 		int_params.irq_source =
1231 			dc_interrupt_to_irq_source(dc, i, 0);
1232 
1233 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1234 
1235 		c_irq_params->adev = adev;
1236 		c_irq_params->irq_src = int_params.irq_source;
1237 
1238 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1239 				dm_crtc_high_irq, c_irq_params);
1240 	}
1241 
1242 	/* Use GRPH_PFLIP interrupt */
1243 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1244 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1245 			i++) {
1246 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1247 		if (r) {
1248 			DRM_ERROR("Failed to add page flip irq id!\n");
1249 			return r;
1250 		}
1251 
1252 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1253 		int_params.irq_source =
1254 			dc_interrupt_to_irq_source(dc, i, 0);
1255 
1256 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1257 
1258 		c_irq_params->adev = adev;
1259 		c_irq_params->irq_src = int_params.irq_source;
1260 
1261 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1262 				dm_pflip_high_irq, c_irq_params);
1263 
1264 	}
1265 
1266 	/* HPD */
1267 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
1268 			&adev->hpd_irq);
1269 	if (r) {
1270 		DRM_ERROR("Failed to add hpd irq id!\n");
1271 		return r;
1272 	}
1273 
1274 	register_hpd_handlers(adev);
1275 
1276 	return 0;
1277 }
1278 #endif
1279 
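/* Set up drm_mode_config limits, helper funcs and driver-specific properties. */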
1280 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1281 {
1282 	int r;
1283 
1284 	adev->mode_info.mode_config_initialized = true;
1285 
1286 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1287 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1288 
1289 	adev->ddev->mode_config.max_width = 16384;
1290 	adev->ddev->mode_config.max_height = 16384;
1291 
1292 	adev->ddev->mode_config.preferred_depth = 24;
1293 	adev->ddev->mode_config.prefer_shadow = 1;
1294 	/* indicate support of immediate flip */
1295 	adev->ddev->mode_config.async_page_flip = true;
1296 
1297 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
1298 
1299 	r = amdgpu_display_modeset_create_props(adev);
1300 	if (r)
1301 		return r;
1302 
1303 	return 0;
1304 }
1305 
1306 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1307 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1308 
1309 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1310 {
1311 	struct amdgpu_display_manager *dm = bl_get_data(bd);
1312 
1313 	if (dc_link_set_backlight_level(dm->backlight_link,
1314 			bd->props.brightness, 0, 0))
1315 		return 0;
1316 	else
1317 		return 1;
1318 }
1319 
1320 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1321 {
1322 	return bd->props.brightness;
1323 }
1324 
1325 static const struct backlight_ops amdgpu_dm_backlight_ops = {
1326 	.get_brightness = amdgpu_dm_backlight_get_brightness,
1327 	.update_status	= amdgpu_dm_backlight_update_status,
1328 };
1329 
1330 static void
1331 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1332 {
1333 	char bl_name[16];
1334 	struct backlight_properties props = { 0 };
1335 
1336 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1337 	props.type = BACKLIGHT_RAW;
1338 
1339 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1340 			dm->adev->ddev->primary->index);
1341 
1342 	dm->backlight_dev = backlight_device_register(bl_name,
1343 			dm->adev->ddev->dev,
1344 			dm,
1345 			&amdgpu_dm_backlight_ops,
1346 			&props);
1347 
1348 	if (IS_ERR(dm->backlight_dev))
1349 		DRM_ERROR("DM: Backlight registration failed!\n");
1350 	else
1351 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1352 }
1353 
1354 #endif
1355 
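/*
 * Allocate a single amdgpu_plane, pick its type from mode_info and register
 * it with DRM through amdgpu_dm_plane_init().
 */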
1356 static int initialize_plane(struct amdgpu_display_manager *dm,
1357 			     struct amdgpu_mode_info *mode_info,
1358 			     int plane_id)
1359 {
1360 	struct amdgpu_plane *plane;
1361 	unsigned long possible_crtcs;
1362 	int ret = 0;
1363 
1364 	plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
1365 	mode_info->planes[plane_id] = plane;
1366 
1367 	if (!plane) {
1368 		DRM_ERROR("KMS: Failed to allocate plane\n");
1369 		return -ENOMEM;
1370 	}
1371 	plane->base.type = mode_info->plane_type[plane_id];
1372 
1373 	/*
1374 	 * HACK: IGT tests expect that each plane can only have
1375 	 * one possible CRTC. For now, set one CRTC for each
1376 	 * plane that is not an underlay, but still allow multiple
1377 	 * CRTCs for underlay planes.
1378 	 */
1379 	possible_crtcs = 1 << plane_id;
1380 	if (plane_id >= dm->dc->caps.max_streams)
1381 		possible_crtcs = 0xff;
1382 
1383 	ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
1384 
1385 	if (ret) {
1386 		DRM_ERROR("KMS: Failed to initialize plane\n");
1387 		return ret;
1388 	}
1389 
1390 	return ret;
1391 }
1392 
1393 
1394 static void register_backlight_device(struct amdgpu_display_manager *dm,
1395 				      struct dc_link *link)
1396 {
1397 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1398 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1399 
1400 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
1401 	    link->type != dc_connection_none) {
1402 		/* Even if registration fails, we should continue with
1403 		 * DM initialization because not having a backlight control
1404 		 * is better than a black screen.
1405 		 */
1406 		amdgpu_dm_register_backlight_device(dm);
1407 
1408 		if (dm->backlight_dev)
1409 			dm->backlight_link = link;
1410 	}
1411 #endif
1412 }
1413 
1414 
1415 /* In this architecture, the association
1416  * connector -> encoder -> crtc
1417  * is not really required. The crtc and connector will hold the
1418  * display_index as an abstraction to use with the DAL component
1419  *
1420  * Returns 0 on success
1421  */
1422 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1423 {
1424 	struct amdgpu_display_manager *dm = &adev->dm;
1425 	int32_t i;
1426 	struct amdgpu_dm_connector *aconnector = NULL;
1427 	struct amdgpu_encoder *aencoder = NULL;
1428 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
1429 	uint32_t link_cnt;
1430 	int32_t total_overlay_planes, total_primary_planes;
1431 
1432 	link_cnt = dm->dc->caps.max_links;
1433 	if (amdgpu_dm_mode_config_init(dm->adev)) {
1434 		DRM_ERROR("DM: Failed to initialize mode config\n");
1435 		return -1;
1436 	}
1437 
1438 	/* Identify the number of planes to be initialized */
1439 	total_overlay_planes = dm->dc->caps.max_slave_planes;
1440 	total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
1441 
1442 	/* First initialize overlay planes, index starting after primary planes */
1443 	for (i = (total_overlay_planes - 1); i >= 0; i--) {
1444 		if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
1445 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
1446 			goto fail;
1447 		}
1448 	}
1449 
1450 	/* Initialize primary planes */
1451 	for (i = (total_primary_planes - 1); i >= 0; i--) {
1452 		if (initialize_plane(dm, mode_info, i)) {
1453 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
1454 			goto fail;
1455 		}
1456 	}
1457 
1458 	for (i = 0; i < dm->dc->caps.max_streams; i++)
1459 		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
1460 			DRM_ERROR("KMS: Failed to initialize crtc\n");
1461 			goto fail;
1462 		}
1463 
1464 	dm->display_indexes_num = dm->dc->caps.max_streams;
1465 
1466 	/* loops over all connectors on the board */
1467 	for (i = 0; i < link_cnt; i++) {
1468 		struct dc_link *link = NULL;
1469 
1470 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1471 			DRM_ERROR(
1472 				"KMS: Cannot support more than %d display indexes\n",
1473 					AMDGPU_DM_MAX_DISPLAY_INDEX);
1474 			continue;
1475 		}
1476 
1477 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1478 		if (!aconnector)
1479 			goto fail;
1480 
1481 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
1482 		if (!aencoder)
1483 			goto fail;
1484 
1485 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
1486 			DRM_ERROR("KMS: Failed to initialize encoder\n");
1487 			goto fail;
1488 		}
1489 
1490 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
1491 			DRM_ERROR("KMS: Failed to initialize connector\n");
1492 			goto fail;
1493 		}
1494 
1495 		link = dc_get_link_at_index(dm->dc, i);
1496 
1497 		if (dc_link_detect(link, DETECT_REASON_BOOT)) {
1498 			amdgpu_dm_update_connector_after_detect(aconnector);
1499 			register_backlight_device(dm, link);
1500 		}
1501 
1502 
1503 	}
1504 
1505 	/* Software is initialized. Now we can register interrupt handlers. */
1506 	switch (adev->asic_type) {
1507 	case CHIP_BONAIRE:
1508 	case CHIP_HAWAII:
1509 	case CHIP_KAVERI:
1510 	case CHIP_KABINI:
1511 	case CHIP_MULLINS:
1512 	case CHIP_TONGA:
1513 	case CHIP_FIJI:
1514 	case CHIP_CARRIZO:
1515 	case CHIP_STONEY:
1516 	case CHIP_POLARIS11:
1517 	case CHIP_POLARIS10:
1518 	case CHIP_POLARIS12:
1519 	case CHIP_VEGAM:
1520 	case CHIP_VEGA10:
1521 	case CHIP_VEGA12:
1522 	case CHIP_VEGA20:
1523 		if (dce110_register_irq_handlers(dm->adev)) {
1524 			DRM_ERROR("DM: Failed to initialize IRQ\n");
1525 			goto fail;
1526 		}
1527 		break;
1528 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1529 	case CHIP_RAVEN:
1530 		if (dcn10_register_irq_handlers(dm->adev)) {
1531 			DRM_ERROR("DM: Failed to initialize IRQ\n");
1532 			goto fail;
1533 		}
1534 		/*
1535 		 * Temporarily disabled until pplib/smu interaction is implemented
1536 		 */
1537 		dm->dc->debug.disable_stutter = true;
1538 		break;
1539 #endif
1540 	default:
1541 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1542 		goto fail;
1543 	}
1544 
1545 	return 0;
1546 fail:
1547 	kfree(aencoder);
1548 	kfree(aconnector);
1549 	for (i = 0; i < dm->dc->caps.max_planes; i++)
1550 		kfree(mode_info->planes[i]);
1551 	return -1;
1552 }
1553 
1554 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1555 {
1556 	drm_mode_config_cleanup(dm->ddev);
1557 	return;
1558 }
1559 
1560 /******************************************************************************
1561  * amdgpu_display_funcs functions
1562  *****************************************************************************/
1563 
1564 /**
1565  * dm_bandwidth_update - program display watermarks
1566  *
1567  * @adev: amdgpu_device pointer
1568  *
1569  * Calculate and program the display watermarks and line buffer allocation.
1570  */
1571 static void dm_bandwidth_update(struct amdgpu_device *adev)
1572 {
1573 	/* TODO: implement later */
1574 }
1575 
1576 static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
1577 				     u8 level)
1578 {
1579 	/* TODO: translate amdgpu_encoder to display_index and call DAL */
1580 }
1581 
1582 static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
1583 {
1584 	/* TODO: translate amdgpu_encoder to display_index and call DAL */
1585 	return 0;
1586 }
1587 
1588 static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1589 				struct drm_file *filp)
1590 {
1591 	struct mod_freesync_params freesync_params;
1592 	uint8_t num_streams;
1593 	uint8_t i;
1594 
1595 	struct amdgpu_device *adev = dev->dev_private;
1596 	int r = 0;
1597 
1598 	/* Get freesync enable flag from DRM */
1599 
1600 	num_streams = dc_get_current_stream_count(adev->dm.dc);
1601 
1602 	for (i = 0; i < num_streams; i++) {
1603 		struct dc_stream_state *stream;
1604 		stream = dc_get_stream_at_index(adev->dm.dc, i);
1605 
1606 		mod_freesync_update_state(adev->dm.freesync_module,
1607 					  &stream, 1, &freesync_params);
1608 	}
1609 
1610 	return r;
1611 }
1612 
1613 static const struct amdgpu_display_funcs dm_display_funcs = {
1614 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
1615 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
1616 	.backlight_set_level =
1617 		dm_set_backlight_level,/* called unconditionally */
1618 	.backlight_get_level =
1619 		dm_get_backlight_level,/* called unconditionally */
1620 	.hpd_sense = NULL,/* called unconditionally */
1621 	.hpd_set_polarity = NULL, /* called unconditionally */
1622 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
1623 	.page_flip_get_scanoutpos =
1624 		dm_crtc_get_scanoutpos,/* called unconditionally */
1625 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
1626 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
1627 	.notify_freesync = amdgpu_notify_freesync,
1628 
1629 };
1630 
1631 #if defined(CONFIG_DEBUG_KERNEL_DC)
1632 
1633 static ssize_t s3_debug_store(struct device *device,
1634 			      struct device_attribute *attr,
1635 			      const char *buf,
1636 			      size_t count)
1637 {
1638 	int ret;
1639 	int s3_state;
1640 	struct pci_dev *pdev = to_pci_dev(device);
1641 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
1642 	struct amdgpu_device *adev = drm_dev->dev_private;
1643 
1644 	ret = kstrtoint(buf, 0, &s3_state);
1645 
1646 	if (ret == 0) {
1647 		if (s3_state) {
1648 			dm_resume(adev);
1649 			drm_kms_helper_hotplug_event(adev->ddev);
1650 		} else
1651 			dm_suspend(adev);
1652 	}
1653 
1654 	return ret == 0 ? count : 0;
1655 }
1656 
1657 DEVICE_ATTR_WO(s3_debug);
1658 
1659 #endif
1660 
1661 static int dm_early_init(void *handle)
1662 {
1663 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1664 
1665 	switch (adev->asic_type) {
1666 	case CHIP_BONAIRE:
1667 	case CHIP_HAWAII:
1668 		adev->mode_info.num_crtc = 6;
1669 		adev->mode_info.num_hpd = 6;
1670 		adev->mode_info.num_dig = 6;
1671 		adev->mode_info.plane_type = dm_plane_type_default;
1672 		break;
1673 	case CHIP_KAVERI:
1674 		adev->mode_info.num_crtc = 4;
1675 		adev->mode_info.num_hpd = 6;
1676 		adev->mode_info.num_dig = 7;
1677 		adev->mode_info.plane_type = dm_plane_type_default;
1678 		break;
1679 	case CHIP_KABINI:
1680 	case CHIP_MULLINS:
1681 		adev->mode_info.num_crtc = 2;
1682 		adev->mode_info.num_hpd = 6;
1683 		adev->mode_info.num_dig = 6;
1684 		adev->mode_info.plane_type = dm_plane_type_default;
1685 		break;
1686 	case CHIP_FIJI:
1687 	case CHIP_TONGA:
1688 		adev->mode_info.num_crtc = 6;
1689 		adev->mode_info.num_hpd = 6;
1690 		adev->mode_info.num_dig = 7;
1691 		adev->mode_info.plane_type = dm_plane_type_default;
1692 		break;
1693 	case CHIP_CARRIZO:
1694 		adev->mode_info.num_crtc = 3;
1695 		adev->mode_info.num_hpd = 6;
1696 		adev->mode_info.num_dig = 9;
1697 		adev->mode_info.plane_type = dm_plane_type_carizzo;
1698 		break;
1699 	case CHIP_STONEY:
1700 		adev->mode_info.num_crtc = 2;
1701 		adev->mode_info.num_hpd = 6;
1702 		adev->mode_info.num_dig = 9;
1703 		adev->mode_info.plane_type = dm_plane_type_stoney;
1704 		break;
1705 	case CHIP_POLARIS11:
1706 	case CHIP_POLARIS12:
1707 		adev->mode_info.num_crtc = 5;
1708 		adev->mode_info.num_hpd = 5;
1709 		adev->mode_info.num_dig = 5;
1710 		adev->mode_info.plane_type = dm_plane_type_default;
1711 		break;
1712 	case CHIP_POLARIS10:
1713 	case CHIP_VEGAM:
1714 		adev->mode_info.num_crtc = 6;
1715 		adev->mode_info.num_hpd = 6;
1716 		adev->mode_info.num_dig = 6;
1717 		adev->mode_info.plane_type = dm_plane_type_default;
1718 		break;
1719 	case CHIP_VEGA10:
1720 	case CHIP_VEGA12:
1721 	case CHIP_VEGA20:
1722 		adev->mode_info.num_crtc = 6;
1723 		adev->mode_info.num_hpd = 6;
1724 		adev->mode_info.num_dig = 6;
1725 		adev->mode_info.plane_type = dm_plane_type_default;
1726 		break;
1727 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1728 	case CHIP_RAVEN:
1729 		adev->mode_info.num_crtc = 4;
1730 		adev->mode_info.num_hpd = 4;
1731 		adev->mode_info.num_dig = 4;
1732 		adev->mode_info.plane_type = dm_plane_type_default;
1733 		break;
1734 #endif
1735 	default:
1736 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1737 		return -EINVAL;
1738 	}
1739 
1740 	amdgpu_dm_set_irq_funcs(adev);
1741 
1742 	if (adev->mode_info.funcs == NULL)
1743 		adev->mode_info.funcs = &dm_display_funcs;
1744 
1745 	/* Note: Do NOT change adev->audio_endpt_rreg and
1746 	 * adev->audio_endpt_wreg because they are initialised in
1747 	 * amdgpu_device_init() */
1748 #if defined(CONFIG_DEBUG_KERNEL_DC)
1749 	device_create_file(
1750 		adev->ddev->dev,
1751 		&dev_attr_s3_debug);
1752 #endif
1753 
1754 	return 0;
1755 }
1756 
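/*
 * A stream needs to be (re)built only when the CRTC state requires a modeset
 * and ends up enabled and active; modereset_required() is the disable
 * counterpart.
 */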
1757 static bool modeset_required(struct drm_crtc_state *crtc_state,
1758 			     struct dc_stream_state *new_stream,
1759 			     struct dc_stream_state *old_stream)
1760 {
1761 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
1762 		return false;
1763 
1764 	if (!crtc_state->enable)
1765 		return false;
1766 
1767 	return crtc_state->active;
1768 }
1769 
1770 static bool modereset_required(struct drm_crtc_state *crtc_state)
1771 {
1772 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
1773 		return false;
1774 
1775 	return !crtc_state->enable || !crtc_state->active;
1776 }
1777 
1778 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
1779 {
1780 	drm_encoder_cleanup(encoder);
1781 	kfree(encoder);
1782 }
1783 
1784 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
1785 	.destroy = amdgpu_dm_encoder_destroy,
1786 };
1787 
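/*
 * Translate the drm_plane_state source/destination rectangles and rotation
 * into the dc_plane_state equivalents; returns false for degenerate
 * (zero-sized) rectangles.
 */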
1788 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
1789 					struct dc_plane_state *plane_state)
1790 {
1791 	plane_state->src_rect.x = state->src_x >> 16;
1792 	plane_state->src_rect.y = state->src_y >> 16;
1793 	/* We ignore the mantissa for now and do not deal with fractional pixels :( */
1794 	plane_state->src_rect.width = state->src_w >> 16;
1795 
1796 	if (plane_state->src_rect.width == 0)
1797 		return false;
1798 
1799 	plane_state->src_rect.height = state->src_h >> 16;
1800 	if (plane_state->src_rect.height == 0)
1801 		return false;
1802 
1803 	plane_state->dst_rect.x = state->crtc_x;
1804 	plane_state->dst_rect.y = state->crtc_y;
1805 
1806 	if (state->crtc_w == 0)
1807 		return false;
1808 
1809 	plane_state->dst_rect.width = state->crtc_w;
1810 
1811 	if (state->crtc_h == 0)
1812 		return false;
1813 
1814 	plane_state->dst_rect.height = state->crtc_h;
1815 
1816 	plane_state->clip_rect = plane_state->dst_rect;
1817 
1818 	switch (state->rotation & DRM_MODE_ROTATE_MASK) {
1819 	case DRM_MODE_ROTATE_0:
1820 		plane_state->rotation = ROTATION_ANGLE_0;
1821 		break;
1822 	case DRM_MODE_ROTATE_90:
1823 		plane_state->rotation = ROTATION_ANGLE_90;
1824 		break;
1825 	case DRM_MODE_ROTATE_180:
1826 		plane_state->rotation = ROTATION_ANGLE_180;
1827 		break;
1828 	case DRM_MODE_ROTATE_270:
1829 		plane_state->rotation = ROTATION_ANGLE_270;
1830 		break;
1831 	default:
1832 		plane_state->rotation = ROTATION_ANGLE_0;
1833 		break;
1834 	}
1835 
1836 	return true;
1837 }
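
/*
 * Reserve the framebuffer's backing BO just long enough to read its tiling
 * flags; callers use these to fill in the DC tiling info.
 */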
1838 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1839 		       uint64_t *tiling_flags)
1840 {
1841 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
1842 	int r = amdgpu_bo_reserve(rbo, false);
1843 
1844 	if (unlikely(r)) {
1845 		/* Don't print an error message when returning -ERESTARTSYS */
1846 		if (r != -ERESTARTSYS)
1847 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
1848 		return r;
1849 	}
1850 
1851 	if (tiling_flags)
1852 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
1853 
1854 	amdgpu_bo_unreserve(rbo);
1855 
1856 	return r;
1857 }
1858 
1859 static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
1860 					 struct dc_plane_state *plane_state,
1861 					 const struct amdgpu_framebuffer *amdgpu_fb)
1862 {
1863 	uint64_t tiling_flags;
1864 	unsigned int awidth;
1865 	const struct drm_framebuffer *fb = &amdgpu_fb->base;
1866 	int ret = 0;
1867 	struct drm_format_name_buf format_name;
1868 
1869 	ret = get_fb_info(
1870 		amdgpu_fb,
1871 		&tiling_flags);
1872 
1873 	if (ret)
1874 		return ret;
1875 
1876 	switch (fb->format->format) {
1877 	case DRM_FORMAT_C8:
1878 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
1879 		break;
1880 	case DRM_FORMAT_RGB565:
1881 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
1882 		break;
1883 	case DRM_FORMAT_XRGB8888:
1884 	case DRM_FORMAT_ARGB8888:
1885 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
1886 		break;
1887 	case DRM_FORMAT_XRGB2101010:
1888 	case DRM_FORMAT_ARGB2101010:
1889 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
1890 		break;
1891 	case DRM_FORMAT_XBGR2101010:
1892 	case DRM_FORMAT_ABGR2101010:
1893 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
1894 		break;
1895 	case DRM_FORMAT_NV21:
1896 		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
1897 		break;
1898 	case DRM_FORMAT_NV12:
1899 		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
1900 		break;
1901 	default:
1902 		DRM_ERROR("Unsupported screen format %s\n",
1903 			  drm_get_format_name(fb->format->format, &format_name));
1904 		return -EINVAL;
1905 	}
1906 
1907 	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
1908 		plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
1909 		plane_state->plane_size.grph.surface_size.x = 0;
1910 		plane_state->plane_size.grph.surface_size.y = 0;
1911 		plane_state->plane_size.grph.surface_size.width = fb->width;
1912 		plane_state->plane_size.grph.surface_size.height = fb->height;
1913 		plane_state->plane_size.grph.surface_pitch =
1914 				fb->pitches[0] / fb->format->cpp[0];
1915 		/* TODO: unhardcode */
1916 		plane_state->color_space = COLOR_SPACE_SRGB;
1917 
1918 	} else {
1919 		awidth = ALIGN(fb->width, 64);
1920 		plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
1921 		plane_state->plane_size.video.luma_size.x = 0;
1922 		plane_state->plane_size.video.luma_size.y = 0;
1923 		plane_state->plane_size.video.luma_size.width = awidth;
1924 		plane_state->plane_size.video.luma_size.height = fb->height;
1925 		/* TODO: unhardcode */
1926 		plane_state->plane_size.video.luma_pitch = awidth;
1927 
1928 		plane_state->plane_size.video.chroma_size.x = 0;
1929 		plane_state->plane_size.video.chroma_size.y = 0;
1930 		plane_state->plane_size.video.chroma_size.width = awidth;
1931 		plane_state->plane_size.video.chroma_size.height = fb->height;
1932 		plane_state->plane_size.video.chroma_pitch = awidth / 2;
1933 
1934 		/* TODO: unhardcode */
1935 		plane_state->color_space = COLOR_SPACE_YCBCR709;
1936 	}
1937 
1938 	memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
1939 
1940 	/* Fill GFX8 params */
1941 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
1942 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
1943 
1944 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1945 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1946 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1947 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1948 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1949 
1950 		/* XXX fix me for VI */
1951 		plane_state->tiling_info.gfx8.num_banks = num_banks;
1952 		plane_state->tiling_info.gfx8.array_mode =
1953 				DC_ARRAY_2D_TILED_THIN1;
1954 		plane_state->tiling_info.gfx8.tile_split = tile_split;
1955 		plane_state->tiling_info.gfx8.bank_width = bankw;
1956 		plane_state->tiling_info.gfx8.bank_height = bankh;
1957 		plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
1958 		plane_state->tiling_info.gfx8.tile_mode =
1959 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
1960 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
1961 			== DC_ARRAY_1D_TILED_THIN1) {
1962 		plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
1963 	}
1964 
1965 	plane_state->tiling_info.gfx8.pipe_config =
1966 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1967 
1968 	if (adev->asic_type == CHIP_VEGA10 ||
1969 	    adev->asic_type == CHIP_VEGA12 ||
1970 	    adev->asic_type == CHIP_VEGA20 ||
1971 	    adev->asic_type == CHIP_RAVEN) {
1972 		/* Fill GFX9 params */
1973 		plane_state->tiling_info.gfx9.num_pipes =
1974 			adev->gfx.config.gb_addr_config_fields.num_pipes;
1975 		plane_state->tiling_info.gfx9.num_banks =
1976 			adev->gfx.config.gb_addr_config_fields.num_banks;
1977 		plane_state->tiling_info.gfx9.pipe_interleave =
1978 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
1979 		plane_state->tiling_info.gfx9.num_shader_engines =
1980 			adev->gfx.config.gb_addr_config_fields.num_se;
1981 		plane_state->tiling_info.gfx9.max_compressed_frags =
1982 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
1983 		plane_state->tiling_info.gfx9.num_rb_per_se =
1984 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
1985 		plane_state->tiling_info.gfx9.swizzle =
1986 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
1987 		plane_state->tiling_info.gfx9.shaderEnable = 1;
1988 	}
1989 
1990 	plane_state->visible = true;
1991 	plane_state->scaling_quality.h_taps_c = 0;
1992 	plane_state->scaling_quality.v_taps_c = 0;
1993 
1994 	/* is this needed? is plane_state zeroed at allocation? */
1995 	plane_state->scaling_quality.h_taps = 0;
1996 	plane_state->scaling_quality.v_taps = 0;
1997 	plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
1998 
1999 	return ret;
2000 
2001 }
2002 
2003 static int fill_plane_attributes(struct amdgpu_device *adev,
2004 				 struct dc_plane_state *dc_plane_state,
2005 				 struct drm_plane_state *plane_state,
2006 				 struct drm_crtc_state *crtc_state)
2007 {
2008 	const struct amdgpu_framebuffer *amdgpu_fb =
2009 		to_amdgpu_framebuffer(plane_state->fb);
2010 	const struct drm_crtc *crtc = plane_state->crtc;
2011 	int ret = 0;
2012 
2013 	if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
2014 		return -EINVAL;
2015 
2016 	ret = fill_plane_attributes_from_fb(
2017 		crtc->dev->dev_private,
2018 		dc_plane_state,
2019 		amdgpu_fb);
2020 
2021 	if (ret)
2022 		return ret;
2023 
2024 	/*
2025 	 * Always set input transfer function, since plane state is refreshed
2026 	 * every time.
2027 	 */
2028 	ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
2029 	if (ret) {
2030 		dc_transfer_func_release(dc_plane_state->in_transfer_func);
2031 		dc_plane_state->in_transfer_func = NULL;
2032 	}
2033 
2034 	return ret;
2035 }
2036 
2037 /*****************************************************************************/
2038 
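/*
 * Map the connector's RMX scaling mode onto the stream's src/dst rectangles.
 * For RMX_ASPECT (and RMX_OFF, which takes the same path here) the smaller
 * scale factor wins so the aspect ratio is preserved, e.g. a 1280x720 source
 * on a 1920x1200 panel becomes a centered 1920x1080 destination (dst.y = 60);
 * RMX_CENTER shows the source 1:1, and underscan borders shrink the
 * destination further.
 */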
2039 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2040 					   const struct dm_connector_state *dm_state,
2041 					   struct dc_stream_state *stream)
2042 {
2043 	enum amdgpu_rmx_type rmx_type;
2044 
2045 	struct rect src = { 0 }; /* viewport in composition space*/
2046 	struct rect dst = { 0 }; /* stream addressable area */
2047 
2048 	/* no mode. nothing to be done */
2049 	if (!mode)
2050 		return;
2051 
2052 	/* Full screen scaling by default */
2053 	src.width = mode->hdisplay;
2054 	src.height = mode->vdisplay;
2055 	dst.width = stream->timing.h_addressable;
2056 	dst.height = stream->timing.v_addressable;
2057 
2058 	if (dm_state) {
2059 		rmx_type = dm_state->scaling;
2060 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2061 			if (src.width * dst.height <
2062 					src.height * dst.width) {
2063 				/* height needs less upscaling/more downscaling */
2064 				dst.width = src.width *
2065 						dst.height / src.height;
2066 			} else {
2067 				/* width needs less upscaling/more downscaling */
2068 				dst.height = src.height *
2069 						dst.width / src.width;
2070 			}
2071 		} else if (rmx_type == RMX_CENTER) {
2072 			dst = src;
2073 		}
2074 
2075 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
2076 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
2077 
2078 		if (dm_state->underscan_enable) {
2079 			dst.x += dm_state->underscan_hborder / 2;
2080 			dst.y += dm_state->underscan_vborder / 2;
2081 			dst.width -= dm_state->underscan_hborder;
2082 			dst.height -= dm_state->underscan_vborder;
2083 		}
2084 	}
2085 
2086 	stream->src = src;
2087 	stream->dst = dst;
2088 
2089 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
2090 			dst.x, dst.y, dst.width, dst.height);
2091 
2092 }
2093 
2094 static enum dc_color_depth
2095 convert_color_depth_from_display_info(const struct drm_connector *connector)
2096 {
2097 	uint32_t bpc = connector->display_info.bpc;
2098 
2099 	switch (bpc) {
2100 	case 0:
2101 		/* Temporary workaround: DRM doesn't parse the color depth for
2102 		 * EDID revisions before 1.4.
2103 		 * TODO: Fix EDID parsing.
2104 		 */
2105 		return COLOR_DEPTH_888;
2106 	case 6:
2107 		return COLOR_DEPTH_666;
2108 	case 8:
2109 		return COLOR_DEPTH_888;
2110 	case 10:
2111 		return COLOR_DEPTH_101010;
2112 	case 12:
2113 		return COLOR_DEPTH_121212;
2114 	case 14:
2115 		return COLOR_DEPTH_141414;
2116 	case 16:
2117 		return COLOR_DEPTH_161616;
2118 	default:
2119 		return COLOR_DEPTH_UNDEFINED;
2120 	}
2121 }
2122 
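/*
 * Classify the mode as 16:9 or 4:3 by cross-multiplying: hdisplay * 9 is
 * compared against vdisplay * 16 with a small tolerance for rounding.
 * E.g. 1920x1080 gives 17280 vs 17280 (16:9), while 1024x768 gives
 * 9216 vs 12288 (4:3).
 */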
2123 static enum dc_aspect_ratio
2124 get_aspect_ratio(const struct drm_display_mode *mode_in)
2125 {
2126 	int32_t width = mode_in->crtc_hdisplay * 9;
2127 	int32_t height = mode_in->crtc_vdisplay * 16;
2128 
2129 	if ((width - height) < 10 && (width - height) > -10)
2130 		return ASPECT_RATIO_16_9;
2131 	else
2132 		return ASPECT_RATIO_4_3;
2133 }
2134 
2135 static enum dc_color_space
2136 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2137 {
2138 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
2139 
2140 	switch (dc_crtc_timing->pixel_encoding)	{
2141 	case PIXEL_ENCODING_YCBCR422:
2142 	case PIXEL_ENCODING_YCBCR444:
2143 	case PIXEL_ENCODING_YCBCR420:
2144 	{
2145 		/*
2146 		 * 27030 kHz is the separation point between HDTV and SDTV
2147 		 * according to the HDMI spec; we use YCbCr709 above it and
2148 		 * YCbCr601 below it.
2149 		 */
2150 		if (dc_crtc_timing->pix_clk_khz > 27030) {
2151 			if (dc_crtc_timing->flags.Y_ONLY)
2152 				color_space =
2153 					COLOR_SPACE_YCBCR709_LIMITED;
2154 			else
2155 				color_space = COLOR_SPACE_YCBCR709;
2156 		} else {
2157 			if (dc_crtc_timing->flags.Y_ONLY)
2158 				color_space =
2159 					COLOR_SPACE_YCBCR601_LIMITED;
2160 			else
2161 				color_space = COLOR_SPACE_YCBCR601;
2162 		}
2163 
2164 	}
2165 	break;
2166 	case PIXEL_ENCODING_RGB:
2167 		color_space = COLOR_SPACE_SRGB;
2168 		break;
2169 
2170 	default:
2171 		WARN_ON(1);
2172 		break;
2173 	}
2174 
2175 	return color_space;
2176 }
2177 
2178 /*****************************************************************************/
2179 
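/*
 * Convert a drm_display_mode into DC's dc_crtc_timing. Porches and sync
 * widths are derived from the crtc_* fields, e.g. for the CEA 1080p60 mode
 * h_front_porch = 2008 - 1920 = 88 and h_sync_width = 2052 - 2008 = 44.
 * Pixel encoding defaults to RGB unless the HDMI sink advertises YCbCr444.
 */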
2180 static void
2181 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2182 					     const struct drm_display_mode *mode_in,
2183 					     const struct drm_connector *connector)
2184 {
2185 	struct dc_crtc_timing *timing_out = &stream->timing;
2186 
2187 	memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2188 
2189 	timing_out->h_border_left = 0;
2190 	timing_out->h_border_right = 0;
2191 	timing_out->v_border_top = 0;
2192 	timing_out->v_border_bottom = 0;
2193 	/* TODO: un-hardcode */
2194 
2195 	if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2196 			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2197 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2198 	else
2199 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2200 
2201 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2202 	timing_out->display_color_depth = convert_color_depth_from_display_info(
2203 			connector);
2204 	timing_out->scan_type = SCANNING_TYPE_NODATA;
2205 	timing_out->hdmi_vic = 0;
2206 	timing_out->vic = drm_match_cea_mode(mode_in);
2207 
2208 	timing_out->h_addressable = mode_in->crtc_hdisplay;
2209 	timing_out->h_total = mode_in->crtc_htotal;
2210 	timing_out->h_sync_width =
2211 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
2212 	timing_out->h_front_porch =
2213 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
2214 	timing_out->v_total = mode_in->crtc_vtotal;
2215 	timing_out->v_addressable = mode_in->crtc_vdisplay;
2216 	timing_out->v_front_porch =
2217 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2218 	timing_out->v_sync_width =
2219 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2220 	timing_out->pix_clk_khz = mode_in->crtc_clock;
2221 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2222 	if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2223 		timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2224 	if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2225 		timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2226 
2227 	stream->output_color_space = get_output_color_space(timing_out);
2228 
2229 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
2230 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
2231 }
2232 
2233 static void fill_audio_info(struct audio_info *audio_info,
2234 			    const struct drm_connector *drm_connector,
2235 			    const struct dc_sink *dc_sink)
2236 {
2237 	int i = 0;
2238 	int cea_revision = 0;
2239 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
2240 
2241 	audio_info->manufacture_id = edid_caps->manufacturer_id;
2242 	audio_info->product_id = edid_caps->product_id;
2243 
2244 	cea_revision = drm_connector->display_info.cea_rev;
2245 
2246 	strncpy(audio_info->display_name,
2247 		edid_caps->display_name,
2248 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
2249 
2250 	if (cea_revision >= 3) {
2251 		audio_info->mode_count = edid_caps->audio_mode_count;
2252 
2253 		for (i = 0; i < audio_info->mode_count; ++i) {
2254 			audio_info->modes[i].format_code =
2255 					(enum audio_format_code)
2256 					(edid_caps->audio_modes[i].format_code);
2257 			audio_info->modes[i].channel_count =
2258 					edid_caps->audio_modes[i].channel_count;
2259 			audio_info->modes[i].sample_rates.all =
2260 					edid_caps->audio_modes[i].sample_rate;
2261 			audio_info->modes[i].sample_size =
2262 					edid_caps->audio_modes[i].sample_size;
2263 		}
2264 	}
2265 
2266 	audio_info->flags.all = edid_caps->speaker_flags;
2267 
2268 	/* TODO: We only check for the progressive mode, check for interlace mode too */
2269 	if (drm_connector->latency_present[0]) {
2270 		audio_info->video_latency = drm_connector->video_latency[0];
2271 		audio_info->audio_latency = drm_connector->audio_latency[0];
2272 	}
2273 
2274 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2275 
2276 }
2277 
2278 static void
2279 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
2280 				      struct drm_display_mode *dst_mode)
2281 {
2282 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
2283 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
2284 	dst_mode->crtc_clock = src_mode->crtc_clock;
2285 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
2286 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
2287 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
2288 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
2289 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
2290 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
2291 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
2292 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
2293 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
2294 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
2295 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
2296 }
2297 
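/*
 * When scaling is enabled, or when the requested mode already matches the
 * native mode's clock and totals, reuse the native crtc_* timing so the
 * panel keeps running its native timing; otherwise leave the mode untouched.
 */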
2298 static void
2299 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2300 					const struct drm_display_mode *native_mode,
2301 					bool scale_enabled)
2302 {
2303 	if (scale_enabled) {
2304 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2305 	} else if (native_mode->clock == drm_mode->clock &&
2306 			native_mode->htotal == drm_mode->htotal &&
2307 			native_mode->vtotal == drm_mode->vtotal) {
2308 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2309 	} else {
2310 		/* no scaling nor amdgpu inserted, no need to patch */
2311 		/* neither scaling nor an amdgpu-inserted mode; no need to patch */
2312 }
2313 
2314 static struct dc_sink *
2315 create_fake_sink(struct amdgpu_dm_connector *aconnector)
2316 {
2317 	struct dc_sink_init_data sink_init_data = { 0 };
2318 	struct dc_sink *sink = NULL;
2319 	sink_init_data.link = aconnector->dc_link;
2320 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2321 
2322 	sink = dc_sink_create(&sink_init_data);
2323 	if (!sink) {
2324 		DRM_ERROR("Failed to create sink!\n");
2325 		return NULL;
2326 	}
2327 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2328 
2329 	return sink;
2330 }
2331 
2332 static void set_multisync_trigger_params(
2333 		struct dc_stream_state *stream)
2334 {
2335 	if (stream->triggered_crtc_reset.enabled) {
2336 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
2337 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
2338 	}
2339 }
2340 
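/*
 * Pick the stream with the highest refresh rate as the CRTC-reset master.
 * The rate is derived as pix_clk_khz * 1000 / (h_total * v_total), e.g.
 * 148500 kHz / (2200 * 1125) is 60 Hz; every synced stream then points its
 * trigger event source at that master.
 */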
2341 static void set_master_stream(struct dc_stream_state *stream_set[],
2342 			      int stream_count)
2343 {
2344 	int j, highest_rfr = 0, master_stream = 0;
2345 
2346 	for (j = 0;  j < stream_count; j++) {
2347 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
2348 			int refresh_rate = 0;
2349 
2350 			refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
2351 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
2352 			if (refresh_rate > highest_rfr) {
2353 				highest_rfr = refresh_rate;
2354 				master_stream = j;
2355 			}
2356 		}
2357 	}
2358 	for (j = 0;  j < stream_count; j++) {
2359 		if (stream_set[j])
2360 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
2361 	}
2362 }
2363 
2364 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
2365 {
2366 	int i = 0;
2367 
2368 	if (context->stream_count < 2)
2369 		return;
2370 	for (i = 0; i < context->stream_count ; i++) {
2371 		if (!context->streams[i])
2372 			continue;
2373 		/* TODO: add a function to read the AMD VSDB bits and set the
2374 		 * crtc_sync_master.multi_sync_enabled flag accordingly.
2375 		 * For now it is left as false.
2376 		 */
2377 		set_multisync_trigger_params(context->streams[i]);
2378 	}
2379 	set_master_stream(context->streams, context->stream_count);
2380 }
2381 
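/*
 * Build a dc_stream_state for this connector: use the real dc_sink when one
 * is attached, otherwise a virtual fake sink (except for MST, where the sink
 * is created through the MST helpers). The preferred mode, if any, decides
 * whether native timing is reused before timing, scaling and audio info are
 * filled in.
 */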
2382 static struct dc_stream_state *
2383 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2384 		       const struct drm_display_mode *drm_mode,
2385 		       const struct dm_connector_state *dm_state)
2386 {
2387 	struct drm_display_mode *preferred_mode = NULL;
2388 	struct drm_connector *drm_connector;
2389 	struct dc_stream_state *stream = NULL;
2390 	struct drm_display_mode mode = *drm_mode;
2391 	bool native_mode_found = false;
2392 	struct dc_sink *sink = NULL;
2393 	if (aconnector == NULL) {
2394 		DRM_ERROR("aconnector is NULL!\n");
2395 		return stream;
2396 	}
2397 
2398 	drm_connector = &aconnector->base;
2399 
2400 	if (!aconnector->dc_sink) {
2401 		/*
2402 		 * Create the dc_sink when necessary for MST;
2403 		 * don't apply a fake sink to MST connectors.
2404 		 */
2405 		if (aconnector->mst_port) {
2406 			dm_dp_mst_dc_sink_create(drm_connector);
2407 			return stream;
2408 		}
2409 
2410 		sink = create_fake_sink(aconnector);
2411 		if (!sink)
2412 			return stream;
2413 	} else {
2414 		sink = aconnector->dc_sink;
2415 	}
2416 
2417 	stream = dc_create_stream_for_sink(sink);
2418 
2419 	if (stream == NULL) {
2420 		DRM_ERROR("Failed to create stream for sink!\n");
2421 		goto finish;
2422 	}
2423 
2424 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
2425 		/* Search for preferred mode */
2426 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
2427 			native_mode_found = true;
2428 			break;
2429 		}
2430 	}
2431 	if (!native_mode_found)
2432 		preferred_mode = list_first_entry_or_null(
2433 				&aconnector->base.modes,
2434 				struct drm_display_mode,
2435 				head);
2436 
2437 	if (preferred_mode == NULL) {
2438 		/* This may not be an error: the use case is when we have no
2439 		 * usermode calls to reset and set the mode upon hotplug. In that
2440 		 * case we set the mode ourselves to restore the previous mode,
2441 		 * and the mode list may not be filled in yet.
2442 		 */
2443 		DRM_DEBUG_DRIVER("No preferred mode found\n");
2444 	} else {
2445 		decide_crtc_timing_for_drm_display_mode(
2446 				&mode, preferred_mode,
2447 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
2448 	}
2449 
2450 	if (!dm_state)
2451 		drm_mode_set_crtcinfo(&mode, 0);
2452 
2453 	fill_stream_properties_from_drm_display_mode(stream,
2454 			&mode, &aconnector->base);
2455 	update_stream_scaling_settings(&mode, dm_state, stream);
2456 
2457 	fill_audio_info(
2458 		&stream->audio_info,
2459 		drm_connector,
2460 		sink);
2461 
2462 	update_stream_signal(stream);
2463 
2464 	if (dm_state && dm_state->freesync_capable)
2465 		stream->ignore_msa_timing_param = true;
2466 finish:
2467 	if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
2468 		dc_sink_release(sink);
2469 
2470 	return stream;
2471 }
2472 
2473 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
2474 {
2475 	drm_crtc_cleanup(crtc);
2476 	kfree(crtc);
2477 }
2478 
2479 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
2480 				  struct drm_crtc_state *state)
2481 {
2482 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
2483 
2484 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
2485 	if (cur->stream)
2486 		dc_stream_release(cur->stream);
2487 
2488 
2489 	__drm_atomic_helper_crtc_destroy_state(state);
2490 
2491 
2492 	kfree(state);
2493 }
2494 
2495 static void dm_crtc_reset_state(struct drm_crtc *crtc)
2496 {
2497 	struct dm_crtc_state *state;
2498 
2499 	if (crtc->state)
2500 		dm_crtc_destroy_state(crtc, crtc->state);
2501 
2502 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2503 	if (WARN_ON(!state))
2504 		return;
2505 
2506 	crtc->state = &state->base;
2507 	crtc->state->crtc = crtc;
2508 
2509 }
2510 
2511 static struct drm_crtc_state *
2512 dm_crtc_duplicate_state(struct drm_crtc *crtc)
2513 {
2514 	struct dm_crtc_state *state, *cur;
2515 
2516 	if (WARN_ON(!crtc->state))
2517 		return NULL;
2518 
2519 	cur = to_dm_crtc_state(crtc->state);
2520 
2521 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2522 	if (!state)
2523 		return NULL;
2524 
2525 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
2526 
2527 	if (cur->stream) {
2528 		state->stream = cur->stream;
2529 		dc_stream_retain(state->stream);
2530 	}
2531 
2532 	/* TODO: Duplicate the dc_stream once the stream object is flattened */
2533 
2534 	return &state->base;
2535 }
2536 
2537 
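/*
 * Vblank on/off requests are forwarded to DC's interrupt manager; the IRQ
 * source is selected by the CRTC's OTG instance.
 */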
2538 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
2539 {
2540 	enum dc_irq_source irq_source;
2541 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2542 	struct amdgpu_device *adev = crtc->dev->dev_private;
2543 
2544 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
2545 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2546 }
2547 
2548 static int dm_enable_vblank(struct drm_crtc *crtc)
2549 {
2550 	return dm_set_vblank(crtc, true);
2551 }
2552 
2553 static void dm_disable_vblank(struct drm_crtc *crtc)
2554 {
2555 	dm_set_vblank(crtc, false);
2556 }
2557 
2558 /* Only the options currently available to the driver are implemented */
2559 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
2560 	.reset = dm_crtc_reset_state,
2561 	.destroy = amdgpu_dm_crtc_destroy,
2562 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
2563 	.set_config = drm_atomic_helper_set_config,
2564 	.page_flip = drm_atomic_helper_page_flip,
2565 	.atomic_duplicate_state = dm_crtc_duplicate_state,
2566 	.atomic_destroy_state = dm_crtc_destroy_state,
2567 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
2568 	.enable_vblank = dm_enable_vblank,
2569 	.disable_vblank = dm_disable_vblank,
2570 };
2571 
2572 static enum drm_connector_status
2573 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
2574 {
2575 	bool connected;
2576 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2577 
2578 	/* Notes:
2579 	 * 1. This interface is NOT called in context of HPD irq.
2580 	 * 2. This interface *is* called in the context of a user-mode ioctl,
2581 	 * which makes it a bad place for *any* MST-related activity. */
2582 
2583 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
2584 	    !aconnector->fake_enable)
2585 		connected = (aconnector->dc_sink != NULL);
2586 	else
2587 		connected = (aconnector->base.force == DRM_FORCE_ON);
2588 
2589 	return (connected ? connector_status_connected :
2590 			connector_status_disconnected);
2591 }
2592 
2593 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
2594 					    struct drm_connector_state *connector_state,
2595 					    struct drm_property *property,
2596 					    uint64_t val)
2597 {
2598 	struct drm_device *dev = connector->dev;
2599 	struct amdgpu_device *adev = dev->dev_private;
2600 	struct dm_connector_state *dm_old_state =
2601 		to_dm_connector_state(connector->state);
2602 	struct dm_connector_state *dm_new_state =
2603 		to_dm_connector_state(connector_state);
2604 
2605 	int ret = -EINVAL;
2606 
2607 	if (property == dev->mode_config.scaling_mode_property) {
2608 		enum amdgpu_rmx_type rmx_type;
2609 
2610 		switch (val) {
2611 		case DRM_MODE_SCALE_CENTER:
2612 			rmx_type = RMX_CENTER;
2613 			break;
2614 		case DRM_MODE_SCALE_ASPECT:
2615 			rmx_type = RMX_ASPECT;
2616 			break;
2617 		case DRM_MODE_SCALE_FULLSCREEN:
2618 			rmx_type = RMX_FULL;
2619 			break;
2620 		case DRM_MODE_SCALE_NONE:
2621 		default:
2622 			rmx_type = RMX_OFF;
2623 			break;
2624 		}
2625 
2626 		if (dm_old_state->scaling == rmx_type)
2627 			return 0;
2628 
2629 		dm_new_state->scaling = rmx_type;
2630 		ret = 0;
2631 	} else if (property == adev->mode_info.underscan_hborder_property) {
2632 		dm_new_state->underscan_hborder = val;
2633 		ret = 0;
2634 	} else if (property == adev->mode_info.underscan_vborder_property) {
2635 		dm_new_state->underscan_vborder = val;
2636 		ret = 0;
2637 	} else if (property == adev->mode_info.underscan_property) {
2638 		dm_new_state->underscan_enable = val;
2639 		ret = 0;
2640 	}
2641 
2642 	return ret;
2643 }
2644 
2645 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
2646 					    const struct drm_connector_state *state,
2647 					    struct drm_property *property,
2648 					    uint64_t *val)
2649 {
2650 	struct drm_device *dev = connector->dev;
2651 	struct amdgpu_device *adev = dev->dev_private;
2652 	struct dm_connector_state *dm_state =
2653 		to_dm_connector_state(state);
2654 	int ret = -EINVAL;
2655 
2656 	if (property == dev->mode_config.scaling_mode_property) {
2657 		switch (dm_state->scaling) {
2658 		case RMX_CENTER:
2659 			*val = DRM_MODE_SCALE_CENTER;
2660 			break;
2661 		case RMX_ASPECT:
2662 			*val = DRM_MODE_SCALE_ASPECT;
2663 			break;
2664 		case RMX_FULL:
2665 			*val = DRM_MODE_SCALE_FULLSCREEN;
2666 			break;
2667 		case RMX_OFF:
2668 		default:
2669 			*val = DRM_MODE_SCALE_NONE;
2670 			break;
2671 		}
2672 		ret = 0;
2673 	} else if (property == adev->mode_info.underscan_hborder_property) {
2674 		*val = dm_state->underscan_hborder;
2675 		ret = 0;
2676 	} else if (property == adev->mode_info.underscan_vborder_property) {
2677 		*val = dm_state->underscan_vborder;
2678 		ret = 0;
2679 	} else if (property == adev->mode_info.underscan_property) {
2680 		*val = dm_state->underscan_enable;
2681 		ret = 0;
2682 	}
2683 	return ret;
2684 }
2685 
2686 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
2687 {
2688 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2689 	const struct dc_link *link = aconnector->dc_link;
2690 	struct amdgpu_device *adev = connector->dev->dev_private;
2691 	struct amdgpu_display_manager *dm = &adev->dm;
2692 
2693 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2694 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2695 
2696 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2697 	    link->type != dc_connection_none &&
2698 	    dm->backlight_dev) {
2699 		backlight_device_unregister(dm->backlight_dev);
2700 		dm->backlight_dev = NULL;
2701 	}
2702 #endif
2703 	drm_connector_unregister(connector);
2704 	drm_connector_cleanup(connector);
2705 	kfree(connector);
2706 }
2707 
2708 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
2709 {
2710 	struct dm_connector_state *state =
2711 		to_dm_connector_state(connector->state);
2712 
2713 	if (connector->state)
2714 		__drm_atomic_helper_connector_destroy_state(connector->state);
2715 
2716 	kfree(state);
2717 
2718 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2719 
2720 	if (state) {
2721 		state->scaling = RMX_OFF;
2722 		state->underscan_enable = false;
2723 		state->underscan_hborder = 0;
2724 		state->underscan_vborder = 0;
2725 
2726 		__drm_atomic_helper_connector_reset(connector, &state->base);
2727 	}
2728 }
2729 
2730 struct drm_connector_state *
2731 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
2732 {
2733 	struct dm_connector_state *state =
2734 		to_dm_connector_state(connector->state);
2735 
2736 	struct dm_connector_state *new_state =
2737 			kmemdup(state, sizeof(*state), GFP_KERNEL);
2738 
2739 	if (new_state) {
2740 		__drm_atomic_helper_connector_duplicate_state(connector,
2741 							      &new_state->base);
2742 		return &new_state->base;
2743 	}
2744 
2745 	return NULL;
2746 }
2747 
2748 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
2749 	.reset = amdgpu_dm_connector_funcs_reset,
2750 	.detect = amdgpu_dm_connector_detect,
2751 	.fill_modes = drm_helper_probe_single_connector_modes,
2752 	.destroy = amdgpu_dm_connector_destroy,
2753 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
2754 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
2755 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
2756 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
2757 };
2758 
2759 static struct drm_encoder *best_encoder(struct drm_connector *connector)
2760 {
2761 	int enc_id = connector->encoder_ids[0];
2762 	struct drm_mode_object *obj;
2763 	struct drm_encoder *encoder;
2764 
2765 	DRM_DEBUG_DRIVER("Finding the best encoder\n");
2766 
2767 	/* pick the encoder ids */
2768 	if (enc_id) {
2769 		obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
2770 		if (!obj) {
2771 			DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2772 			return NULL;
2773 		}
2774 		encoder = obj_to_encoder(obj);
2775 		return encoder;
2776 	}
2777 	DRM_ERROR("No encoder id\n");
2778 	return NULL;
2779 }
2780 
2781 static int get_modes(struct drm_connector *connector)
2782 {
2783 	return amdgpu_dm_connector_get_modes(connector);
2784 }
2785 
2786 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
2787 {
2788 	struct dc_sink_init_data init_params = {
2789 			.link = aconnector->dc_link,
2790 			.sink_signal = SIGNAL_TYPE_VIRTUAL
2791 	};
2792 	struct edid *edid;
2793 
2794 	if (!aconnector->base.edid_blob_ptr) {
2795 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
2796 				aconnector->base.name);
2797 
2798 		aconnector->base.force = DRM_FORCE_OFF;
2799 		aconnector->base.override_edid = false;
2800 		return;
2801 	}
2802 
2803 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
2804 
2805 	aconnector->edid = edid;
2806 
2807 	aconnector->dc_em_sink = dc_link_add_remote_sink(
2808 		aconnector->dc_link,
2809 		(uint8_t *)edid,
2810 		(edid->extensions + 1) * EDID_LENGTH,
2811 		&init_params);
2812 
2813 	if (aconnector->base.force == DRM_FORCE_ON)
2814 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
2815 		aconnector->dc_link->local_sink :
2816 		aconnector->dc_em_sink;
2817 }
2818 
2819 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
2820 {
2821 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
2822 
2823 	/* In case of a headless boot with force-on for a DP managed connector,
2824 	 * these settings have to be != 0 to get an initial modeset.
2825 	 */
2826 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
2827 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
2828 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
2829 	}
2830 
2831 
2832 	aconnector->base.override_edid = true;
2833 	create_eml_sink(aconnector);
2834 }
2835 
2836 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
2837 				   struct drm_display_mode *mode)
2838 {
2839 	int result = MODE_ERROR;
2840 	struct dc_sink *dc_sink;
2841 	struct amdgpu_device *adev = connector->dev->dev_private;
2842 	/* TODO: Unhardcode stream count */
2843 	struct dc_stream_state *stream;
2844 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2845 	enum dc_status dc_result = DC_OK;
2846 
2847 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
2848 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
2849 		return result;
2850 
2851 	/* Only run this the first time mode_valid is called, to initialize
2852 	 * EDID management.
2853 	 */
2854 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
2855 		!aconnector->dc_em_sink)
2856 		handle_edid_mgmt(aconnector);
2857 
2858 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
2859 
2860 	if (dc_sink == NULL) {
2861 		DRM_ERROR("dc_sink is NULL!\n");
2862 		goto fail;
2863 	}
2864 
2865 	stream = create_stream_for_sink(aconnector, mode, NULL);
2866 	if (stream == NULL) {
2867 		DRM_ERROR("Failed to create stream for sink!\n");
2868 		goto fail;
2869 	}
2870 
2871 	dc_result = dc_validate_stream(adev->dm.dc, stream);
2872 
2873 	if (dc_result == DC_OK)
2874 		result = MODE_OK;
2875 	else
2876 		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
2877 			      mode->hdisplay,
2878 			      mode->vdisplay,
2879 			      mode->clock,
2880 			      dc_result);
2881 
2882 	dc_stream_release(stream);
2883 
2884 fail:
2885 	/* TODO: error handling*/
2886 	return result;
2887 }
2888 
2889 static const struct drm_connector_helper_funcs
2890 amdgpu_dm_connector_helper_funcs = {
2891 	/*
2892 	 * If a second, larger display is hotplugged in fbcon mode, its higher
2893 	 * resolution modes are filtered out by drm_mode_validate_size() and are
2894 	 * missing once the user starts lightdm. So the mode list must be
2895 	 * rebuilt in the get_modes callback, not just returned as a count.
2896 	 */
2897 	.get_modes = get_modes,
2898 	.mode_valid = amdgpu_dm_connector_mode_valid,
2899 	.best_encoder = best_encoder
2900 };
2901 
2902 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
2903 {
2904 }
2905 
2906 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
2907 				       struct drm_crtc_state *state)
2908 {
2909 	struct amdgpu_device *adev = crtc->dev->dev_private;
2910 	struct dc *dc = adev->dm.dc;
2911 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
2912 	int ret = -EINVAL;
2913 
2914 	if (unlikely(!dm_crtc_state->stream &&
2915 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
2916 		WARN_ON(1);
2917 		return ret;
2918 	}
2919 
2920 	/* In some use cases, like reset, no stream is attached */
2921 	if (!dm_crtc_state->stream)
2922 		return 0;
2923 
2924 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
2925 		return 0;
2926 
2927 	return ret;
2928 }
2929 
2930 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
2931 				      const struct drm_display_mode *mode,
2932 				      struct drm_display_mode *adjusted_mode)
2933 {
2934 	return true;
2935 }
2936 
2937 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
2938 	.disable = dm_crtc_helper_disable,
2939 	.atomic_check = dm_crtc_helper_atomic_check,
2940 	.mode_fixup = dm_crtc_helper_mode_fixup
2941 };
2942 
2943 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
2944 {
2945 
2946 }
2947 
2948 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
2949 					  struct drm_crtc_state *crtc_state,
2950 					  struct drm_connector_state *conn_state)
2951 {
2952 	return 0;
2953 }
2954 
2955 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
2956 	.disable = dm_encoder_helper_disable,
2957 	.atomic_check = dm_encoder_helper_atomic_check
2958 };
2959 
2960 static void dm_drm_plane_reset(struct drm_plane *plane)
2961 {
2962 	struct dm_plane_state *amdgpu_state = NULL;
2963 
2964 	if (plane->state)
2965 		plane->funcs->atomic_destroy_state(plane, plane->state);
2966 
2967 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
2968 	WARN_ON(amdgpu_state == NULL);
2969 
2970 	if (amdgpu_state) {
2971 		plane->state = &amdgpu_state->base;
2972 		plane->state->plane = plane;
2973 		plane->state->rotation = DRM_MODE_ROTATE_0;
2974 	}
2975 }
2976 
2977 static struct drm_plane_state *
2978 dm_drm_plane_duplicate_state(struct drm_plane *plane)
2979 {
2980 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
2981 
2982 	old_dm_plane_state = to_dm_plane_state(plane->state);
2983 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
2984 	if (!dm_plane_state)
2985 		return NULL;
2986 
2987 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
2988 
2989 	if (old_dm_plane_state->dc_state) {
2990 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
2991 		dc_plane_state_retain(dm_plane_state->dc_state);
2992 	}
2993 
2994 	return &dm_plane_state->base;
2995 }
2996 
2997 void dm_drm_plane_destroy_state(struct drm_plane *plane,
2998 				struct drm_plane_state *state)
2999 {
3000 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3001 
3002 	if (dm_plane_state->dc_state)
3003 		dc_plane_state_release(dm_plane_state->dc_state);
3004 
3005 	drm_atomic_helper_plane_destroy_state(plane, state);
3006 }
3007 
3008 static const struct drm_plane_funcs dm_plane_funcs = {
3009 	.update_plane	= drm_atomic_helper_update_plane,
3010 	.disable_plane	= drm_atomic_helper_disable_plane,
3011 	.destroy	= drm_plane_cleanup,
3012 	.reset = dm_drm_plane_reset,
3013 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
3014 	.atomic_destroy_state = dm_drm_plane_destroy_state,
3015 };
3016 
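/*
 * Pin the new framebuffer's BO (cursor planes must live in VRAM, other
 * planes may use any supported domain) and program the DC plane address.
 * For video surfaces the chroma plane follows the luma plane at
 * base + ALIGN(width, 64) * height, matching the layout assumed in
 * fill_plane_attributes_from_fb().
 */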
3017 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
3018 				      struct drm_plane_state *new_state)
3019 {
3020 	struct amdgpu_framebuffer *afb;
3021 	struct drm_gem_object *obj;
3022 	struct amdgpu_device *adev;
3023 	struct amdgpu_bo *rbo;
3024 	uint64_t chroma_addr = 0;
3025 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
3026 	unsigned int awidth;
3027 	uint32_t domain;
3028 	int r;
3029 
3030 	dm_plane_state_old = to_dm_plane_state(plane->state);
3031 	dm_plane_state_new = to_dm_plane_state(new_state);
3032 
3033 	if (!new_state->fb) {
3034 		DRM_DEBUG_DRIVER("No FB bound\n");
3035 		return 0;
3036 	}
3037 
3038 	afb = to_amdgpu_framebuffer(new_state->fb);
3039 	obj = new_state->fb->obj[0];
3040 	rbo = gem_to_amdgpu_bo(obj);
3041 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
3042 	r = amdgpu_bo_reserve(rbo, false);
3043 	if (unlikely(r != 0))
3044 		return r;
3045 
3046 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
3047 		domain = amdgpu_display_supported_domains(adev);
3048 	else
3049 		domain = AMDGPU_GEM_DOMAIN_VRAM;
3050 
3051 	r = amdgpu_bo_pin(rbo, domain, &afb->address);
3052 	amdgpu_bo_unreserve(rbo);
3053 
3054 	if (unlikely(r != 0)) {
3055 		if (r != -ERESTARTSYS)
3056 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
3057 		return r;
3058 	}
3059 
3060 	amdgpu_bo_ref(rbo);
3061 
3062 	if (dm_plane_state_new->dc_state &&
3063 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
3064 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
3065 
3066 		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3067 			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
3068 			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
3069 		} else {
3070 			awidth = ALIGN(new_state->fb->width, 64);
3071 			plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3072 			plane_state->address.video_progressive.luma_addr.low_part
3073 							= lower_32_bits(afb->address);
3074 			plane_state->address.video_progressive.luma_addr.high_part
3075 							= upper_32_bits(afb->address);
3076 			chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
3077 			plane_state->address.video_progressive.chroma_addr.low_part
3078 							= lower_32_bits(chroma_addr);
3079 			plane_state->address.video_progressive.chroma_addr.high_part
3080 							= upper_32_bits(chroma_addr);
3081 		}
3082 	}
3083 
3084 	return 0;
3085 }
3086 
3087 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
3088 				       struct drm_plane_state *old_state)
3089 {
3090 	struct amdgpu_bo *rbo;
3091 	int r;
3092 
3093 	if (!old_state->fb)
3094 		return;
3095 
3096 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
3097 	r = amdgpu_bo_reserve(rbo, false);
3098 	if (unlikely(r)) {
3099 		DRM_ERROR("failed to reserve rbo before unpin\n");
3100 		return;
3101 	}
3102 
3103 	amdgpu_bo_unpin(rbo);
3104 	amdgpu_bo_unreserve(rbo);
3105 	amdgpu_bo_unref(&rbo);
3106 }
3107 
3108 static int dm_plane_atomic_check(struct drm_plane *plane,
3109 				 struct drm_plane_state *state)
3110 {
3111 	struct amdgpu_device *adev = plane->dev->dev_private;
3112 	struct dc *dc = adev->dm.dc;
3113 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3114 
3115 	if (!dm_plane_state->dc_state)
3116 		return 0;
3117 
3118 	if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
3119 		return -EINVAL;
3120 
3121 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3122 		return 0;
3123 
3124 	return -EINVAL;
3125 }
3126 
3127 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
3128 	.prepare_fb = dm_plane_helper_prepare_fb,
3129 	.cleanup_fb = dm_plane_helper_cleanup_fb,
3130 	.atomic_check = dm_plane_atomic_check,
3131 };
3132 
3133 /*
3134  * TODO: these are currently initialized to RGB formats only.
3135  * For future use cases we should either initialize them dynamically based on
3136  * plane capabilities, or initialize this array to all formats, so the internal
3137  * DRM check succeeds, and let DC implement the proper check.
3138  */
3139 static const uint32_t rgb_formats[] = {
3140 	DRM_FORMAT_RGB888,
3141 	DRM_FORMAT_XRGB8888,
3142 	DRM_FORMAT_ARGB8888,
3143 	DRM_FORMAT_RGBA8888,
3144 	DRM_FORMAT_XRGB2101010,
3145 	DRM_FORMAT_XBGR2101010,
3146 	DRM_FORMAT_ARGB2101010,
3147 	DRM_FORMAT_ABGR2101010,
3148 };
3149 
3150 static const uint32_t yuv_formats[] = {
3151 	DRM_FORMAT_NV12,
3152 	DRM_FORMAT_NV21,
3153 };
3154 
3155 static const u32 cursor_formats[] = {
3156 	DRM_FORMAT_ARGB8888
3157 };
3158 
3159 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3160 				struct amdgpu_plane *aplane,
3161 				unsigned long possible_crtcs)
3162 {
3163 	int res = -EPERM;
3164 
3165 	switch (aplane->base.type) {
3166 	case DRM_PLANE_TYPE_PRIMARY:
3167 		res = drm_universal_plane_init(
3168 				dm->adev->ddev,
3169 				&aplane->base,
3170 				possible_crtcs,
3171 				&dm_plane_funcs,
3172 				rgb_formats,
3173 				ARRAY_SIZE(rgb_formats),
3174 				NULL, aplane->base.type, NULL);
3175 		break;
3176 	case DRM_PLANE_TYPE_OVERLAY:
3177 		res = drm_universal_plane_init(
3178 				dm->adev->ddev,
3179 				&aplane->base,
3180 				possible_crtcs,
3181 				&dm_plane_funcs,
3182 				yuv_formats,
3183 				ARRAY_SIZE(yuv_formats),
3184 				NULL, aplane->base.type, NULL);
3185 		break;
3186 	case DRM_PLANE_TYPE_CURSOR:
3187 		res = drm_universal_plane_init(
3188 				dm->adev->ddev,
3189 				&aplane->base,
3190 				possible_crtcs,
3191 				&dm_plane_funcs,
3192 				cursor_formats,
3193 				ARRAY_SIZE(cursor_formats),
3194 				NULL, aplane->base.type, NULL);
3195 		break;
3196 	}
3197 
3198 	drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
3199 
3200 	/* Create (reset) the plane state */
3201 	if (aplane->base.funcs->reset)
3202 		aplane->base.funcs->reset(&aplane->base);
3203 
3204 
3205 	return res;
3206 }
3207 
3208 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3209 			       struct drm_plane *plane,
3210 			       uint32_t crtc_index)
3211 {
3212 	struct amdgpu_crtc *acrtc = NULL;
3213 	struct amdgpu_plane *cursor_plane;
3214 
3215 	int res = -ENOMEM;
3216 
3217 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3218 	if (!cursor_plane)
3219 		goto fail;
3220 
3221 	cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
3222 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3223 
3224 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3225 	if (!acrtc)
3226 		goto fail;
3227 
3228 	res = drm_crtc_init_with_planes(
3229 			dm->ddev,
3230 			&acrtc->base,
3231 			plane,
3232 			&cursor_plane->base,
3233 			&amdgpu_dm_crtc_funcs, NULL);
3234 
3235 	if (res)
3236 		goto fail;
3237 
3238 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3239 
3240 	/* Create (reset) the crtc state */
3241 	if (acrtc->base.funcs->reset)
3242 		acrtc->base.funcs->reset(&acrtc->base);
3243 
3244 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3245 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3246 
3247 	acrtc->crtc_id = crtc_index;
3248 	acrtc->base.enabled = false;
3249 
3250 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3251 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
3252 				   true, MAX_COLOR_LUT_ENTRIES);
3253 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
3254 
3255 	return 0;
3256 
3257 fail:
3258 	kfree(acrtc);
3259 	kfree(cursor_plane);
3260 	return res;
3261 }
3262 
3263 
3264 static int to_drm_connector_type(enum signal_type st)
3265 {
3266 	switch (st) {
3267 	case SIGNAL_TYPE_HDMI_TYPE_A:
3268 		return DRM_MODE_CONNECTOR_HDMIA;
3269 	case SIGNAL_TYPE_EDP:
3270 		return DRM_MODE_CONNECTOR_eDP;
3271 	case SIGNAL_TYPE_RGB:
3272 		return DRM_MODE_CONNECTOR_VGA;
3273 	case SIGNAL_TYPE_DISPLAY_PORT:
3274 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
3275 		return DRM_MODE_CONNECTOR_DisplayPort;
3276 	case SIGNAL_TYPE_DVI_DUAL_LINK:
3277 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
3278 		return DRM_MODE_CONNECTOR_DVID;
3279 	case SIGNAL_TYPE_VIRTUAL:
3280 		return DRM_MODE_CONNECTOR_VIRTUAL;
3281 
3282 	default:
3283 		return DRM_MODE_CONNECTOR_Unknown;
3284 	}
3285 }
3286 
3287 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
3288 {
3289 	const struct drm_connector_helper_funcs *helper =
3290 		connector->helper_private;
3291 	struct drm_encoder *encoder;
3292 	struct amdgpu_encoder *amdgpu_encoder;
3293 
3294 	encoder = helper->best_encoder(connector);
3295 
3296 	if (encoder == NULL)
3297 		return;
3298 
3299 	amdgpu_encoder = to_amdgpu_encoder(encoder);
3300 
3301 	amdgpu_encoder->native_mode.clock = 0;
3302 
3303 	if (!list_empty(&connector->probed_modes)) {
3304 		struct drm_display_mode *preferred_mode = NULL;
3305 
3306 		list_for_each_entry(preferred_mode,
3307 				    &connector->probed_modes,
3308 				    head) {
3309 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
3310 				amdgpu_encoder->native_mode = *preferred_mode;
3311 
3312 			break;
3313 		}
3314 
3315 	}
3316 }
3317 
3318 static struct drm_display_mode *
3319 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
3320 			     char *name,
3321 			     int hdisplay, int vdisplay)
3322 {
3323 	struct drm_device *dev = encoder->dev;
3324 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3325 	struct drm_display_mode *mode = NULL;
3326 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3327 
3328 	mode = drm_mode_duplicate(dev, native_mode);
3329 
3330 	if (mode == NULL)
3331 		return NULL;
3332 
3333 	mode->hdisplay = hdisplay;
3334 	mode->vdisplay = vdisplay;
3335 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3336 	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
3337 
3338 	return mode;
3339 
3340 }
3341 
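/*
 * Add a fixed set of common modes that are smaller than the native mode so
 * they can be scaled up to it; modes already reported by the EDID probe are
 * skipped to avoid duplicates.
 */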
3342 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3343 						 struct drm_connector *connector)
3344 {
3345 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3346 	struct drm_display_mode *mode = NULL;
3347 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3348 	struct amdgpu_dm_connector *amdgpu_dm_connector =
3349 				to_amdgpu_dm_connector(connector);
3350 	int i;
3351 	int n;
3352 	struct mode_size {
3353 		char name[DRM_DISPLAY_MODE_LEN];
3354 		int w;
3355 		int h;
3356 	} common_modes[] = {
3357 		{  "640x480",  640,  480},
3358 		{  "800x600",  800,  600},
3359 		{ "1024x768", 1024,  768},
3360 		{ "1280x720", 1280,  720},
3361 		{ "1280x800", 1280,  800},
3362 		{"1280x1024", 1280, 1024},
3363 		{ "1440x900", 1440,  900},
3364 		{"1680x1050", 1680, 1050},
3365 		{"1600x1200", 1600, 1200},
3366 		{"1920x1080", 1920, 1080},
3367 		{"1920x1200", 1920, 1200}
3368 	};
3369 
3370 	n = ARRAY_SIZE(common_modes);
3371 
3372 	for (i = 0; i < n; i++) {
3373 		struct drm_display_mode *curmode = NULL;
3374 		bool mode_existed = false;
3375 
3376 		if (common_modes[i].w > native_mode->hdisplay ||
3377 		    common_modes[i].h > native_mode->vdisplay ||
3378 		   (common_modes[i].w == native_mode->hdisplay &&
3379 		    common_modes[i].h == native_mode->vdisplay))
3380 			continue;
3381 
3382 		list_for_each_entry(curmode, &connector->probed_modes, head) {
3383 			if (common_modes[i].w == curmode->hdisplay &&
3384 			    common_modes[i].h == curmode->vdisplay) {
3385 				mode_existed = true;
3386 				break;
3387 			}
3388 		}
3389 
3390 		if (mode_existed)
3391 			continue;
3392 
3393 		mode = amdgpu_dm_create_common_mode(encoder,
3394 				common_modes[i].name, common_modes[i].w,
3395 				common_modes[i].h);
3396 		drm_mode_probed_add(connector, mode);
3397 		amdgpu_dm_connector->num_modes++;
3398 	}
3399 }
3400 
3401 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
3402 					      struct edid *edid)
3403 {
3404 	struct amdgpu_dm_connector *amdgpu_dm_connector =
3405 			to_amdgpu_dm_connector(connector);
3406 
3407 	if (edid) {
3408 		/* empty probed_modes */
3409 		INIT_LIST_HEAD(&connector->probed_modes);
3410 		amdgpu_dm_connector->num_modes =
3411 				drm_add_edid_modes(connector, edid);
3412 
3413 		amdgpu_dm_get_native_mode(connector);
3414 	} else {
3415 		amdgpu_dm_connector->num_modes = 0;
3416 	}
3417 }
3418 
3419 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
3420 {
3421 	const struct drm_connector_helper_funcs *helper =
3422 			connector->helper_private;
3423 	struct amdgpu_dm_connector *amdgpu_dm_connector =
3424 			to_amdgpu_dm_connector(connector);
3425 	struct drm_encoder *encoder;
3426 	struct edid *edid = amdgpu_dm_connector->edid;
3427 
3428 	encoder = helper->best_encoder(connector);
3429 	amdgpu_dm_connector_ddc_get_modes(connector, edid);
3430 	amdgpu_dm_connector_add_common_modes(encoder, connector);
3431 
3432 #if defined(CONFIG_DRM_AMD_DC_FBC)
3433 	amdgpu_dm_fbc_init(connector);
3434 #endif
3435 	return amdgpu_dm_connector->num_modes;
3436 }
3437 
3438 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3439 				     struct amdgpu_dm_connector *aconnector,
3440 				     int connector_type,
3441 				     struct dc_link *link,
3442 				     int link_index)
3443 {
3444 	struct amdgpu_device *adev = dm->ddev->dev_private;
3445 
3446 	aconnector->connector_id = link_index;
3447 	aconnector->dc_link = link;
3448 	aconnector->base.interlace_allowed = false;
3449 	aconnector->base.doublescan_allowed = false;
3450 	aconnector->base.stereo_allowed = false;
3451 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
3452 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
3453 
3454 	mutex_init(&aconnector->hpd_lock);
3455 
3456 	/* Configure HPD hot plug support: connector->polled defaults to 0,
3457 	 * which means HPD hot plug is not supported.
3458 	 */
3459 	switch (connector_type) {
3460 	case DRM_MODE_CONNECTOR_HDMIA:
3461 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3462 		break;
3463 	case DRM_MODE_CONNECTOR_DisplayPort:
3464 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3465 		break;
3466 	case DRM_MODE_CONNECTOR_DVID:
3467 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3468 		break;
3469 	default:
3470 		break;
3471 	}
3472 
3473 	drm_object_attach_property(&aconnector->base.base,
3474 				dm->ddev->mode_config.scaling_mode_property,
3475 				DRM_MODE_SCALE_NONE);
3476 
3477 	drm_object_attach_property(&aconnector->base.base,
3478 				adev->mode_info.underscan_property,
3479 				UNDERSCAN_OFF);
3480 	drm_object_attach_property(&aconnector->base.base,
3481 				adev->mode_info.underscan_hborder_property,
3482 				0);
3483 	drm_object_attach_property(&aconnector->base.base,
3484 				adev->mode_info.underscan_vborder_property,
3485 				0);
3486 
3487 }
3488 
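/*
 * i2c transfers over the display DDC lines are translated into a DC
 * i2c_command (one payload per i2c_msg) and submitted through the i2caux
 * layer; on success the number of messages is returned, otherwise -EIO.
 */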
3489 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
3490 			      struct i2c_msg *msgs, int num)
3491 {
3492 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
3493 	struct ddc_service *ddc_service = i2c->ddc_service;
3494 	struct i2c_command cmd;
3495 	int i;
3496 	int result = -EIO;
3497 
3498 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
3499 
3500 	if (!cmd.payloads)
3501 		return result;
3502 
3503 	cmd.number_of_payloads = num;
3504 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
3505 	cmd.speed = 100;
3506 
3507 	for (i = 0; i < num; i++) {
3508 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
3509 		cmd.payloads[i].address = msgs[i].addr;
3510 		cmd.payloads[i].length = msgs[i].len;
3511 		cmd.payloads[i].data = msgs[i].buf;
3512 	}
3513 
3514 	if (dal_i2caux_submit_i2c_command(
3515 			ddc_service->ctx->i2caux,
3516 			ddc_service->ddc_pin,
3517 			&cmd))
3518 		result = num;
3519 
3520 	kfree(cmd.payloads);
3521 	return result;
3522 }
3523 
3524 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
3525 {
3526 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
3527 }
3528 
3529 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
3530 	.master_xfer = amdgpu_dm_i2c_xfer,
3531 	.functionality = amdgpu_dm_i2c_func,
3532 };
3533 
3534 static struct amdgpu_i2c_adapter *
3535 create_i2c(struct ddc_service *ddc_service,
3536 	   int link_index,
3537 	   int *res)
3538 {
3539 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
3540 	struct amdgpu_i2c_adapter *i2c;
3541 
3542 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
3543 	if (!i2c)
3544 		return NULL;
3545 	i2c->base.owner = THIS_MODULE;
3546 	i2c->base.class = I2C_CLASS_DDC;
3547 	i2c->base.dev.parent = &adev->pdev->dev;
3548 	i2c->base.algo = &amdgpu_dm_i2c_algo;
3549 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
3550 	i2c_set_adapdata(&i2c->base, i2c);
3551 	i2c->ddc_service = ddc_service;
3552 
3553 	return i2c;
3554 }
3555 
3556 
3557 /* Note: this function assumes that dc_link_detect() was called for the
3558  * dc_link which will be represented by this aconnector.
3559  */
3560 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
3561 				    struct amdgpu_dm_connector *aconnector,
3562 				    uint32_t link_index,
3563 				    struct amdgpu_encoder *aencoder)
3564 {
3565 	int res = 0;
3566 	int connector_type;
3567 	struct dc *dc = dm->dc;
3568 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
3569 	struct amdgpu_i2c_adapter *i2c;
3570 
3571 	link->priv = aconnector;
3572 
3573 	DRM_DEBUG_DRIVER("%s()\n", __func__);
3574 
3575 	i2c = create_i2c(link->ddc, link->link_index, &res);
3576 	if (!i2c) {
3577 		DRM_ERROR("Failed to create i2c adapter data\n");
3578 		return -ENOMEM;
3579 	}
3580 
3581 	aconnector->i2c = i2c;
3582 	res = i2c_add_adapter(&i2c->base);
3583 
3584 	if (res) {
3585 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
3586 		goto out_free;
3587 	}
3588 
3589 	connector_type = to_drm_connector_type(link->connector_signal);
3590 
3591 	res = drm_connector_init(
3592 			dm->ddev,
3593 			&aconnector->base,
3594 			&amdgpu_dm_connector_funcs,
3595 			connector_type);
3596 
3597 	if (res) {
3598 		DRM_ERROR("connector_init failed\n");
3599 		aconnector->connector_id = -1;
3600 		goto out_free;
3601 	}
3602 
3603 	drm_connector_helper_add(
3604 			&aconnector->base,
3605 			&amdgpu_dm_connector_helper_funcs);
3606 
3607 	if (aconnector->base.funcs->reset)
3608 		aconnector->base.funcs->reset(&aconnector->base);
3609 
3610 	amdgpu_dm_connector_init_helper(
3611 		dm,
3612 		aconnector,
3613 		connector_type,
3614 		link,
3615 		link_index);
3616 
3617 	drm_mode_connector_attach_encoder(
3618 		&aconnector->base, &aencoder->base);
3619 
3620 	drm_connector_register(&aconnector->base);
3621 
3622 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
3623 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
3624 		amdgpu_dm_initialize_dp_connector(dm, aconnector);
3625 
3626 out_free:
3627 	if (res) {
3628 		kfree(i2c);
3629 		aconnector->i2c = NULL;
3630 	}
3631 	return res;
3632 }
3633 
3634 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
3635 {
3636 	switch (adev->mode_info.num_crtc) {
3637 	case 1:
3638 		return 0x1;
3639 	case 2:
3640 		return 0x3;
3641 	case 3:
3642 		return 0x7;
3643 	case 4:
3644 		return 0xf;
3645 	case 5:
3646 		return 0x1f;
3647 	case 6:
3648 	default:
3649 		return 0x3f;
3650 	}
3651 }
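
/*
 * Editorial note (illustrative): for num_crtc in the 1..6 range, the switch
 * above is equivalent to the expression below, shown only to make the mask
 * construction explicit, not as a suggested replacement:
 *
 *	possible_crtcs = (1 << adev->mode_info.num_crtc) - 1;
 *
 * i.e. one bit per CRTC, so every encoder may be routed to any CRTC.
 */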
3652 
3653 static int amdgpu_dm_encoder_init(struct drm_device *dev,
3654 				  struct amdgpu_encoder *aencoder,
3655 				  uint32_t link_index)
3656 {
3657 	struct amdgpu_device *adev = dev->dev_private;
3658 
3659 	int res = drm_encoder_init(dev,
3660 				   &aencoder->base,
3661 				   &amdgpu_dm_encoder_funcs,
3662 				   DRM_MODE_ENCODER_TMDS,
3663 				   NULL);
3664 
3665 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
3666 
3667 	if (!res)
3668 		aencoder->encoder_id = link_index;
3669 	else
3670 		aencoder->encoder_id = -1;
3671 
3672 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
3673 
3674 	return res;
3675 }
3676 
3677 static void manage_dm_interrupts(struct amdgpu_device *adev,
3678 				 struct amdgpu_crtc *acrtc,
3679 				 bool enable)
3680 {
3681 	/*
3682 	 * This is not a correct translation, but it works as long as the
3683 	 * VBLANK interrupt constant is the same as the PFLIP one.
3684 	 */
3685 	int irq_type =
3686 		amdgpu_display_crtc_idx_to_irq_type(
3687 			adev,
3688 			acrtc->crtc_id);
3689 
3690 	if (enable) {
3691 		drm_crtc_vblank_on(&acrtc->base);
3692 		amdgpu_irq_get(
3693 			adev,
3694 			&adev->pageflip_irq,
3695 			irq_type);
3696 	} else {
3697 
3698 		amdgpu_irq_put(
3699 			adev,
3700 			&adev->pageflip_irq,
3701 			irq_type);
3702 		drm_crtc_vblank_off(&acrtc->base);
3703 	}
3704 }
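
/*
 * Editorial note (illustrative): a typical caller enables the CRTC's vblank
 * and pageflip handling with a single call, as the commit tail below does for
 * newly enabled CRTCs:
 *
 *	manage_dm_interrupts(adev, acrtc, true);
 *
 * and mirrors it with 'false' before the stream backing the CRTC is torn down.
 */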
3705 
3706 static bool
3707 is_scaling_state_different(const struct dm_connector_state *dm_state,
3708 			   const struct dm_connector_state *old_dm_state)
3709 {
3710 	if (dm_state->scaling != old_dm_state->scaling)
3711 		return true;
3712 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
3713 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
3714 			return true;
3715 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
3716 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
3717 			return true;
3718 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
3719 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
3720 		return true;
3721 	return false;
3722 }
3723 
3724 static void remove_stream(struct amdgpu_device *adev,
3725 			  struct amdgpu_crtc *acrtc,
3726 			  struct dc_stream_state *stream)
3727 {
3728 	/* this is the update mode case */
3729 	if (adev->dm.freesync_module)
3730 		mod_freesync_remove_stream(adev->dm.freesync_module, stream);
3731 
3732 	acrtc->otg_inst = -1;
3733 	acrtc->enabled = false;
3734 }
3735 
3736 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
3737 			       struct dc_cursor_position *position)
3738 {
3739 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3740 	int x, y;
3741 	int xorigin = 0, yorigin = 0;
3742 
3743 	if (!crtc || !plane->state->fb) {
3744 		position->enable = false;
3745 		position->x = 0;
3746 		position->y = 0;
3747 		return 0;
3748 	}
3749 
3750 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
3751 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
3752 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3753 			  __func__,
3754 			  plane->state->crtc_w,
3755 			  plane->state->crtc_h);
3756 		return -EINVAL;
3757 	}
3758 
3759 	x = plane->state->crtc_x;
3760 	y = plane->state->crtc_y;
3761 	/* avivo cursors are offset relative to the total surface */
3762 	x += crtc->primary->state->src_x >> 16;
3763 	y += crtc->primary->state->src_y >> 16;
3764 	if (x < 0) {
3765 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
3766 		x = 0;
3767 	}
3768 	if (y < 0) {
3769 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
3770 		y = 0;
3771 	}
3772 	position->enable = true;
3773 	position->x = x;
3774 	position->y = y;
3775 	position->x_hotspot = xorigin;
3776 	position->y_hotspot = yorigin;
3777 
3778 	return 0;
3779 }
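
/*
 * Editorial note (illustrative): a worked example of the clamping above.
 * With a 64x64 cursor, plane->state->crtc_x = -10, crtc_y = 5 and no
 * primary-plane source offset, the function reports:
 *
 *	position->x = 0,  position->x_hotspot = 10
 *	position->y = 5,  position->y_hotspot = 0
 *
 * so the hardware keeps drawing the visible part of a cursor that has been
 * moved partially off the left edge of the screen.
 */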
3780 
3781 static void handle_cursor_update(struct drm_plane *plane,
3782 				 struct drm_plane_state *old_plane_state)
3783 {
3784 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
3785 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
3786 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
3787 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3788 	uint64_t address = afb ? afb->address : 0;
3789 	struct dc_cursor_position position;
3790 	struct dc_cursor_attributes attributes;
3791 	int ret;
3792 
3793 	if (!plane->state->fb && !old_plane_state->fb)
3794 		return;
3795 
3796 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
3797 			 __func__,
3798 			 amdgpu_crtc->crtc_id,
3799 			 plane->state->crtc_w,
3800 			 plane->state->crtc_h);
3801 
3802 	ret = get_cursor_position(plane, crtc, &position);
3803 	if (ret)
3804 		return;
3805 
3806 	if (!position.enable) {
3807 		/* turn off cursor */
3808 		if (crtc_state && crtc_state->stream)
3809 			dc_stream_set_cursor_position(crtc_state->stream,
3810 						      &position);
3811 		return;
3812 	}
3813 
3814 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
3815 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
3816 
3817 	attributes.address.high_part = upper_32_bits(address);
3818 	attributes.address.low_part  = lower_32_bits(address);
3819 	attributes.width             = plane->state->crtc_w;
3820 	attributes.height            = plane->state->crtc_h;
3821 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
3822 	attributes.rotation_angle    = 0;
3823 	attributes.attribute_flags.value = 0;
3824 
3825 	attributes.pitch = attributes.width;
3826 
3827 	if (crtc_state->stream) {
3828 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
3829 							 &attributes))
3830 			DRM_ERROR("DC failed to set cursor attributes\n");
3831 
3832 		if (!dc_stream_set_cursor_position(crtc_state->stream,
3833 						   &position))
3834 			DRM_ERROR("DC failed to set cursor position\n");
3835 	}
3836 }
3837 
3838 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
3839 {
3840 
3841 	assert_spin_locked(&acrtc->base.dev->event_lock);
3842 	WARN_ON(acrtc->event);
3843 
3844 	acrtc->event = acrtc->base.state->event;
3845 
3846 	/* Set the flip status */
3847 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
3848 
3849 	/* Mark this event as consumed */
3850 	acrtc->base.state->event = NULL;
3851 
3852 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
3853 						 acrtc->crtc_id);
3854 }
3855 
3856 /*
3857  * Executes a page flip.
3858  *
3859  * Waits on all of the BO's fences and for the proper target vblank count.
3860  */
3861 static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
3862 			      struct drm_framebuffer *fb,
3863 			      uint32_t target,
3864 			      struct dc_state *state)
3865 {
3866 	unsigned long flags;
3867 	uint32_t target_vblank;
3868 	int r, vpos, hpos;
3869 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3870 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
3871 	struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
3872 	struct amdgpu_device *adev = crtc->dev->dev_private;
3873 	bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
3874 	struct dc_flip_addrs addr = { {0} };
3875 	/* TODO eliminate or rename surface_update */
3876 	struct dc_surface_update surface_updates[1] = { {0} };
3877 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3878 
3879 
3880 	/* Prepare wait for target vblank early - before the fence-waits */
3881 	target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
3882 			amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
3883 
3884 	/* TODO: This might fail and hence is better not used; wait
3885 	 * explicitly on the fences instead. In general this should
3886 	 * only be called for a blocking commit,
3887 	 * as per the framework helpers.
3888 	 */
3889 	r = amdgpu_bo_reserve(abo, true);
3890 	if (unlikely(r != 0)) {
3891 		DRM_ERROR("failed to reserve buffer before flip\n");
3892 		WARN_ON(1);
3893 	}
3894 
3895 	/* Wait for all fences on this FB */
3896 	WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
3897 								    MAX_SCHEDULE_TIMEOUT) < 0);
3898 
3899 	amdgpu_bo_unreserve(abo);
3900 
3901 	/* Wait until we're out of the vertical blank period before the one
3902 	 * targeted by the flip
3903 	 */
3904 	while ((acrtc->enabled &&
3905 		(amdgpu_display_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id,
3906 						    0, &vpos, &hpos, NULL,
3907 						    NULL, &crtc->hwmode)
3908 		 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
3909 		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
3910 		(int)(target_vblank -
3911 		  amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
3912 		usleep_range(1000, 1100);
3913 	}
3914 
3915 	/* Flip */
3916 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
3917 	/* update crtc fb */
3918 	crtc->primary->fb = fb;
3919 
3920 	WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
3921 	WARN_ON(!acrtc_state->stream);
3922 
3923 	addr.address.grph.addr.low_part = lower_32_bits(afb->address);
3924 	addr.address.grph.addr.high_part = upper_32_bits(afb->address);
3925 	addr.flip_immediate = async_flip;
3926 
3927 
3928 	if (acrtc->base.state->event)
3929 		prepare_flip_isr(acrtc);
3930 
3931 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3932 
3933 	surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
3934 	surface_updates->flip_addr = &addr;
3935 
3936 	dc_commit_updates_for_stream(adev->dm.dc,
3937 					     surface_updates,
3938 					     1,
3939 					     acrtc_state->stream,
3940 					     NULL,
3941 					     &surface_updates->surface,
3942 					     state);
3943 
3944 	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
3945 			 __func__,
3946 			 addr.address.grph.addr.high_part,
3947 			 addr.address.grph.addr.low_part);
3948 }
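
/*
 * Editorial note (illustrative): an example of the target_vblank arithmetic
 * used at the top of amdgpu_dm_do_flip().  If userspace asked for the flip to
 * complete on software vblank count 'target', the software counter currently
 * reads 100 and the hardware counter reads 5000, then
 *
 *	target_vblank = target - 100 + 5000
 *
 * which rebases the request onto the hardware counter that the busy-wait loop
 * polls through amdgpu_get_vblank_counter_kms().
 */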
3949 
3950 /*
3951  * TODO this whole function needs to go
3952  *
3953  * dc_surface_update is needlessly complex. See if we can just replace this
3954  * with a dc_plane_state and follow the atomic model a bit more closely here.
3955  */
3956 static bool commit_planes_to_stream(
3957 		struct dc *dc,
3958 		struct dc_plane_state **plane_states,
3959 		uint8_t new_plane_count,
3960 		struct dm_crtc_state *dm_new_crtc_state,
3961 		struct dm_crtc_state *dm_old_crtc_state,
3962 		struct dc_state *state)
3963 {
3964 	/* no need to dynamically allocate this. it's pretty small */
3965 	struct dc_surface_update updates[MAX_SURFACES];
3966 	struct dc_flip_addrs *flip_addr;
3967 	struct dc_plane_info *plane_info;
3968 	struct dc_scaling_info *scaling_info;
3969 	int i;
3970 	struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
3971 	struct dc_stream_update *stream_update =
3972 			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
3973 
3974 	if (!stream_update) {
3975 		BREAK_TO_DEBUGGER();
3976 		return false;
3977 	}
3978 
3979 	flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
3980 			    GFP_KERNEL);
3981 	plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
3982 			     GFP_KERNEL);
3983 	scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
3984 			       GFP_KERNEL);
3985 
3986 	if (!flip_addr || !plane_info || !scaling_info) {
3987 		kfree(flip_addr);
3988 		kfree(plane_info);
3989 		kfree(scaling_info);
3990 		kfree(stream_update);
3991 		return false;
3992 	}
3993 
3994 	memset(updates, 0, sizeof(updates));
3995 
3996 	stream_update->src = dc_stream->src;
3997 	stream_update->dst = dc_stream->dst;
3998 	stream_update->out_transfer_func = dc_stream->out_transfer_func;
3999 
4000 	for (i = 0; i < new_plane_count; i++) {
4001 		updates[i].surface = plane_states[i];
4002 		updates[i].gamma =
4003 			(struct dc_gamma *)plane_states[i]->gamma_correction;
4004 		updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
4005 		flip_addr[i].address = plane_states[i]->address;
4006 		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
4007 		plane_info[i].color_space = plane_states[i]->color_space;
4008 		plane_info[i].format = plane_states[i]->format;
4009 		plane_info[i].plane_size = plane_states[i]->plane_size;
4010 		plane_info[i].rotation = plane_states[i]->rotation;
4011 		plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
4012 		plane_info[i].stereo_format = plane_states[i]->stereo_format;
4013 		plane_info[i].tiling_info = plane_states[i]->tiling_info;
4014 		plane_info[i].visible = plane_states[i]->visible;
4015 		plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
4016 		plane_info[i].dcc = plane_states[i]->dcc;
4017 		scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
4018 		scaling_info[i].src_rect = plane_states[i]->src_rect;
4019 		scaling_info[i].dst_rect = plane_states[i]->dst_rect;
4020 		scaling_info[i].clip_rect = plane_states[i]->clip_rect;
4021 
4022 		updates[i].flip_addr = &flip_addr[i];
4023 		updates[i].plane_info = &plane_info[i];
4024 		updates[i].scaling_info = &scaling_info[i];
4025 	}
4026 
4027 	dc_commit_updates_for_stream(
4028 			dc,
4029 			updates,
4030 			new_plane_count,
4031 			dc_stream, stream_update, plane_states, state);
4032 
4033 	kfree(flip_addr);
4034 	kfree(plane_info);
4035 	kfree(scaling_info);
4036 	kfree(stream_update);
4037 	return true;
4038 }
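
/*
 * Editorial note (illustrative): each dc_surface_update built above is only a
 * bundle of pointers into the per-plane scratch arrays, i.e. for plane i:
 *
 *	updates[i].flip_addr    -> flip_addr[i]     (new surface address)
 *	updates[i].plane_info   -> plane_info[i]    (format, tiling, rotation)
 *	updates[i].scaling_info -> scaling_info[i]  (src/dst/clip rectangles)
 *
 * so the arrays have to stay alive until dc_commit_updates_for_stream()
 * returns, which is why they are freed only at the end of the function.
 */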
4039 
4040 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
4041 				    struct drm_device *dev,
4042 				    struct amdgpu_display_manager *dm,
4043 				    struct drm_crtc *pcrtc,
4044 				    bool *wait_for_vblank)
4045 {
4046 	uint32_t i;
4047 	struct drm_plane *plane;
4048 	struct drm_plane_state *old_plane_state, *new_plane_state;
4049 	struct dc_stream_state *dc_stream_attach;
4050 	struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
4051 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
4052 	struct drm_crtc_state *new_pcrtc_state =
4053 			drm_atomic_get_new_crtc_state(state, pcrtc);
4054 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
4055 	struct dm_crtc_state *dm_old_crtc_state =
4056 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
4057 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4058 	int planes_count = 0;
4059 	unsigned long flags;
4060 
4061 	/* update planes when needed */
4062 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4063 		struct drm_crtc *crtc = new_plane_state->crtc;
4064 		struct drm_crtc_state *new_crtc_state;
4065 		struct drm_framebuffer *fb = new_plane_state->fb;
4066 		bool pflip_needed;
4067 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
4068 
4069 		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
4070 			handle_cursor_update(plane, old_plane_state);
4071 			continue;
4072 		}
4073 
4074 		if (!fb || !crtc || pcrtc != crtc)
4075 			continue;
4076 
4077 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
4078 		if (!new_crtc_state->active)
4079 			continue;
4080 
4081 		pflip_needed = !state->allow_modeset;
4082 
4083 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
4084 		if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
4085 			DRM_ERROR("%s: acrtc %d, already busy\n",
4086 				  __func__,
4087 				  acrtc_attach->crtc_id);
4088 			/* In commit tail framework this cannot happen */
4089 			WARN_ON(1);
4090 		}
4091 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4092 
4093 		if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
4094 			WARN_ON(!dm_new_plane_state->dc_state);
4095 
4096 			plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
4097 
4098 			dc_stream_attach = acrtc_state->stream;
4099 			planes_count++;
4100 
4101 		} else if (new_crtc_state->planes_changed) {
4102 			/* Assume that even ONE crtc with an immediate flip means
4103 			 * the entire commit can't wait for VBLANK.
4104 			 * TODO: Check whether this is correct.
4105 			 */
4106 			*wait_for_vblank =
4107 			*wait_for_vblank =
4108 					!(new_pcrtc_state->pageflip_flags &
4109 					  DRM_MODE_PAGE_FLIP_ASYNC);
4110 			/* TODO: Needs rework for multiplane flip */
4111 			if (plane->type == DRM_PLANE_TYPE_PRIMARY)
4112 				drm_crtc_vblank_get(crtc);
4113 
4114 			amdgpu_dm_do_flip(
4115 				crtc,
4116 				fb,
4117 				(uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
4118 				dm_state->context);
4119 		}
4120 
4121 	}
4122 
4123 	if (planes_count) {
4124 		unsigned long flags;
4125 
4126 		if (new_pcrtc_state->event) {
4127 
4128 			drm_crtc_vblank_get(pcrtc);
4129 
4130 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
4131 			prepare_flip_isr(acrtc_attach);
4132 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
4133 		}
4134 
4135 
4136 		if (!commit_planes_to_stream(dm->dc,
4137 					     plane_states_constructed,
4138 					     planes_count,
4139 					     acrtc_state,
4140 					     dm_old_crtc_state,
4141 					     dm_state->context))
4142 			dm_error("%s: Failed to attach plane!\n", __func__);
4143 	} else {
4144 		/* TODO BUG: disabling of planes on this CRTC should go here. */
4145 	}
4146 }
4147 
4148 /**
4149  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4150  * @crtc_state: the DRM CRTC state
4151  * @stream_state: the DC stream state.
4152  *
4153  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4154  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
4155  */
4156 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
4157 						struct dc_stream_state *stream_state)
4158 {
4159 	stream_state->mode_changed = crtc_state->mode_changed;
4160 }
4161 
4162 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
4163 				   struct drm_atomic_state *state,
4164 				   bool nonblock)
4165 {
4166 	struct drm_crtc *crtc;
4167 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4168 	struct amdgpu_device *adev = dev->dev_private;
4169 	int i;
4170 
4171 	/*
4172 	 * We disable vblank and pflip interrupts on the CRTCs that are about to
4173 	 * change. We do it here to flush and disable the interrupts before
4174 	 * drm_atomic_helper_commit() swaps the state, since the swap updates the
4175 	 * crtc->dm_crtc_state->stream pointer, which is used in
4176 	 * the ISRs.
4177 	 */
4178 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4179 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4180 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4181 
4182 		if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
4183 			manage_dm_interrupts(adev, acrtc, false);
4184 	}
4185 	/* Add a check here for SoCs that support a hardware cursor plane, to
4186 	 * unset legacy_cursor_update */
4187 
4188 	return drm_atomic_helper_commit(dev, state, nonblock);
4189 
4190 	/*TODO Handle EINTR, reenable IRQ*/
4191 }
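
/*
 * Editorial note (illustrative): a high-level sketch of how this hook fits
 * into the DRM atomic path, assuming the standard helper flow:
 *
 *	amdgpu_dm_atomic_check()           validates the proposed state
 *	amdgpu_dm_atomic_commit()          quiesces CRTC interrupts, then calls
 *	  drm_atomic_helper_commit()       which swaps the state and invokes
 *	    amdgpu_dm_atomic_commit_tail() which programs DC and re-enables IRQs
 *
 * The interrupt disable must happen before the state swap so the ISRs never
 * observe a half-updated dm_crtc_state->stream pointer.
 */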
4192 
4193 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
4194 {
4195 	struct drm_device *dev = state->dev;
4196 	struct amdgpu_device *adev = dev->dev_private;
4197 	struct amdgpu_display_manager *dm = &adev->dm;
4198 	struct dm_atomic_state *dm_state;
4199 	uint32_t i, j;
4200 	struct drm_crtc *crtc;
4201 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4202 	unsigned long flags;
4203 	bool wait_for_vblank = true;
4204 	struct drm_connector *connector;
4205 	struct drm_connector_state *old_con_state, *new_con_state;
4206 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4207 	int crtc_disable_count = 0;
4208 
4209 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
4210 
4211 	dm_state = to_dm_atomic_state(state);
4212 
4213 	/* update changed items */
4214 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4215 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4216 
4217 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4218 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4219 
4220 		DRM_DEBUG_DRIVER(
4221 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4222 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
4223 			"connectors_changed:%d\n",
4224 			acrtc->crtc_id,
4225 			new_crtc_state->enable,
4226 			new_crtc_state->active,
4227 			new_crtc_state->planes_changed,
4228 			new_crtc_state->mode_changed,
4229 			new_crtc_state->active_changed,
4230 			new_crtc_state->connectors_changed);
4231 
4232 		/* Copy all transient state flags into dc state */
4233 		if (dm_new_crtc_state->stream) {
4234 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
4235 							    dm_new_crtc_state->stream);
4236 		}
4237 
4238 		/* handles headless hotplug case, updating new_state and
4239 		 * aconnector as needed
4240 		 */
4241 
4242 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
4243 
4244 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
4245 
4246 			if (!dm_new_crtc_state->stream) {
4247 				/*
4248 				 * This could happen because of issues with
4249 				 * the delivery of userspace notifications.
4250 				 * In this case userspace tries to set a mode
4251 				 * on a display which is in fact disconnected.
4252 				 * dc_sink is NULL on the aconnector here.
4253 				 * We expect a mode reset to come soon.
4254 				 *
4255 				 * This can also happen when an unplug happens
4256 				 * during the resume sequence.
4257 				 *
4258 				 * In this case, we want to pretend we still
4259 				 * have a sink to keep the pipe running so that
4260 				 * hw state is consistent with the sw state.
4261 				 */
4262 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4263 						__func__, acrtc->base.base.id);
4264 				continue;
4265 			}
4266 
4267 			if (dm_old_crtc_state->stream)
4268 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4269 
4270 			pm_runtime_get_noresume(dev->dev);
4271 
4272 			acrtc->enabled = true;
4273 			acrtc->hw_mode = new_crtc_state->mode;
4274 			crtc->hwmode = new_crtc_state->mode;
4275 		} else if (modereset_required(new_crtc_state)) {
4276 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
4277 
4278 			/* i.e. reset mode */
4279 			if (dm_old_crtc_state->stream)
4280 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4281 		}
4282 	} /* for_each_crtc_in_state() */
4283 
4284 	/*
4285 	 * Add streams to the freesync module only after the streams they are
4286 	 * replacing have been removed from it (done in the loop above).
4287 	 */
4288 	if (adev->dm.freesync_module) {
4289 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
4290 					      new_crtc_state, i) {
4291 			struct amdgpu_dm_connector *aconnector = NULL;
4292 			struct dm_connector_state *dm_new_con_state = NULL;
4293 			struct amdgpu_crtc *acrtc = NULL;
4294 			bool modeset_needed;
4295 
4296 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4297 			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4298 			modeset_needed = modeset_required(
4299 					new_crtc_state,
4300 					dm_new_crtc_state->stream,
4301 					dm_old_crtc_state->stream);
4302 			/* We add stream to freesync if:
4303 			 * 1. Said stream is not null, and
4304 			 * 2. A modeset is requested. This means that the
4305 			 *    stream was removed previously, and needs to be
4306 			 *    replaced.
4307 			 */
4308 			if (dm_new_crtc_state->stream == NULL ||
4309 					!modeset_needed)
4310 				continue;
4311 
4312 			acrtc = to_amdgpu_crtc(crtc);
4313 
4314 			aconnector =
4315 				amdgpu_dm_find_first_crtc_matching_connector(
4316 					state, crtc);
4317 			if (!aconnector) {
4318 				DRM_DEBUG_DRIVER("Atomic commit: Failed to "
4319 						 "find connector for acrtc "
4320 						 "id:%d skipping freesync "
4321 						 "init\n",
4322 						 acrtc->crtc_id);
4323 				continue;
4324 			}
4325 
4326 			mod_freesync_add_stream(adev->dm.freesync_module,
4327 						dm_new_crtc_state->stream,
4328 						&aconnector->caps);
4329 			new_con_state = drm_atomic_get_new_connector_state(
4330 					state, &aconnector->base);
4331 			dm_new_con_state = to_dm_connector_state(new_con_state);
4332 
4333 			mod_freesync_set_user_enable(adev->dm.freesync_module,
4334 						     &dm_new_crtc_state->stream,
4335 						     1,
4336 						     &dm_new_con_state->user_enable);
4337 		}
4338 	}
4339 
4340 	if (dm_state->context) {
4341 		dm_enable_per_frame_crtc_master_sync(dm_state->context);
4342 		WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
4343 	}
4344 
4345 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4346 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4347 
4348 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4349 
4350 		if (dm_new_crtc_state->stream != NULL) {
4351 			const struct dc_stream_status *status =
4352 					dc_stream_get_status(dm_new_crtc_state->stream);
4353 
4354 			if (!status)
4355 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
4356 			else
4357 				acrtc->otg_inst = status->primary_otg_inst;
4358 		}
4359 	}
4360 
4361 	/* Handle scaling and underscan changes */
4362 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4363 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4364 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4365 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4366 		struct dc_stream_status *status = NULL;
4367 
4368 		if (acrtc) {
4369 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
4370 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
4371 		}
4372 
4373 		/* Skip any modesets/resets */
4374 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
4375 			continue;
4376 
4377 		/* Skip anything that is not a scaling or underscan change */
4378 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4379 			continue;
4380 
4381 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4382 
4383 		update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
4384 				dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
4385 
4386 		if (!dm_new_crtc_state->stream)
4387 			continue;
4388 
4389 		status = dc_stream_get_status(dm_new_crtc_state->stream);
4390 		WARN_ON(!status);
4391 		WARN_ON(!status->plane_count);
4392 
4393 		/* TODO: How does this work with MPO? */
4394 		if (!commit_planes_to_stream(
4395 				dm->dc,
4396 				status->plane_states,
4397 				status->plane_count,
4398 				dm_new_crtc_state,
4399 				to_dm_crtc_state(old_crtc_state),
4400 				dm_state->context))
4401 			dm_error("%s: Failed to update stream scaling!\n", __func__);
4402 	}
4403 
4404 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
4405 			new_crtc_state, i) {
4406 		/*
4407 		 * Loop to enable interrupts on newly enabled CRTCs.
4408 		 */
4409 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4410 		bool modeset_needed;
4411 
4412 		if (old_crtc_state->active && !new_crtc_state->active)
4413 			crtc_disable_count++;
4414 
4415 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4416 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4417 		modeset_needed = modeset_required(
4418 				new_crtc_state,
4419 				dm_new_crtc_state->stream,
4420 				dm_old_crtc_state->stream);
4421 
4422 		if (dm_new_crtc_state->stream == NULL || !modeset_needed)
4423 			continue;
4424 
4425 		if (adev->dm.freesync_module)
4426 			mod_freesync_notify_mode_change(
4427 				adev->dm.freesync_module,
4428 				&dm_new_crtc_state->stream, 1);
4429 
4430 		manage_dm_interrupts(adev, acrtc, true);
4431 	}
4432 
4433 	/* Update planes when needed, per CRTC */
4434 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
4435 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4436 
4437 		if (dm_new_crtc_state->stream)
4438 			amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
4439 	}
4440 
4441 
4442 	/*
4443 	 * Send a vblank event for every event not handled by the flip path, and
4444 	 * mark the events as consumed for drm_atomic_helper_commit_hw_done().
4445 	 */
4446 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
4447 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4448 
4449 		if (new_crtc_state->event)
4450 			drm_send_event_locked(dev, &new_crtc_state->event->base);
4451 
4452 		new_crtc_state->event = NULL;
4453 	}
4454 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
4455 
4456 	/* Signal HW programming completion */
4457 	drm_atomic_helper_commit_hw_done(state);
4458 
4459 	if (wait_for_vblank)
4460 		drm_atomic_helper_wait_for_flip_done(dev, state);
4461 
4462 	drm_atomic_helper_cleanup_planes(dev, state);
4463 
4464 	/* Finally, drop a runtime PM reference for each newly disabled CRTC,
4465 	 * so we can put the GPU into runtime suspend if we're not driving any
4466 	 * displays anymore
4467 	 */
4468 	for (i = 0; i < crtc_disable_count; i++)
4469 		pm_runtime_put_autosuspend(dev->dev);
4470 	pm_runtime_mark_last_busy(dev->dev);
4471 }
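
/*
 * Editorial note (illustrative): the phases of the commit tail above, each
 * corresponding to one of the loops in the function:
 *
 *	1. remove/replace streams on CRTCs that need a mode set or reset
 *	2. (re)register the new streams with the freesync module
 *	3. dc_commit_state() - program the new global DC state
 *	4. apply pure scaling/underscan updates that need no modeset
 *	5. re-enable interrupts on enabled CRTCs and commit their planes
 *	6. send remaining vblank events, signal hw_done and clean up
 */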
4472 
4473 
4474 static int dm_force_atomic_commit(struct drm_connector *connector)
4475 {
4476 	int ret = 0;
4477 	struct drm_device *ddev = connector->dev;
4478 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
4479 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4480 	struct drm_plane *plane = disconnected_acrtc->base.primary;
4481 	struct drm_connector_state *conn_state;
4482 	struct drm_crtc_state *crtc_state;
4483 	struct drm_plane_state *plane_state;
4484 
4485 	if (!state)
4486 		return -ENOMEM;
4487 
4488 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
4489 
4490 	/* Construct an atomic state to restore previous display setting */
4491 
4492 	/*
4493 	 * Attach connectors to drm_atomic_state
4494 	 */
4495 	conn_state = drm_atomic_get_connector_state(state, connector);
4496 
4497 	ret = PTR_ERR_OR_ZERO(conn_state);
4498 	if (ret)
4499 		goto err;
4500 
4501 	/* Attach crtc to drm_atomic_state*/
4502 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
4503 
4504 	ret = PTR_ERR_OR_ZERO(crtc_state);
4505 	if (ret)
4506 		goto err;
4507 
4508 	/* force a restore */
4509 	crtc_state->mode_changed = true;
4510 
4511 	/* Attach plane to drm_atomic_state */
4512 	plane_state = drm_atomic_get_plane_state(state, plane);
4513 
4514 	ret = PTR_ERR_OR_ZERO(plane_state);
4515 	if (ret)
4516 		goto err;
4517 
4518 
4519 	/* Call commit internally with the state we just constructed */
4520 	ret = drm_atomic_commit(state);
4521 	if (!ret)
4522 		return 0;
4523 
4524 err:
4525 	DRM_ERROR("Restoring old state failed with %i\n", ret);
4526 	drm_atomic_state_put(state);
4527 
4528 	return ret;
4529 }
4530 
4531 /*
4532  * This function handles all cases where a set mode does not come upon hotplug.
4533  * This includes when the same display is unplugged and then plugged back into
4534  * the same port, and when we run without user-mode desktop manager support.
4535  */
4536 void dm_restore_drm_connector_state(struct drm_device *dev,
4537 				    struct drm_connector *connector)
4538 {
4539 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4540 	struct amdgpu_crtc *disconnected_acrtc;
4541 	struct dm_crtc_state *acrtc_state;
4542 
4543 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
4544 		return;
4545 
4546 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4547 	if (!disconnected_acrtc)
4548 		return;
4549 
4550 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
4551 	if (!acrtc_state->stream)
4552 		return;
4553 
4554 	/*
4555 	 * If the previous sink has not been released and is different from the
4556 	 * current one, we deduce we are in a state where we cannot rely on a
4557 	 * user-mode call to turn the display back on, so we do it here.
4558 	 */
4559 	if (acrtc_state->stream->sink != aconnector->dc_sink)
4560 		dm_force_atomic_commit(&aconnector->base);
4561 }
4562 
4563 /*
4564  * Grabs all modesetting locks to serialize against any blocking commits and
4565  * waits for the completion of all non-blocking commits.
4566  */
4567 static int do_aquire_global_lock(struct drm_device *dev,
4568 				 struct drm_atomic_state *state)
4569 {
4570 	struct drm_crtc *crtc;
4571 	struct drm_crtc_commit *commit;
4572 	long ret;
4573 
4574 	/* Adding all modeset locks to acquire_ctx ensures that when the
4575 	 * framework releases it, the extra locks we are taking here
4576 	 * will get released too.
4577 	 */
4578 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
4579 	if (ret)
4580 		return ret;
4581 
4582 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4583 		spin_lock(&crtc->commit_lock);
4584 		commit = list_first_entry_or_null(&crtc->commit_list,
4585 				struct drm_crtc_commit, commit_entry);
4586 		if (commit)
4587 			drm_crtc_commit_get(commit);
4588 		spin_unlock(&crtc->commit_lock);
4589 
4590 		if (!commit)
4591 			continue;
4592 
4593 		/* Make sure all pending HW programming has completed and
4594 		 * all page flips are done.
4595 		 */
4596 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
4597 
4598 		if (ret > 0)
4599 			ret = wait_for_completion_interruptible_timeout(
4600 					&commit->flip_done, 10*HZ);
4601 
4602 		if (ret == 0)
4603 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4604 				  "timed out\n", crtc->base.id, crtc->name);
4605 
4606 		drm_crtc_commit_put(commit);
4607 	}
4608 
4609 	return ret < 0 ? ret : 0;
4610 }
4611 
4612 static int dm_update_crtcs_state(struct dc *dc,
4613 				 struct drm_atomic_state *state,
4614 				 bool enable,
4615 				 bool *lock_and_validation_needed)
4616 {
4617 	struct drm_crtc *crtc;
4618 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4619 	int i;
4620 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4621 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4622 	struct dc_stream_state *new_stream;
4623 	int ret = 0;
4624 
4625 	/* TODO: Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4626 	/* update changed items */
4627 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4628 		struct amdgpu_crtc *acrtc = NULL;
4629 		struct amdgpu_dm_connector *aconnector = NULL;
4630 		struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
4631 		struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
4632 		struct drm_plane_state *new_plane_state = NULL;
4633 
4634 		new_stream = NULL;
4635 
4636 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4637 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4638 		acrtc = to_amdgpu_crtc(crtc);
4639 
4640 		new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
4641 
4642 		if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
4643 			ret = -EINVAL;
4644 			goto fail;
4645 		}
4646 
4647 		aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
4648 
4649 		/* TODO This hack should go away */
4650 		if (aconnector && enable) {
4651 			/* Make sure a fake sink is created in the plug-in scenario */
4652 			drm_new_conn_state = drm_atomic_get_new_connector_state(state,
4653  								    &aconnector->base);
4654 			drm_old_conn_state = drm_atomic_get_old_connector_state(state,
4655 								    &aconnector->base);
4656 
4657 			if (IS_ERR(drm_new_conn_state)) {
4658 				ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
4659 				break;
4660 			}
4661 
4662 			dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
4663 			dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
4664 
4665 			new_stream = create_stream_for_sink(aconnector,
4666 							     &new_crtc_state->mode,
4667 							    dm_new_conn_state);
4668 
4669 			/*
4670 			 * We can have no stream on ACTION_SET if a display
4671 			 * was disconnected during S3; in this case it is not an
4672 			 * error, the OS will be updated after detection and
4673 			 * will do the right thing on the next atomic commit.
4674 			 */
4675 
4676 			if (!new_stream) {
4677 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4678 						__func__, acrtc->base.base.id);
4679 				break;
4680 			}
4681 
4682 			if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4683 			    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
4684 				new_crtc_state->mode_changed = false;
4685 				DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
4686 						 new_crtc_state->mode_changed);
4687 			}
4688 		}
4689 
4690 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
4691 			goto next_crtc;
4692 
4693 		DRM_DEBUG_DRIVER(
4694 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4695 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
4696 			"connectors_changed:%d\n",
4697 			acrtc->crtc_id,
4698 			new_crtc_state->enable,
4699 			new_crtc_state->active,
4700 			new_crtc_state->planes_changed,
4701 			new_crtc_state->mode_changed,
4702 			new_crtc_state->active_changed,
4703 			new_crtc_state->connectors_changed);
4704 
4705 		/* Remove stream for any changed/disabled CRTC */
4706 		if (!enable) {
4707 
4708 			if (!dm_old_crtc_state->stream)
4709 				goto next_crtc;
4710 
4711 			DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
4712 					crtc->base.id);
4713 
4714 			/* i.e. reset mode */
4715 			if (dc_remove_stream_from_ctx(
4716 					dc,
4717 					dm_state->context,
4718 					dm_old_crtc_state->stream) != DC_OK) {
4719 				ret = -EINVAL;
4720 				goto fail;
4721 			}
4722 
4723 			dc_stream_release(dm_old_crtc_state->stream);
4724 			dm_new_crtc_state->stream = NULL;
4725 
4726 			*lock_and_validation_needed = true;
4727 
4728 		} else {/* Add stream for any updated/enabled CRTC */
4729 			/*
4730 			 * Quick fix to prevent a NULL pointer dereference on new_stream
4731 			 * when newly added MST connectors are not found in the existing
4732 			 * crtc_state in chained mode. TODO: dig out the root cause of that.
4733 			 */
4734 			if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
4735 				goto next_crtc;
4736 
4737 			if (modereset_required(new_crtc_state))
4738 				goto next_crtc;
4739 
4740 			if (modeset_required(new_crtc_state, new_stream,
4741 					     dm_old_crtc_state->stream)) {
4742 
4743 				WARN_ON(dm_new_crtc_state->stream);
4744 
4745 				dm_new_crtc_state->stream = new_stream;
4746 
4747 				dc_stream_retain(new_stream);
4748 
4749 				DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
4750 							crtc->base.id);
4751 
4752 				if (dc_add_stream_to_ctx(
4753 						dc,
4754 						dm_state->context,
4755 						dm_new_crtc_state->stream) != DC_OK) {
4756 					ret = -EINVAL;
4757 					goto fail;
4758 				}
4759 
4760 				*lock_and_validation_needed = true;
4761 			}
4762 		}
4763 
4764 next_crtc:
4765 		/* Release extra reference */
4766 		if (new_stream)
4767 			 dc_stream_release(new_stream);
4768 
4769 		/*
4770 		 * We want to do dc stream updates that do not require a
4771 		 * full modeset below.
4772 		 */
4773 		if (!(enable && aconnector && new_crtc_state->enable &&
4774 		      new_crtc_state->active))
4775 			continue;
4776 		/*
4777 		 * Given above conditions, the dc state cannot be NULL because:
4778 		 * 1. We're in the process of enabling CRTCs (the stream has just
4779 		 *    been added to the dc context, or is already on it),
4780 		 * 2. the CRTC has a valid connector attached, and
4781 		 * 3. the CRTC is currently active and enabled.
4782 		 * => The dc stream state currently exists.
4783 		 */
4784 		BUG_ON(dm_new_crtc_state->stream == NULL);
4785 
4786 		/* Scaling or underscan settings */
4787 		if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
4788 			update_stream_scaling_settings(
4789 				&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
4790 
4791 		/*
4792 		 * Color management settings. We also update color properties
4793 		 * when a modeset is needed, to ensure it gets reprogrammed.
4794 		 */
4795 		if (dm_new_crtc_state->base.color_mgmt_changed ||
4796 		    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
4797 			ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
4798 			if (ret)
4799 				goto fail;
4800 			amdgpu_dm_set_ctm(dm_new_crtc_state);
4801 		}
4802 	}
4803 
4804 	return ret;
4805 
4806 fail:
4807 	if (new_stream)
4808 		dc_stream_release(new_stream);
4809 	return ret;
4810 }
4811 
4812 static int dm_update_planes_state(struct dc *dc,
4813 				  struct drm_atomic_state *state,
4814 				  bool enable,
4815 				  bool *lock_and_validation_needed)
4816 {
4817 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
4818 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4819 	struct drm_plane *plane;
4820 	struct drm_plane_state *old_plane_state, *new_plane_state;
4821 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
4822 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4823 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
4824 	int i;
4825 	/* TODO return page_flip_needed() function */
4826 	bool pflip_needed  = !state->allow_modeset;
4827 	int ret = 0;
4828 
4829 
4830 	/* Add new planes in reverse order, as DC expects them that way */
4831 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
4832 		new_plane_crtc = new_plane_state->crtc;
4833 		old_plane_crtc = old_plane_state->crtc;
4834 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
4835 		dm_old_plane_state = to_dm_plane_state(old_plane_state);
4836 
4837 		/* TODO: Implement atomic check for the cursor plane */
4838 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
4839 			continue;
4840 
4841 		/* Remove any changed/removed planes */
4842 		if (!enable) {
4843 			if (pflip_needed &&
4844 			    plane->type != DRM_PLANE_TYPE_OVERLAY)
4845 				continue;
4846 
4847 			if (!old_plane_crtc)
4848 				continue;
4849 
4850 			old_crtc_state = drm_atomic_get_old_crtc_state(
4851 					state, old_plane_crtc);
4852 			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4853 
4854 			if (!dm_old_crtc_state->stream)
4855 				continue;
4856 
4857 			DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
4858 					plane->base.id, old_plane_crtc->base.id);
4859 
4860 			if (!dc_remove_plane_from_context(
4861 					dc,
4862 					dm_old_crtc_state->stream,
4863 					dm_old_plane_state->dc_state,
4864 					dm_state->context)) {
4865 
4866 				ret = -EINVAL;
4867 				return ret;
4868 			}
4869 
4870 
4871 			dc_plane_state_release(dm_old_plane_state->dc_state);
4872 			dm_new_plane_state->dc_state = NULL;
4873 
4874 			*lock_and_validation_needed = true;
4875 
4876 		} else { /* Add new planes */
4877 			struct dc_plane_state *dc_new_plane_state;
4878 
4879 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4880 				continue;
4881 
4882 			if (!new_plane_crtc)
4883 				continue;
4884 
4885 			new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
4886 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4887 
4888 			if (!dm_new_crtc_state->stream)
4889 				continue;
4890 
4891 			if (pflip_needed &&
4892 			    plane->type != DRM_PLANE_TYPE_OVERLAY)
4893 				continue;
4894 
4895 			WARN_ON(dm_new_plane_state->dc_state);
4896 
4897 			dc_new_plane_state = dc_create_plane_state(dc);
4898 			if (!dc_new_plane_state)
4899 				return -ENOMEM;
4900 
4901 			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4902 					plane->base.id, new_plane_crtc->base.id);
4903 
4904 			ret = fill_plane_attributes(
4905 				new_plane_crtc->dev->dev_private,
4906 				dc_new_plane_state,
4907 				new_plane_state,
4908 				new_crtc_state);
4909 			if (ret) {
4910 				dc_plane_state_release(dc_new_plane_state);
4911 				return ret;
4912 			}
4913 
4914 			/*
4915 			 * Any atomic check errors that occur after this will
4916 			 * not need a release. The plane state will be attached
4917 			 * to the stream, and therefore part of the atomic
4918 			 * state. It'll be released when the atomic state is
4919 			 * cleaned.
4920 			 */
4921 			if (!dc_add_plane_to_context(
4922 					dc,
4923 					dm_new_crtc_state->stream,
4924 					dc_new_plane_state,
4925 					dm_state->context)) {
4926 
4927 				dc_plane_state_release(dc_new_plane_state);
4928 				return -EINVAL;
4929 			}
4930 
4931 			dm_new_plane_state->dc_state = dc_new_plane_state;
4932 
4933 			/* Tell DC to do a full surface update every time there
4934 			 * is a plane change. Inefficient, but works for now.
4935 			 */
4936 			dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
4937 
4938 			*lock_and_validation_needed = true;
4939 		}
4940 	}
4941 
4942 
4943 	return ret;
4944 }
4945 
4946 static int amdgpu_dm_atomic_check(struct drm_device *dev,
4947 				  struct drm_atomic_state *state)
4948 {
4949 	struct amdgpu_device *adev = dev->dev_private;
4950 	struct dc *dc = adev->dm.dc;
4951 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4952 	struct drm_connector *connector;
4953 	struct drm_connector_state *old_con_state, *new_con_state;
4954 	struct drm_crtc *crtc;
4955 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4956 	int ret, i;
4957 
4958 	/*
4959 	 * This bool will be set to true for any modeset/reset
4960 	 * or plane update that implies a non-fast surface update.
4961 	 */
4962 	bool lock_and_validation_needed = false;
4963 
4964 	ret = drm_atomic_helper_check_modeset(dev, state);
4965 	if (ret)
4966 		goto fail;
4967 
4968 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4969 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
4970 		    !new_crtc_state->color_mgmt_changed)
4971 			continue;
4972 
4973 		if (!new_crtc_state->enable)
4974 			continue;
4975 
4976 		ret = drm_atomic_add_affected_connectors(state, crtc);
4977 		if (ret)
4978 			return ret;
4979 
4980 		ret = drm_atomic_add_affected_planes(state, crtc);
4981 		if (ret)
4982 			goto fail;
4983 	}
4984 
4985 	dm_state->context = dc_create_state();
4986 	ASSERT(dm_state->context);
4987 	dc_resource_state_copy_construct_current(dc, dm_state->context);
4988 
4989 	/* Remove existing planes if they are modified */
4990 	ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
4991 	if (ret) {
4992 		goto fail;
4993 	}
4994 
4995 	/* Disable all crtcs which require disable */
4996 	ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
4997 	if (ret) {
4998 		goto fail;
4999 	}
5000 
5001 	/* Enable all crtcs which require enable */
5002 	ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
5003 	if (ret) {
5004 		goto fail;
5005 	}
5006 
5007 	/* Add new/modified planes */
5008 	ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
5009 	if (ret) {
5010 		goto fail;
5011 	}
5012 
5013 	/* Run this here since we want to validate the streams we created */
5014 	ret = drm_atomic_helper_check_planes(dev, state);
5015 	if (ret)
5016 		goto fail;
5017 
5018 	/* Check scaling and underscan changes */
5019 	/* TODO: Scaling-changes validation was removed due to the inability to
5020 	 * commit a new stream into the context without causing a full reset.
5021 	 * Need to decide how to handle this.
5022 	 */
5023 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5024 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
5025 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
5026 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
5027 
5028 		/* Skip any modesets/resets */
5029 		if (!acrtc || drm_atomic_crtc_needs_modeset(
5030 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
5031 			continue;
5032 
5033 		/* Skip anything that is not a scaling or underscan change */
5034 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
5035 			continue;
5036 
5037 		lock_and_validation_needed = true;
5038 	}
5039 
5040 	/*
5041 	 * For the full-update case, when removing, adding or updating
5042 	 * streams on one CRTC while flipping on another CRTC, acquiring
5043 	 * the global lock guarantees that any such full-update commit
5044 	 * will wait for the completion of any outstanding flip using
5045 	 * DRM's synchronization events.
5048 	 */
5049 
5050 	if (lock_and_validation_needed) {
5051 
5052 		ret = do_aquire_global_lock(dev, state);
5053 		if (ret)
5054 			goto fail;
5055 
5056 		if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
5057 			ret = -EINVAL;
5058 			goto fail;
5059 		}
5060 	}
5061 
5062 	/* Must be success; ret should be 0 at this point */
5063 	WARN_ON(ret);
5064 	return ret;
5065 
5066 fail:
5067 	if (ret == -EDEADLK)
5068 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
5069 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
5070 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
5071 	else
5072 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
5073 
5074 	return ret;
5075 }
5076 
5077 static bool is_dp_capable_without_timing_msa(struct dc *dc,
5078 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
5079 {
5080 	uint8_t dpcd_data;
5081 	bool capable = false;
5082 
5083 	if (amdgpu_dm_connector->dc_link &&
5084 		dm_helpers_dp_read_dpcd(
5085 				NULL,
5086 				amdgpu_dm_connector->dc_link,
5087 				DP_DOWN_STREAM_PORT_COUNT,
5088 				&dpcd_data,
5089 				sizeof(dpcd_data))) {
5090 		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
5091 	}
5092 
5093 	return capable;
5094 }
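
/*
 * Editorial note (illustrative): the helper above reads one byte from the
 * DP_DOWN_STREAM_PORT_COUNT DPCD register and reports the sink as capable
 * when the DP_MSA_TIMING_PAR_IGNORED bit is set, i.e. when the sink can keep
 * timing while the MSA timing parameters are ignored - the property the
 * freesync path below relies on for DP and eDP sinks.
 */
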
5095 void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
5096 					   struct edid *edid)
5097 {
5098 	int i;
5099 	bool edid_check_required;
5100 	struct detailed_timing *timing;
5101 	struct detailed_non_pixel *data;
5102 	struct detailed_data_monitor_range *range;
5103 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5104 			to_amdgpu_dm_connector(connector);
5105 	struct dm_connector_state *dm_con_state;
5106 
5107 	struct drm_device *dev = connector->dev;
5108 	struct amdgpu_device *adev = dev->dev_private;
5109 
5110 	if (!connector->state) {
5111 		DRM_ERROR("%s - Connector has no state\n", __func__);
5112 		return;
5113 	}
5114 
5115 	dm_con_state = to_dm_connector_state(connector->state);
5116 
5117 	edid_check_required = false;
5118 	if (!amdgpu_dm_connector->dc_sink) {
5119 		DRM_ERROR("dc_sink is NULL, could not add sink to the freesync module\n");
5120 		return;
5121 	}
5122 	if (!adev->dm.freesync_module)
5123 		return;
5124 	/*
5125 	 * If the EDID is non-NULL, restrict freesync support to DP and eDP only.
5126 	 */
5127 	if (edid) {
5128 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
5129 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
5130 			edid_check_required = is_dp_capable_without_timing_msa(
5131 						adev->dm.dc,
5132 						amdgpu_dm_connector);
5133 		}
5134 	}
5135 	dm_con_state->freesync_capable = false;
5136 	if (edid_check_required && (edid->version > 1 ||
5137 	   (edid->version == 1 && edid->revision > 1))) {
5138 		for (i = 0; i < 4; i++) {
5139 
5140 			timing	= &edid->detailed_timings[i];
5141 			data	= &timing->data.other_data;
5142 			range	= &data->data.range;
5143 			/*
5144 			 * Check if monitor has continuous frequency mode
5145 			 */
5146 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
5147 				continue;
5148 			/*
5149 			 * Check for the range-limits-only flag. If flags == 1 then
5150 			 * no additional timing information is provided.
5151 			 * Default GTF, GTF secondary curve and CVT are not
5152 			 * supported.
5153 			 */
5154 			if (range->flags != 1)
5155 				continue;
5156 
5157 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
5158 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
5159 			amdgpu_dm_connector->pixel_clock_mhz =
5160 				range->pixel_clock_mhz * 10;
5161 			break;
5162 		}
5163 
5164 		if (amdgpu_dm_connector->max_vfreq -
5165 				amdgpu_dm_connector->min_vfreq > 10) {
5166 			amdgpu_dm_connector->caps.supported = true;
5167 			amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
5168 					amdgpu_dm_connector->min_vfreq * 1000000;
5169 			amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
5170 					amdgpu_dm_connector->max_vfreq * 1000000;
5171 			dm_con_state->freesync_capable = true;
5172 		}
5173 	}
5174 
5175 	/*
5176 	 * TODO figure out how to notify user-mode or DRM of freesync caps
5177 	 * once we figure out how to deal with freesync in an upstreamable
5178 	 * fashion
5179 	 */
5180 
5181 }
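
/*
 * Editorial note (illustrative): a worked example of the range conversion
 * above.  For a monitor whose EDID range descriptor reports min_vfreq = 40
 * and max_vfreq = 75:
 *
 *	75 - 40 > 10, so freesync_capable is set, and
 *	caps.min_refresh_in_micro_hz = 40 * 1000000 = 40000000
 *	caps.max_refresh_in_micro_hz = 75 * 1000000 = 75000000
 *
 * Ranges of 10 Hz or less are treated as too narrow to be worth enabling.
 */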
5182 
5183 void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
5184 {
5185 	/*
5186 	 * TODO fill in once we figure out how to deal with freesync in
5187 	 * an upstreamable fashion
5188 	 */
5189 }
5190