1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 
33 #include "vid.h"
34 #include "amdgpu.h"
35 #include "amdgpu_display.h"
36 #include "amdgpu_ucode.h"
37 #include "atom.h"
38 #include "amdgpu_dm.h"
39 #include "amdgpu_pm.h"
40 
41 #include "amd_shared.h"
42 #include "amdgpu_dm_irq.h"
43 #include "dm_helpers.h"
44 #include "amdgpu_dm_mst_types.h"
45 #if defined(CONFIG_DEBUG_FS)
46 #include "amdgpu_dm_debugfs.h"
47 #endif
48 
49 #include "ivsrcid/ivsrcid_vislands30.h"
50 
51 #include <linux/module.h>
52 #include <linux/moduleparam.h>
53 #include <linux/version.h>
54 #include <linux/types.h>
55 #include <linux/pm_runtime.h>
56 #include <linux/firmware.h>
57 
58 #include <drm/drmP.h>
59 #include <drm/drm_atomic.h>
60 #include <drm/drm_atomic_uapi.h>
61 #include <drm/drm_atomic_helper.h>
62 #include <drm/drm_dp_mst_helper.h>
63 #include <drm/drm_fb_helper.h>
64 #include <drm/drm_edid.h>
65 
66 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
67 #include "ivsrcid/irqsrcs_dcn_1_0.h"
68 
69 #include "dcn/dcn_1_0_offset.h"
70 #include "dcn/dcn_1_0_sh_mask.h"
71 #include "soc15_hw_ip.h"
72 #include "vega10_ip_offset.h"
73 
74 #include "soc15_common.h"
75 #endif
76 
77 #include "modules/inc/mod_freesync.h"
78 #include "modules/power/power_helpers.h"
79 #include "modules/inc/mod_info_packet.h"
80 
81 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
82 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
83 
84 /**
85  * DOC: overview
86  *
87  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
88  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
89  * requests into DC requests, and DC responses into DRM responses.
90  *
91  * The root control structure is &struct amdgpu_display_manager.
92  */
93 
94 /* basic init/fini API */
95 static int amdgpu_dm_init(struct amdgpu_device *adev);
96 static void amdgpu_dm_fini(struct amdgpu_device *adev);
97 
98 /*
99  * initializes drm_device display related structures, based on the information
100  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
101  * drm_encoder, drm_mode_config
102  *
103  * Returns 0 on success
104  */
105 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
106 /* removes and deallocates the drm structures, created by the above function */
107 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
108 
109 static void
110 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
111 
112 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
113 				struct drm_plane *plane,
114 				unsigned long possible_crtcs);
115 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
116 			       struct drm_plane *plane,
117 			       uint32_t link_index);
118 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
119 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
120 				    uint32_t link_index,
121 				    struct amdgpu_encoder *amdgpu_encoder);
122 static int amdgpu_dm_encoder_init(struct drm_device *dev,
123 				  struct amdgpu_encoder *aencoder,
124 				  uint32_t link_index);
125 
126 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
127 
128 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
129 				   struct drm_atomic_state *state,
130 				   bool nonblock);
131 
132 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
133 
134 static int amdgpu_dm_atomic_check(struct drm_device *dev,
135 				  struct drm_atomic_state *state);
136 
137 static void handle_cursor_update(struct drm_plane *plane,
138 				 struct drm_plane_state *old_plane_state);
139 
140 
141 
142 static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
143 	DRM_PLANE_TYPE_PRIMARY,
144 	DRM_PLANE_TYPE_PRIMARY,
145 	DRM_PLANE_TYPE_PRIMARY,
146 	DRM_PLANE_TYPE_PRIMARY,
147 	DRM_PLANE_TYPE_PRIMARY,
148 	DRM_PLANE_TYPE_PRIMARY,
149 };
150 
151 static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
152 	DRM_PLANE_TYPE_PRIMARY,
153 	DRM_PLANE_TYPE_PRIMARY,
154 	DRM_PLANE_TYPE_PRIMARY,
155 	DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
156 };
157 
158 static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
159 	DRM_PLANE_TYPE_PRIMARY,
160 	DRM_PLANE_TYPE_PRIMARY,
161 	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
162 };
163 
164 /*
165  * dm_vblank_get_counter
166  *
167  * @brief
168  * Get counter for number of vertical blanks
169  *
170  * @param
171  * struct amdgpu_device *adev - [in] desired amdgpu device
172  * int disp_idx - [in] which CRTC to get the counter from
173  *
174  * @return
175  * Counter for vertical blanks
176  */
177 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
178 {
179 	if (crtc >= adev->mode_info.num_crtc)
180 		return 0;
181 	else {
182 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
183 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
184 				acrtc->base.state);
185 
186 
187 		if (acrtc_state->stream == NULL) {
188 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
189 				  crtc);
190 			return 0;
191 		}
192 
193 		return dc_stream_get_vblank_counter(acrtc_state->stream);
194 	}
195 }
196 
197 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
198 				  u32 *vbl, u32 *position)
199 {
200 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
201 
202 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
203 		return -EINVAL;
204 	else {
205 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
206 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
207 						acrtc->base.state);
208 
209 		if (acrtc_state->stream ==  NULL) {
210 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
211 				  crtc);
212 			return 0;
213 		}
214 
215 		/*
216 		 * TODO rework base driver to use values directly.
217 		 * for now parse it back into reg-format
218 		 */
219 		dc_stream_get_scanoutpos(acrtc_state->stream,
220 					 &v_blank_start,
221 					 &v_blank_end,
222 					 &h_position,
223 					 &v_position);
224 
225 		*position = v_position | (h_position << 16);
226 		*vbl = v_blank_start | (v_blank_end << 16);
227 	}
228 
229 	return 0;
230 }
231 
232 static bool dm_is_idle(void *handle)
233 {
234 	/* XXX todo */
235 	return true;
236 }
237 
238 static int dm_wait_for_idle(void *handle)
239 {
240 	/* XXX todo */
241 	return 0;
242 }
243 
244 static bool dm_check_soft_reset(void *handle)
245 {
246 	return false;
247 }
248 
249 static int dm_soft_reset(void *handle)
250 {
251 	/* XXX todo */
252 	return 0;
253 }
254 
255 static struct amdgpu_crtc *
256 get_crtc_by_otg_inst(struct amdgpu_device *adev,
257 		     int otg_inst)
258 {
259 	struct drm_device *dev = adev->ddev;
260 	struct drm_crtc *crtc;
261 	struct amdgpu_crtc *amdgpu_crtc;
262 
263 	if (otg_inst == -1) {
264 		WARN_ON(1);
265 		return adev->mode_info.crtcs[0];
266 	}
267 
268 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
269 		amdgpu_crtc = to_amdgpu_crtc(crtc);
270 
271 		if (amdgpu_crtc->otg_inst == otg_inst)
272 			return amdgpu_crtc;
273 	}
274 
275 	return NULL;
276 }
277 
278 static void dm_pflip_high_irq(void *interrupt_params)
279 {
280 	struct amdgpu_crtc *amdgpu_crtc;
281 	struct common_irq_params *irq_params = interrupt_params;
282 	struct amdgpu_device *adev = irq_params->adev;
283 	unsigned long flags;
284 
285 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
286 
287 	/* IRQ could occur when in initial stage */
288 	/* TODO work and BO cleanup */
289 	if (amdgpu_crtc == NULL) {
290 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
291 		return;
292 	}
293 
294 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
295 
296 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
297 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
298 						 amdgpu_crtc->pflip_status,
299 						 AMDGPU_FLIP_SUBMITTED,
300 						 amdgpu_crtc->crtc_id,
301 						 amdgpu_crtc);
302 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
303 		return;
304 	}
305 
306 
307 	/* wake up userspace */
308 	if (amdgpu_crtc->event) {
309 		/* Update to correct count(s) if racing with vblank irq */
310 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
311 
312 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
313 
314 		/* page flip completed. clean up */
315 		amdgpu_crtc->event = NULL;
316 
317 	} else
318 		WARN_ON(1);
319 
320 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
321 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
322 
323 	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
324 					__func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
325 
326 	drm_crtc_vblank_put(&amdgpu_crtc->base);
327 }
328 
329 static void dm_crtc_high_irq(void *interrupt_params)
330 {
331 	struct common_irq_params *irq_params = interrupt_params;
332 	struct amdgpu_device *adev = irq_params->adev;
333 	struct amdgpu_crtc *acrtc;
334 	struct dm_crtc_state *acrtc_state;
335 
336 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
337 
338 	if (acrtc) {
339 		drm_crtc_handle_vblank(&acrtc->base);
340 		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
341 
342 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
343 
344 		if (acrtc_state->stream &&
345 		    acrtc_state->vrr_params.supported &&
346 		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
347 			mod_freesync_handle_v_update(
348 				adev->dm.freesync_module,
349 				acrtc_state->stream,
350 				&acrtc_state->vrr_params);
351 
352 			dc_stream_adjust_vmin_vmax(
353 				adev->dm.dc,
354 				acrtc_state->stream,
355 				&acrtc_state->vrr_params.adjust);
356 		}
357 	}
358 }
359 
360 static int dm_set_clockgating_state(void *handle,
361 		  enum amd_clockgating_state state)
362 {
363 	return 0;
364 }
365 
366 static int dm_set_powergating_state(void *handle,
367 		  enum amd_powergating_state state)
368 {
369 	return 0;
370 }
371 
372 /* Prototypes of private functions */
373 static int dm_early_init(void* handle);
374 
375 /* Allocate memory for FBC compressed data  */
376 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
377 {
378 	struct drm_device *dev = connector->dev;
379 	struct amdgpu_device *adev = dev->dev_private;
380 	struct dm_comressor_info *compressor = &adev->dm.compressor;
381 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
382 	struct drm_display_mode *mode;
383 	unsigned long max_size = 0;
384 
385 	if (adev->dm.dc->fbc_compressor == NULL)
386 		return;
387 
388 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
389 		return;
390 
391 	if (compressor->bo_ptr)
392 		return;
393 
394 
395 	list_for_each_entry(mode, &connector->modes, head) {
396 		if (max_size < mode->htotal * mode->vtotal)
397 			max_size = mode->htotal * mode->vtotal;
398 	}
399 
400 	if (max_size) {
401 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
402 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
403 			    &compressor->gpu_addr, &compressor->cpu_addr);
404 
405 		if (r)
406 			DRM_ERROR("DM: Failed to initialize FBC\n");
407 		else {
408 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
409 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
410 		}
411 
412 	}
413 
414 }
415 
416 static int amdgpu_dm_init(struct amdgpu_device *adev)
417 {
418 	struct dc_init_data init_data;
419 	adev->dm.ddev = adev->ddev;
420 	adev->dm.adev = adev;
421 
422 	/* Zero all the fields */
423 	memset(&init_data, 0, sizeof(init_data));
424 
425 	mutex_init(&adev->dm.dc_lock);
426 
427 	if(amdgpu_dm_irq_init(adev)) {
428 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
429 		goto error;
430 	}
431 
432 	init_data.asic_id.chip_family = adev->family;
433 
434 	init_data.asic_id.pci_revision_id = adev->rev_id;
435 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
436 
437 	init_data.asic_id.vram_width = adev->gmc.vram_width;
438 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
439 	init_data.asic_id.atombios_base_address =
440 		adev->mode_info.atom_context->bios;
441 
442 	init_data.driver = adev;
443 
444 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
445 
446 	if (!adev->dm.cgs_device) {
447 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
448 		goto error;
449 	}
450 
451 	init_data.cgs_device = adev->dm.cgs_device;
452 
453 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
454 
455 	/*
456 	 * TODO debug why this doesn't work on Raven
457 	 */
458 	if (adev->flags & AMD_IS_APU &&
459 	    adev->asic_type >= CHIP_CARRIZO &&
460 	    adev->asic_type < CHIP_RAVEN)
461 		init_data.flags.gpu_vm_support = true;
462 
463 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
464 		init_data.flags.fbc_support = true;
465 
466 	/* Display Core create. */
467 	adev->dm.dc = dc_create(&init_data);
468 
469 	if (adev->dm.dc) {
470 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
471 	} else {
472 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
473 		goto error;
474 	}
475 
476 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
477 	if (!adev->dm.freesync_module) {
478 		DRM_ERROR(
479 		"amdgpu: failed to initialize freesync_module.\n");
480 	} else
481 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
482 				adev->dm.freesync_module);
483 
484 	amdgpu_dm_init_color_mod();
485 
486 	if (amdgpu_dm_initialize_drm_device(adev)) {
487 		DRM_ERROR(
488 		"amdgpu: failed to initialize sw for display support.\n");
489 		goto error;
490 	}
491 
492 	/* Update the actual used number of crtc */
493 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
494 
495 	/* TODO: Add_display_info? */
496 
497 	/* TODO use dynamic cursor width */
498 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
499 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
500 
501 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
502 		DRM_ERROR(
503 		"amdgpu: failed to initialize sw for display support.\n");
504 		goto error;
505 	}
506 
507 #if defined(CONFIG_DEBUG_FS)
508 	if (dtn_debugfs_init(adev))
509 		DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
510 #endif
511 
512 	DRM_DEBUG_DRIVER("KMS initialized.\n");
513 
514 	return 0;
515 error:
516 	amdgpu_dm_fini(adev);
517 
518 	return -EINVAL;
519 }
520 
521 static void amdgpu_dm_fini(struct amdgpu_device *adev)
522 {
523 	amdgpu_dm_destroy_drm_device(&adev->dm);
524 	/*
525 	 * TODO: pageflip, vlank interrupt
526 	 *
527 	 * amdgpu_dm_irq_fini(adev);
528 	 */
529 
530 	if (adev->dm.cgs_device) {
531 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
532 		adev->dm.cgs_device = NULL;
533 	}
534 	if (adev->dm.freesync_module) {
535 		mod_freesync_destroy(adev->dm.freesync_module);
536 		adev->dm.freesync_module = NULL;
537 	}
538 	/* DC Destroy TODO: Replace destroy DAL */
539 	if (adev->dm.dc)
540 		dc_destroy(&adev->dm.dc);
541 
542 	mutex_destroy(&adev->dm.dc_lock);
543 
544 	return;
545 }
546 
547 static int load_dmcu_fw(struct amdgpu_device *adev)
548 {
549 	const char *fw_name_dmcu;
550 	int r;
551 	const struct dmcu_firmware_header_v1_0 *hdr;
552 
553 	switch(adev->asic_type) {
554 	case CHIP_BONAIRE:
555 	case CHIP_HAWAII:
556 	case CHIP_KAVERI:
557 	case CHIP_KABINI:
558 	case CHIP_MULLINS:
559 	case CHIP_TONGA:
560 	case CHIP_FIJI:
561 	case CHIP_CARRIZO:
562 	case CHIP_STONEY:
563 	case CHIP_POLARIS11:
564 	case CHIP_POLARIS10:
565 	case CHIP_POLARIS12:
566 	case CHIP_VEGAM:
567 	case CHIP_VEGA10:
568 	case CHIP_VEGA12:
569 	case CHIP_VEGA20:
570 		return 0;
571 	case CHIP_RAVEN:
572 		fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
573 		break;
574 	default:
575 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
576 		return -EINVAL;
577 	}
578 
579 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
580 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
581 		return 0;
582 	}
583 
584 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
585 	if (r == -ENOENT) {
586 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
587 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
588 		adev->dm.fw_dmcu = NULL;
589 		return 0;
590 	}
591 	if (r) {
592 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
593 			fw_name_dmcu);
594 		return r;
595 	}
596 
597 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
598 	if (r) {
599 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
600 			fw_name_dmcu);
601 		release_firmware(adev->dm.fw_dmcu);
602 		adev->dm.fw_dmcu = NULL;
603 		return r;
604 	}
605 
606 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
607 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
608 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
609 	adev->firmware.fw_size +=
610 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
611 
612 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
613 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
614 	adev->firmware.fw_size +=
615 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
616 
617 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
618 
619 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
620 
621 	return 0;
622 }
623 
624 static int dm_sw_init(void *handle)
625 {
626 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
627 
628 	return load_dmcu_fw(adev);
629 }
630 
631 static int dm_sw_fini(void *handle)
632 {
633 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
634 
635 	if(adev->dm.fw_dmcu) {
636 		release_firmware(adev->dm.fw_dmcu);
637 		adev->dm.fw_dmcu = NULL;
638 	}
639 
640 	return 0;
641 }
642 
643 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
644 {
645 	struct amdgpu_dm_connector *aconnector;
646 	struct drm_connector *connector;
647 	int ret = 0;
648 
649 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
650 
651 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
652 		aconnector = to_amdgpu_dm_connector(connector);
653 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
654 		    aconnector->mst_mgr.aux) {
655 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
656 					aconnector, aconnector->base.base.id);
657 
658 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
659 			if (ret < 0) {
660 				DRM_ERROR("DM_MST: Failed to start MST\n");
661 				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
662 				return ret;
663 				}
664 			}
665 	}
666 
667 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
668 	return ret;
669 }
670 
671 static int dm_late_init(void *handle)
672 {
673 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
674 
675 	struct dmcu_iram_parameters params;
676 	unsigned int linear_lut[16];
677 	int i;
678 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
679 	bool ret;
680 
681 	for (i = 0; i < 16; i++)
682 		linear_lut[i] = 0xFFFF * i / 15;
683 
684 	params.set = 0;
685 	params.backlight_ramping_start = 0xCCCC;
686 	params.backlight_ramping_reduction = 0xCCCCCCCC;
687 	params.backlight_lut_array_size = 16;
688 	params.backlight_lut_array = linear_lut;
689 
690 	ret = dmcu_load_iram(dmcu, params);
691 
692 	if (!ret)
693 		return -EINVAL;
694 
695 	return detect_mst_link_for_all_connectors(adev->ddev);
696 }
697 
698 static void s3_handle_mst(struct drm_device *dev, bool suspend)
699 {
700 	struct amdgpu_dm_connector *aconnector;
701 	struct drm_connector *connector;
702 
703 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
704 
705 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
706 		   aconnector = to_amdgpu_dm_connector(connector);
707 		   if (aconnector->dc_link->type == dc_connection_mst_branch &&
708 				   !aconnector->mst_port) {
709 
710 			   if (suspend)
711 				   drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
712 			   else
713 				   drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
714 		   }
715 	}
716 
717 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
718 }
719 
720 /**
721  * dm_hw_init() - Initialize DC device
722  * @handle: The base driver device containing the amdpgu_dm device.
723  *
724  * Initialize the &struct amdgpu_display_manager device. This involves calling
725  * the initializers of each DM component, then populating the struct with them.
726  *
727  * Although the function implies hardware initialization, both hardware and
728  * software are initialized here. Splitting them out to their relevant init
729  * hooks is a future TODO item.
730  *
731  * Some notable things that are initialized here:
732  *
733  * - Display Core, both software and hardware
734  * - DC modules that we need (freesync and color management)
735  * - DRM software states
736  * - Interrupt sources and handlers
737  * - Vblank support
738  * - Debug FS entries, if enabled
739  */
740 static int dm_hw_init(void *handle)
741 {
742 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
743 	/* Create DAL display manager */
744 	amdgpu_dm_init(adev);
745 	amdgpu_dm_hpd_init(adev);
746 
747 	return 0;
748 }
749 
750 /**
751  * dm_hw_fini() - Teardown DC device
752  * @handle: The base driver device containing the amdpgu_dm device.
753  *
754  * Teardown components within &struct amdgpu_display_manager that require
755  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
756  * were loaded. Also flush IRQ workqueues and disable them.
757  */
758 static int dm_hw_fini(void *handle)
759 {
760 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
761 
762 	amdgpu_dm_hpd_fini(adev);
763 
764 	amdgpu_dm_irq_fini(adev);
765 	amdgpu_dm_fini(adev);
766 	return 0;
767 }
768 
769 static int dm_suspend(void *handle)
770 {
771 	struct amdgpu_device *adev = handle;
772 	struct amdgpu_display_manager *dm = &adev->dm;
773 	int ret = 0;
774 
775 	s3_handle_mst(adev->ddev, true);
776 
777 	amdgpu_dm_irq_suspend(adev);
778 
779 	WARN_ON(adev->dm.cached_state);
780 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
781 
782 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
783 
784 	return ret;
785 }
786 
787 static struct amdgpu_dm_connector *
788 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
789 					     struct drm_crtc *crtc)
790 {
791 	uint32_t i;
792 	struct drm_connector_state *new_con_state;
793 	struct drm_connector *connector;
794 	struct drm_crtc *crtc_from_state;
795 
796 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
797 		crtc_from_state = new_con_state->crtc;
798 
799 		if (crtc_from_state == crtc)
800 			return to_amdgpu_dm_connector(connector);
801 	}
802 
803 	return NULL;
804 }
805 
806 static void emulated_link_detect(struct dc_link *link)
807 {
808 	struct dc_sink_init_data sink_init_data = { 0 };
809 	struct display_sink_capability sink_caps = { 0 };
810 	enum dc_edid_status edid_status;
811 	struct dc_context *dc_ctx = link->ctx;
812 	struct dc_sink *sink = NULL;
813 	struct dc_sink *prev_sink = NULL;
814 
815 	link->type = dc_connection_none;
816 	prev_sink = link->local_sink;
817 
818 	if (prev_sink != NULL)
819 		dc_sink_retain(prev_sink);
820 
821 	switch (link->connector_signal) {
822 	case SIGNAL_TYPE_HDMI_TYPE_A: {
823 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
824 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
825 		break;
826 	}
827 
828 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
829 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
830 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
831 		break;
832 	}
833 
834 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
835 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
836 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
837 		break;
838 	}
839 
840 	case SIGNAL_TYPE_LVDS: {
841 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
842 		sink_caps.signal = SIGNAL_TYPE_LVDS;
843 		break;
844 	}
845 
846 	case SIGNAL_TYPE_EDP: {
847 		sink_caps.transaction_type =
848 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
849 		sink_caps.signal = SIGNAL_TYPE_EDP;
850 		break;
851 	}
852 
853 	case SIGNAL_TYPE_DISPLAY_PORT: {
854 		sink_caps.transaction_type =
855 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
856 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
857 		break;
858 	}
859 
860 	default:
861 		DC_ERROR("Invalid connector type! signal:%d\n",
862 			link->connector_signal);
863 		return;
864 	}
865 
866 	sink_init_data.link = link;
867 	sink_init_data.sink_signal = sink_caps.signal;
868 
869 	sink = dc_sink_create(&sink_init_data);
870 	if (!sink) {
871 		DC_ERROR("Failed to create sink!\n");
872 		return;
873 	}
874 
875 	link->local_sink = sink;
876 
877 	edid_status = dm_helpers_read_local_edid(
878 			link->ctx,
879 			link,
880 			sink);
881 
882 	if (edid_status != EDID_OK)
883 		DC_ERROR("Failed to read EDID");
884 
885 }
886 
887 static int dm_resume(void *handle)
888 {
889 	struct amdgpu_device *adev = handle;
890 	struct drm_device *ddev = adev->ddev;
891 	struct amdgpu_display_manager *dm = &adev->dm;
892 	struct amdgpu_dm_connector *aconnector;
893 	struct drm_connector *connector;
894 	struct drm_crtc *crtc;
895 	struct drm_crtc_state *new_crtc_state;
896 	struct dm_crtc_state *dm_new_crtc_state;
897 	struct drm_plane *plane;
898 	struct drm_plane_state *new_plane_state;
899 	struct dm_plane_state *dm_new_plane_state;
900 	enum dc_connection_type new_connection_type = dc_connection_none;
901 	int ret;
902 	int i;
903 
904 	/* power on hardware */
905 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
906 
907 	/* program HPD filter */
908 	dc_resume(dm->dc);
909 
910 	/* On resume we need to  rewrite the MSTM control bits to enamble MST*/
911 	s3_handle_mst(ddev, false);
912 
913 	/*
914 	 * early enable HPD Rx IRQ, should be done before set mode as short
915 	 * pulse interrupts are used for MST
916 	 */
917 	amdgpu_dm_irq_resume_early(adev);
918 
919 	/* Do detection*/
920 	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
921 		aconnector = to_amdgpu_dm_connector(connector);
922 
923 		/*
924 		 * this is the case when traversing through already created
925 		 * MST connectors, should be skipped
926 		 */
927 		if (aconnector->mst_port)
928 			continue;
929 
930 		mutex_lock(&aconnector->hpd_lock);
931 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
932 			DRM_ERROR("KMS: Failed to detect connector\n");
933 
934 		if (aconnector->base.force && new_connection_type == dc_connection_none)
935 			emulated_link_detect(aconnector->dc_link);
936 		else
937 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
938 
939 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
940 			aconnector->fake_enable = false;
941 
942 		aconnector->dc_sink = NULL;
943 		amdgpu_dm_update_connector_after_detect(aconnector);
944 		mutex_unlock(&aconnector->hpd_lock);
945 	}
946 
947 	/* Force mode set in atomic commit */
948 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
949 		new_crtc_state->active_changed = true;
950 
951 	/*
952 	 * atomic_check is expected to create the dc states. We need to release
953 	 * them here, since they were duplicated as part of the suspend
954 	 * procedure.
955 	 */
956 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
957 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
958 		if (dm_new_crtc_state->stream) {
959 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
960 			dc_stream_release(dm_new_crtc_state->stream);
961 			dm_new_crtc_state->stream = NULL;
962 		}
963 	}
964 
965 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
966 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
967 		if (dm_new_plane_state->dc_state) {
968 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
969 			dc_plane_state_release(dm_new_plane_state->dc_state);
970 			dm_new_plane_state->dc_state = NULL;
971 		}
972 	}
973 
974 	ret = drm_atomic_helper_resume(ddev, dm->cached_state);
975 
976 	dm->cached_state = NULL;
977 
978 	amdgpu_dm_irq_resume_late(adev);
979 
980 	return ret;
981 }
982 
983 /**
984  * DOC: DM Lifecycle
985  *
986  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
987  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
988  * the base driver's device list to be initialized and torn down accordingly.
989  *
990  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
991  */
992 
993 static const struct amd_ip_funcs amdgpu_dm_funcs = {
994 	.name = "dm",
995 	.early_init = dm_early_init,
996 	.late_init = dm_late_init,
997 	.sw_init = dm_sw_init,
998 	.sw_fini = dm_sw_fini,
999 	.hw_init = dm_hw_init,
1000 	.hw_fini = dm_hw_fini,
1001 	.suspend = dm_suspend,
1002 	.resume = dm_resume,
1003 	.is_idle = dm_is_idle,
1004 	.wait_for_idle = dm_wait_for_idle,
1005 	.check_soft_reset = dm_check_soft_reset,
1006 	.soft_reset = dm_soft_reset,
1007 	.set_clockgating_state = dm_set_clockgating_state,
1008 	.set_powergating_state = dm_set_powergating_state,
1009 };
1010 
1011 const struct amdgpu_ip_block_version dm_ip_block =
1012 {
1013 	.type = AMD_IP_BLOCK_TYPE_DCE,
1014 	.major = 1,
1015 	.minor = 0,
1016 	.rev = 0,
1017 	.funcs = &amdgpu_dm_funcs,
1018 };
1019 
1020 
1021 /**
1022  * DOC: atomic
1023  *
1024  * *WIP*
1025  */
1026 
1027 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1028 	.fb_create = amdgpu_display_user_framebuffer_create,
1029 	.output_poll_changed = drm_fb_helper_output_poll_changed,
1030 	.atomic_check = amdgpu_dm_atomic_check,
1031 	.atomic_commit = amdgpu_dm_atomic_commit,
1032 };
1033 
1034 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1035 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1036 };
1037 
1038 static void
1039 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1040 {
1041 	struct drm_connector *connector = &aconnector->base;
1042 	struct drm_device *dev = connector->dev;
1043 	struct dc_sink *sink;
1044 
1045 	/* MST handled by drm_mst framework */
1046 	if (aconnector->mst_mgr.mst_state == true)
1047 		return;
1048 
1049 
1050 	sink = aconnector->dc_link->local_sink;
1051 
1052 	/*
1053 	 * Edid mgmt connector gets first update only in mode_valid hook and then
1054 	 * the connector sink is set to either fake or physical sink depends on link status.
1055 	 * Skip if already done during boot.
1056 	 */
1057 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1058 			&& aconnector->dc_em_sink) {
1059 
1060 		/*
1061 		 * For S3 resume with headless use eml_sink to fake stream
1062 		 * because on resume connector->sink is set to NULL
1063 		 */
1064 		mutex_lock(&dev->mode_config.mutex);
1065 
1066 		if (sink) {
1067 			if (aconnector->dc_sink) {
1068 				amdgpu_dm_update_freesync_caps(connector, NULL);
1069 				/*
1070 				 * retain and release below are used to
1071 				 * bump up refcount for sink because the link doesn't point
1072 				 * to it anymore after disconnect, so on next crtc to connector
1073 				 * reshuffle by UMD we will get into unwanted dc_sink release
1074 				 */
1075 				if (aconnector->dc_sink != aconnector->dc_em_sink)
1076 					dc_sink_release(aconnector->dc_sink);
1077 			}
1078 			aconnector->dc_sink = sink;
1079 			amdgpu_dm_update_freesync_caps(connector,
1080 					aconnector->edid);
1081 		} else {
1082 			amdgpu_dm_update_freesync_caps(connector, NULL);
1083 			if (!aconnector->dc_sink)
1084 				aconnector->dc_sink = aconnector->dc_em_sink;
1085 			else if (aconnector->dc_sink != aconnector->dc_em_sink)
1086 				dc_sink_retain(aconnector->dc_sink);
1087 		}
1088 
1089 		mutex_unlock(&dev->mode_config.mutex);
1090 		return;
1091 	}
1092 
1093 	/*
1094 	 * TODO: temporary guard to look for proper fix
1095 	 * if this sink is MST sink, we should not do anything
1096 	 */
1097 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
1098 		return;
1099 
1100 	if (aconnector->dc_sink == sink) {
1101 		/*
1102 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1103 		 * Do nothing!!
1104 		 */
1105 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1106 				aconnector->connector_id);
1107 		return;
1108 	}
1109 
1110 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1111 		aconnector->connector_id, aconnector->dc_sink, sink);
1112 
1113 	mutex_lock(&dev->mode_config.mutex);
1114 
1115 	/*
1116 	 * 1. Update status of the drm connector
1117 	 * 2. Send an event and let userspace tell us what to do
1118 	 */
1119 	if (sink) {
1120 		/*
1121 		 * TODO: check if we still need the S3 mode update workaround.
1122 		 * If yes, put it here.
1123 		 */
1124 		if (aconnector->dc_sink)
1125 			amdgpu_dm_update_freesync_caps(connector, NULL);
1126 
1127 		aconnector->dc_sink = sink;
1128 		if (sink->dc_edid.length == 0) {
1129 			aconnector->edid = NULL;
1130 			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1131 		} else {
1132 			aconnector->edid =
1133 				(struct edid *) sink->dc_edid.raw_edid;
1134 
1135 
1136 			drm_connector_update_edid_property(connector,
1137 					aconnector->edid);
1138 			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
1139 					    aconnector->edid);
1140 		}
1141 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
1142 
1143 	} else {
1144 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1145 		amdgpu_dm_update_freesync_caps(connector, NULL);
1146 		drm_connector_update_edid_property(connector, NULL);
1147 		aconnector->num_modes = 0;
1148 		aconnector->dc_sink = NULL;
1149 		aconnector->edid = NULL;
1150 	}
1151 
1152 	mutex_unlock(&dev->mode_config.mutex);
1153 }
1154 
1155 static void handle_hpd_irq(void *param)
1156 {
1157 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1158 	struct drm_connector *connector = &aconnector->base;
1159 	struct drm_device *dev = connector->dev;
1160 	enum dc_connection_type new_connection_type = dc_connection_none;
1161 
1162 	/*
1163 	 * In case of failure or MST no need to update connector status or notify the OS
1164 	 * since (for MST case) MST does this in its own context.
1165 	 */
1166 	mutex_lock(&aconnector->hpd_lock);
1167 
1168 	if (aconnector->fake_enable)
1169 		aconnector->fake_enable = false;
1170 
1171 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1172 		DRM_ERROR("KMS: Failed to detect connector\n");
1173 
1174 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
1175 		emulated_link_detect(aconnector->dc_link);
1176 
1177 
1178 		drm_modeset_lock_all(dev);
1179 		dm_restore_drm_connector_state(dev, connector);
1180 		drm_modeset_unlock_all(dev);
1181 
1182 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1183 			drm_kms_helper_hotplug_event(dev);
1184 
1185 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
1186 		amdgpu_dm_update_connector_after_detect(aconnector);
1187 
1188 
1189 		drm_modeset_lock_all(dev);
1190 		dm_restore_drm_connector_state(dev, connector);
1191 		drm_modeset_unlock_all(dev);
1192 
1193 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1194 			drm_kms_helper_hotplug_event(dev);
1195 	}
1196 	mutex_unlock(&aconnector->hpd_lock);
1197 
1198 }
1199 
1200 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
1201 {
1202 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
1203 	uint8_t dret;
1204 	bool new_irq_handled = false;
1205 	int dpcd_addr;
1206 	int dpcd_bytes_to_read;
1207 
1208 	const int max_process_count = 30;
1209 	int process_count = 0;
1210 
1211 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
1212 
1213 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
1214 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
1215 		/* DPCD 0x200 - 0x201 for downstream IRQ */
1216 		dpcd_addr = DP_SINK_COUNT;
1217 	} else {
1218 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
1219 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
1220 		dpcd_addr = DP_SINK_COUNT_ESI;
1221 	}
1222 
1223 	dret = drm_dp_dpcd_read(
1224 		&aconnector->dm_dp_aux.aux,
1225 		dpcd_addr,
1226 		esi,
1227 		dpcd_bytes_to_read);
1228 
1229 	while (dret == dpcd_bytes_to_read &&
1230 		process_count < max_process_count) {
1231 		uint8_t retry;
1232 		dret = 0;
1233 
1234 		process_count++;
1235 
1236 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
1237 		/* handle HPD short pulse irq */
1238 		if (aconnector->mst_mgr.mst_state)
1239 			drm_dp_mst_hpd_irq(
1240 				&aconnector->mst_mgr,
1241 				esi,
1242 				&new_irq_handled);
1243 
1244 		if (new_irq_handled) {
1245 			/* ACK at DPCD to notify down stream */
1246 			const int ack_dpcd_bytes_to_write =
1247 				dpcd_bytes_to_read - 1;
1248 
1249 			for (retry = 0; retry < 3; retry++) {
1250 				uint8_t wret;
1251 
1252 				wret = drm_dp_dpcd_write(
1253 					&aconnector->dm_dp_aux.aux,
1254 					dpcd_addr + 1,
1255 					&esi[1],
1256 					ack_dpcd_bytes_to_write);
1257 				if (wret == ack_dpcd_bytes_to_write)
1258 					break;
1259 			}
1260 
1261 			/* check if there is new irq to be handled */
1262 			dret = drm_dp_dpcd_read(
1263 				&aconnector->dm_dp_aux.aux,
1264 				dpcd_addr,
1265 				esi,
1266 				dpcd_bytes_to_read);
1267 
1268 			new_irq_handled = false;
1269 		} else {
1270 			break;
1271 		}
1272 	}
1273 
1274 	if (process_count == max_process_count)
1275 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
1276 }
1277 
1278 static void handle_hpd_rx_irq(void *param)
1279 {
1280 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1281 	struct drm_connector *connector = &aconnector->base;
1282 	struct drm_device *dev = connector->dev;
1283 	struct dc_link *dc_link = aconnector->dc_link;
1284 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
1285 	enum dc_connection_type new_connection_type = dc_connection_none;
1286 
1287 	/*
1288 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
1289 	 * conflict, after implement i2c helper, this mutex should be
1290 	 * retired.
1291 	 */
1292 	if (dc_link->type != dc_connection_mst_branch)
1293 		mutex_lock(&aconnector->hpd_lock);
1294 
1295 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
1296 			!is_mst_root_connector) {
1297 		/* Downstream Port status changed. */
1298 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
1299 			DRM_ERROR("KMS: Failed to detect connector\n");
1300 
1301 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
1302 			emulated_link_detect(dc_link);
1303 
1304 			if (aconnector->fake_enable)
1305 				aconnector->fake_enable = false;
1306 
1307 			amdgpu_dm_update_connector_after_detect(aconnector);
1308 
1309 
1310 			drm_modeset_lock_all(dev);
1311 			dm_restore_drm_connector_state(dev, connector);
1312 			drm_modeset_unlock_all(dev);
1313 
1314 			drm_kms_helper_hotplug_event(dev);
1315 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1316 
1317 			if (aconnector->fake_enable)
1318 				aconnector->fake_enable = false;
1319 
1320 			amdgpu_dm_update_connector_after_detect(aconnector);
1321 
1322 
1323 			drm_modeset_lock_all(dev);
1324 			dm_restore_drm_connector_state(dev, connector);
1325 			drm_modeset_unlock_all(dev);
1326 
1327 			drm_kms_helper_hotplug_event(dev);
1328 		}
1329 	}
1330 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
1331 	    (dc_link->type == dc_connection_mst_branch))
1332 		dm_handle_hpd_rx_irq(aconnector);
1333 
1334 	if (dc_link->type != dc_connection_mst_branch) {
1335 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
1336 		mutex_unlock(&aconnector->hpd_lock);
1337 	}
1338 }
1339 
1340 static void register_hpd_handlers(struct amdgpu_device *adev)
1341 {
1342 	struct drm_device *dev = adev->ddev;
1343 	struct drm_connector *connector;
1344 	struct amdgpu_dm_connector *aconnector;
1345 	const struct dc_link *dc_link;
1346 	struct dc_interrupt_params int_params = {0};
1347 
1348 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1349 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1350 
1351 	list_for_each_entry(connector,
1352 			&dev->mode_config.connector_list, head)	{
1353 
1354 		aconnector = to_amdgpu_dm_connector(connector);
1355 		dc_link = aconnector->dc_link;
1356 
1357 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1358 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1359 			int_params.irq_source = dc_link->irq_source_hpd;
1360 
1361 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
1362 					handle_hpd_irq,
1363 					(void *) aconnector);
1364 		}
1365 
1366 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1367 
1368 			/* Also register for DP short pulse (hpd_rx). */
1369 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1370 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
1371 
1372 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
1373 					handle_hpd_rx_irq,
1374 					(void *) aconnector);
1375 		}
1376 	}
1377 }
1378 
1379 /* Register IRQ sources and initialize IRQ callbacks */
1380 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1381 {
1382 	struct dc *dc = adev->dm.dc;
1383 	struct common_irq_params *c_irq_params;
1384 	struct dc_interrupt_params int_params = {0};
1385 	int r;
1386 	int i;
1387 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
1388 
1389 	if (adev->asic_type == CHIP_VEGA10 ||
1390 	    adev->asic_type == CHIP_VEGA12 ||
1391 	    adev->asic_type == CHIP_VEGA20 ||
1392 	    adev->asic_type == CHIP_RAVEN)
1393 		client_id = SOC15_IH_CLIENTID_DCE;
1394 
1395 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1396 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1397 
1398 	/*
1399 	 * Actions of amdgpu_irq_add_id():
1400 	 * 1. Register a set() function with base driver.
1401 	 *    Base driver will call set() function to enable/disable an
1402 	 *    interrupt in DC hardware.
1403 	 * 2. Register amdgpu_dm_irq_handler().
1404 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1405 	 *    coming from DC hardware.
1406 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1407 	 *    for acknowledging and handling. */
1408 
1409 	/* Use VBLANK interrupt */
1410 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
1411 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
1412 		if (r) {
1413 			DRM_ERROR("Failed to add crtc irq id!\n");
1414 			return r;
1415 		}
1416 
1417 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1418 		int_params.irq_source =
1419 			dc_interrupt_to_irq_source(dc, i, 0);
1420 
1421 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1422 
1423 		c_irq_params->adev = adev;
1424 		c_irq_params->irq_src = int_params.irq_source;
1425 
1426 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1427 				dm_crtc_high_irq, c_irq_params);
1428 	}
1429 
1430 	/* Use GRPH_PFLIP interrupt */
1431 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
1432 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
1433 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
1434 		if (r) {
1435 			DRM_ERROR("Failed to add page flip irq id!\n");
1436 			return r;
1437 		}
1438 
1439 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1440 		int_params.irq_source =
1441 			dc_interrupt_to_irq_source(dc, i, 0);
1442 
1443 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1444 
1445 		c_irq_params->adev = adev;
1446 		c_irq_params->irq_src = int_params.irq_source;
1447 
1448 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1449 				dm_pflip_high_irq, c_irq_params);
1450 
1451 	}
1452 
1453 	/* HPD */
1454 	r = amdgpu_irq_add_id(adev, client_id,
1455 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
1456 	if (r) {
1457 		DRM_ERROR("Failed to add hpd irq id!\n");
1458 		return r;
1459 	}
1460 
1461 	register_hpd_handlers(adev);
1462 
1463 	return 0;
1464 }
1465 
1466 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1467 /* Register IRQ sources and initialize IRQ callbacks */
1468 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1469 {
1470 	struct dc *dc = adev->dm.dc;
1471 	struct common_irq_params *c_irq_params;
1472 	struct dc_interrupt_params int_params = {0};
1473 	int r;
1474 	int i;
1475 
1476 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1477 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1478 
1479 	/*
1480 	 * Actions of amdgpu_irq_add_id():
1481 	 * 1. Register a set() function with base driver.
1482 	 *    Base driver will call set() function to enable/disable an
1483 	 *    interrupt in DC hardware.
1484 	 * 2. Register amdgpu_dm_irq_handler().
1485 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1486 	 *    coming from DC hardware.
1487 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1488 	 *    for acknowledging and handling.
1489 	 */
1490 
1491 	/* Use VSTARTUP interrupt */
1492 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
1493 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
1494 			i++) {
1495 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
1496 
1497 		if (r) {
1498 			DRM_ERROR("Failed to add crtc irq id!\n");
1499 			return r;
1500 		}
1501 
1502 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1503 		int_params.irq_source =
1504 			dc_interrupt_to_irq_source(dc, i, 0);
1505 
1506 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1507 
1508 		c_irq_params->adev = adev;
1509 		c_irq_params->irq_src = int_params.irq_source;
1510 
1511 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1512 				dm_crtc_high_irq, c_irq_params);
1513 	}
1514 
1515 	/* Use GRPH_PFLIP interrupt */
1516 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1517 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1518 			i++) {
1519 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1520 		if (r) {
1521 			DRM_ERROR("Failed to add page flip irq id!\n");
1522 			return r;
1523 		}
1524 
1525 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1526 		int_params.irq_source =
1527 			dc_interrupt_to_irq_source(dc, i, 0);
1528 
1529 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1530 
1531 		c_irq_params->adev = adev;
1532 		c_irq_params->irq_src = int_params.irq_source;
1533 
1534 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1535 				dm_pflip_high_irq, c_irq_params);
1536 
1537 	}
1538 
1539 	/* HPD */
1540 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
1541 			&adev->hpd_irq);
1542 	if (r) {
1543 		DRM_ERROR("Failed to add hpd irq id!\n");
1544 		return r;
1545 	}
1546 
1547 	register_hpd_handlers(adev);
1548 
1549 	return 0;
1550 }
1551 #endif
1552 
1553 /*
1554  * Acquires the lock for the atomic state object and returns
1555  * the new atomic state.
1556  *
1557  * This should only be called during atomic check.
1558  */
1559 static int dm_atomic_get_state(struct drm_atomic_state *state,
1560 			       struct dm_atomic_state **dm_state)
1561 {
1562 	struct drm_device *dev = state->dev;
1563 	struct amdgpu_device *adev = dev->dev_private;
1564 	struct amdgpu_display_manager *dm = &adev->dm;
1565 	struct drm_private_state *priv_state;
1566 	int ret;
1567 
1568 	if (*dm_state)
1569 		return 0;
1570 
1571 	ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx);
1572 	if (ret)
1573 		return ret;
1574 
1575 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
1576 	if (IS_ERR(priv_state))
1577 		return PTR_ERR(priv_state);
1578 
1579 	*dm_state = to_dm_atomic_state(priv_state);
1580 
1581 	return 0;
1582 }
1583 
1584 struct dm_atomic_state *
1585 dm_atomic_get_new_state(struct drm_atomic_state *state)
1586 {
1587 	struct drm_device *dev = state->dev;
1588 	struct amdgpu_device *adev = dev->dev_private;
1589 	struct amdgpu_display_manager *dm = &adev->dm;
1590 	struct drm_private_obj *obj;
1591 	struct drm_private_state *new_obj_state;
1592 	int i;
1593 
1594 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
1595 		if (obj->funcs == dm->atomic_obj.funcs)
1596 			return to_dm_atomic_state(new_obj_state);
1597 	}
1598 
1599 	return NULL;
1600 }
1601 
1602 struct dm_atomic_state *
1603 dm_atomic_get_old_state(struct drm_atomic_state *state)
1604 {
1605 	struct drm_device *dev = state->dev;
1606 	struct amdgpu_device *adev = dev->dev_private;
1607 	struct amdgpu_display_manager *dm = &adev->dm;
1608 	struct drm_private_obj *obj;
1609 	struct drm_private_state *old_obj_state;
1610 	int i;
1611 
1612 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
1613 		if (obj->funcs == dm->atomic_obj.funcs)
1614 			return to_dm_atomic_state(old_obj_state);
1615 	}
1616 
1617 	return NULL;
1618 }
1619 
1620 static struct drm_private_state *
1621 dm_atomic_duplicate_state(struct drm_private_obj *obj)
1622 {
1623 	struct dm_atomic_state *old_state, *new_state;
1624 
1625 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
1626 	if (!new_state)
1627 		return NULL;
1628 
1629 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
1630 
1631 	new_state->context = dc_create_state();
1632 	if (!new_state->context) {
1633 		kfree(new_state);
1634 		return NULL;
1635 	}
1636 
1637 	old_state = to_dm_atomic_state(obj->state);
1638 	if (old_state && old_state->context)
1639 		dc_resource_state_copy_construct(old_state->context,
1640 						 new_state->context);
1641 
1642 	return &new_state->base;
1643 }
1644 
1645 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
1646 				    struct drm_private_state *state)
1647 {
1648 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
1649 
1650 	if (dm_state && dm_state->context)
1651 		dc_release_state(dm_state->context);
1652 
1653 	kfree(dm_state);
1654 }
1655 
1656 static struct drm_private_state_funcs dm_atomic_state_funcs = {
1657 	.atomic_duplicate_state = dm_atomic_duplicate_state,
1658 	.atomic_destroy_state = dm_atomic_destroy_state,
1659 };
1660 
1661 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1662 {
1663 	struct dm_atomic_state *state;
1664 	int r;
1665 
1666 	adev->mode_info.mode_config_initialized = true;
1667 
1668 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1669 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1670 
1671 	adev->ddev->mode_config.max_width = 16384;
1672 	adev->ddev->mode_config.max_height = 16384;
1673 
1674 	adev->ddev->mode_config.preferred_depth = 24;
1675 	adev->ddev->mode_config.prefer_shadow = 1;
1676 	/* indicates support for immediate flip */
1677 	adev->ddev->mode_config.async_page_flip = true;
1678 
1679 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
1680 
1681 	drm_modeset_lock_init(&adev->dm.atomic_obj_lock);
1682 
1683 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1684 	if (!state)
1685 		return -ENOMEM;
1686 
1687 	state->context = dc_create_state();
1688 	if (!state->context) {
1689 		kfree(state);
1690 		return -ENOMEM;
1691 	}
1692 
1693 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
1694 
1695 	drm_atomic_private_obj_init(adev->ddev,
1696 				    &adev->dm.atomic_obj,
1697 				    &state->base,
1698 				    &dm_atomic_state_funcs);
1699 
1700 	r = amdgpu_display_modeset_create_props(adev);
1701 	if (r)
1702 		return r;
1703 
1704 	return 0;
1705 }
1706 
1707 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
1708 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
1709 
1710 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1711 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1712 
1713 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
1714 {
1715 #if defined(CONFIG_ACPI)
1716 	struct amdgpu_dm_backlight_caps caps;
1717 
1718 	if (dm->backlight_caps.caps_valid)
1719 		return;
1720 
1721 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
1722 	if (caps.caps_valid) {
1723 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
1724 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
1725 		dm->backlight_caps.caps_valid = true;
1726 	} else {
1727 		dm->backlight_caps.min_input_signal =
1728 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
1729 		dm->backlight_caps.max_input_signal =
1730 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
1731 	}
1732 #else
1733 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
1734 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
1735 #endif
1736 }
1737 
1738 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1739 {
1740 	struct amdgpu_display_manager *dm = bl_get_data(bd);
1741 	struct amdgpu_dm_backlight_caps caps;
1742 	uint32_t brightness = bd->props.brightness;
1743 
1744 	amdgpu_dm_update_backlight_caps(dm);
1745 	caps = dm->backlight_caps;
1746 	/*
1747 	 * The brightness input is in the range 0-255
1748 	 * It needs to be rescaled to be between the
1749 	 * requested min and max input signal
1750 	 *
1751 	 * It also needs to be scaled up by 0x101 to
1752 	 * match the DC interface which has a range of
1753 	 * 0 to 0xffff
1754 	 */
1755 	brightness =
1756 		brightness
1757 		* 0x101
1758 		* (caps.max_input_signal - caps.min_input_signal)
1759 		/ AMDGPU_MAX_BL_LEVEL
1760 		+ caps.min_input_signal * 0x101;
1761 
1762 	if (dc_link_set_backlight_level(dm->backlight_link,
1763 			brightness, 0, 0))
1764 		return 0;
1765 	else
1766 		return 1;
1767 }
1768 
1769 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1770 {
1771 	struct amdgpu_display_manager *dm = bl_get_data(bd);
1772 	int ret = dc_link_get_backlight_level(dm->backlight_link);
1773 
1774 	if (ret == DC_ERROR_UNEXPECTED)
1775 		return bd->props.brightness;
1776 	return ret;
1777 }
1778 
1779 static const struct backlight_ops amdgpu_dm_backlight_ops = {
1780 	.get_brightness = amdgpu_dm_backlight_get_brightness,
1781 	.update_status	= amdgpu_dm_backlight_update_status,
1782 };
1783 
1784 static void
1785 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1786 {
1787 	char bl_name[16];
1788 	struct backlight_properties props = { 0 };
1789 
1790 	amdgpu_dm_update_backlight_caps(dm);
1791 
1792 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1793 	props.brightness = AMDGPU_MAX_BL_LEVEL;
1794 	props.type = BACKLIGHT_RAW;
1795 
1796 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1797 			dm->adev->ddev->primary->index);
1798 
1799 	dm->backlight_dev = backlight_device_register(bl_name,
1800 			dm->adev->ddev->dev,
1801 			dm,
1802 			&amdgpu_dm_backlight_ops,
1803 			&props);
1804 
1805 	if (IS_ERR(dm->backlight_dev))
1806 		DRM_ERROR("DM: Backlight registration failed!\n");
1807 	else
1808 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1809 }
1810 
1811 #endif
1812 
1813 static int initialize_plane(struct amdgpu_display_manager *dm,
1814 			     struct amdgpu_mode_info *mode_info,
1815 			     int plane_id)
1816 {
1817 	struct drm_plane *plane;
1818 	unsigned long possible_crtcs;
1819 	int ret = 0;
1820 
1821 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
1822 	mode_info->planes[plane_id] = plane;
1823 
1824 	if (!plane) {
1825 		DRM_ERROR("KMS: Failed to allocate plane\n");
1826 		return -ENOMEM;
1827 	}
1828 	plane->type = mode_info->plane_type[plane_id];
1829 
1830 	/*
1831 	 * HACK: IGT tests expect that each plane can only have
1832 	 * one possible CRTC. For now, set one CRTC for each
1833 	 * plane that is not an underlay, but still allow multiple
1834 	 * CRTCs for underlay planes.
1835 	 */
1836 	possible_crtcs = 1 << plane_id;
1837 	if (plane_id >= dm->dc->caps.max_streams)
1838 		possible_crtcs = 0xff;
1839 
1840 	ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
1841 
1842 	if (ret) {
1843 		DRM_ERROR("KMS: Failed to initialize plane\n");
1844 		return ret;
1845 	}
1846 
1847 	return ret;
1848 }
1849 
1850 
1851 static void register_backlight_device(struct amdgpu_display_manager *dm,
1852 				      struct dc_link *link)
1853 {
1854 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1855 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1856 
1857 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
1858 	    link->type != dc_connection_none) {
1859 		/*
1860 		 * Event if registration failed, we should continue with
1861 		 * DM initialization because not having a backlight control
1862 		 * is better then a black screen.
1863 		 */
1864 		amdgpu_dm_register_backlight_device(dm);
1865 
1866 		if (dm->backlight_dev)
1867 			dm->backlight_link = link;
1868 	}
1869 #endif
1870 }
1871 
1872 
1873 /*
1874  * In this architecture, the association
1875  * connector -> encoder -> crtc
1876  * id not really requried. The crtc and connector will hold the
1877  * display_index as an abstraction to use with DAL component
1878  *
1879  * Returns 0 on success
1880  */
1881 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1882 {
1883 	struct amdgpu_display_manager *dm = &adev->dm;
1884 	int32_t i;
1885 	struct amdgpu_dm_connector *aconnector = NULL;
1886 	struct amdgpu_encoder *aencoder = NULL;
1887 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
1888 	uint32_t link_cnt;
1889 	int32_t total_overlay_planes, total_primary_planes;
1890 	enum dc_connection_type new_connection_type = dc_connection_none;
1891 
1892 	link_cnt = dm->dc->caps.max_links;
1893 	if (amdgpu_dm_mode_config_init(dm->adev)) {
1894 		DRM_ERROR("DM: Failed to initialize mode config\n");
1895 		return -EINVAL;
1896 	}
1897 
1898 	/* Identify the number of planes to be initialized */
1899 	total_overlay_planes = dm->dc->caps.max_slave_planes;
1900 	total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
1901 
1902 	/* First initialize overlay planes, index starting after primary planes */
1903 	for (i = (total_overlay_planes - 1); i >= 0; i--) {
1904 		if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
1905 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
1906 			goto fail;
1907 		}
1908 	}
1909 
1910 	/* Initialize primary planes */
1911 	for (i = (total_primary_planes - 1); i >= 0; i--) {
1912 		if (initialize_plane(dm, mode_info, i)) {
1913 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
1914 			goto fail;
1915 		}
1916 	}
1917 
1918 	for (i = 0; i < dm->dc->caps.max_streams; i++)
1919 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
1920 			DRM_ERROR("KMS: Failed to initialize crtc\n");
1921 			goto fail;
1922 		}
1923 
1924 	dm->display_indexes_num = dm->dc->caps.max_streams;
1925 
1926 	/* loops over all connectors on the board */
1927 	for (i = 0; i < link_cnt; i++) {
1928 		struct dc_link *link = NULL;
1929 
1930 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1931 			DRM_ERROR(
1932 				"KMS: Cannot support more than %d display indexes\n",
1933 					AMDGPU_DM_MAX_DISPLAY_INDEX);
1934 			continue;
1935 		}
1936 
1937 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1938 		if (!aconnector)
1939 			goto fail;
1940 
1941 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
1942 		if (!aencoder)
1943 			goto fail;
1944 
1945 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
1946 			DRM_ERROR("KMS: Failed to initialize encoder\n");
1947 			goto fail;
1948 		}
1949 
1950 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
1951 			DRM_ERROR("KMS: Failed to initialize connector\n");
1952 			goto fail;
1953 		}
1954 
1955 		link = dc_get_link_at_index(dm->dc, i);
1956 
1957 		if (!dc_link_detect_sink(link, &new_connection_type))
1958 			DRM_ERROR("KMS: Failed to detect connector\n");
1959 
1960 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
1961 			emulated_link_detect(link);
1962 			amdgpu_dm_update_connector_after_detect(aconnector);
1963 
1964 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
1965 			amdgpu_dm_update_connector_after_detect(aconnector);
1966 			register_backlight_device(dm, link);
1967 		}
1968 
1969 
1970 	}
1971 
1972 	/* Software is initialized. Now we can register interrupt handlers. */
1973 	switch (adev->asic_type) {
1974 	case CHIP_BONAIRE:
1975 	case CHIP_HAWAII:
1976 	case CHIP_KAVERI:
1977 	case CHIP_KABINI:
1978 	case CHIP_MULLINS:
1979 	case CHIP_TONGA:
1980 	case CHIP_FIJI:
1981 	case CHIP_CARRIZO:
1982 	case CHIP_STONEY:
1983 	case CHIP_POLARIS11:
1984 	case CHIP_POLARIS10:
1985 	case CHIP_POLARIS12:
1986 	case CHIP_VEGAM:
1987 	case CHIP_VEGA10:
1988 	case CHIP_VEGA12:
1989 	case CHIP_VEGA20:
1990 		if (dce110_register_irq_handlers(dm->adev)) {
1991 			DRM_ERROR("DM: Failed to initialize IRQ\n");
1992 			goto fail;
1993 		}
1994 		break;
1995 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1996 	case CHIP_RAVEN:
1997 		if (dcn10_register_irq_handlers(dm->adev)) {
1998 			DRM_ERROR("DM: Failed to initialize IRQ\n");
1999 			goto fail;
2000 		}
2001 		break;
2002 #endif
2003 	default:
2004 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2005 		goto fail;
2006 	}
2007 
2008 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
2009 		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
2010 
2011 	return 0;
2012 fail:
2013 	kfree(aencoder);
2014 	kfree(aconnector);
2015 	for (i = 0; i < dm->dc->caps.max_planes; i++)
2016 		kfree(mode_info->planes[i]);
2017 	return -EINVAL;
2018 }
2019 
2020 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
2021 {
2022 	drm_mode_config_cleanup(dm->ddev);
2023 	drm_atomic_private_obj_fini(&dm->atomic_obj);
2024 	return;
2025 }
2026 
2027 /******************************************************************************
2028  * amdgpu_display_funcs functions
2029  *****************************************************************************/
2030 
2031 /*
2032  * dm_bandwidth_update - program display watermarks
2033  *
2034  * @adev: amdgpu_device pointer
2035  *
2036  * Calculate and program the display watermarks and line buffer allocation.
2037  */
2038 static void dm_bandwidth_update(struct amdgpu_device *adev)
2039 {
2040 	/* TODO: implement later */
2041 }
2042 
2043 static const struct amdgpu_display_funcs dm_display_funcs = {
2044 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
2045 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
2046 	.backlight_set_level = NULL, /* never called for DC */
2047 	.backlight_get_level = NULL, /* never called for DC */
2048 	.hpd_sense = NULL,/* called unconditionally */
2049 	.hpd_set_polarity = NULL, /* called unconditionally */
2050 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
2051 	.page_flip_get_scanoutpos =
2052 		dm_crtc_get_scanoutpos,/* called unconditionally */
2053 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
2054 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
2055 };
2056 
2057 #if defined(CONFIG_DEBUG_KERNEL_DC)
2058 
2059 static ssize_t s3_debug_store(struct device *device,
2060 			      struct device_attribute *attr,
2061 			      const char *buf,
2062 			      size_t count)
2063 {
2064 	int ret;
2065 	int s3_state;
2066 	struct pci_dev *pdev = to_pci_dev(device);
2067 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
2068 	struct amdgpu_device *adev = drm_dev->dev_private;
2069 
2070 	ret = kstrtoint(buf, 0, &s3_state);
2071 
2072 	if (ret == 0) {
2073 		if (s3_state) {
2074 			dm_resume(adev);
2075 			drm_kms_helper_hotplug_event(adev->ddev);
2076 		} else
2077 			dm_suspend(adev);
2078 	}
2079 
2080 	return ret == 0 ? count : 0;
2081 }
2082 
2083 DEVICE_ATTR_WO(s3_debug);
2084 
2085 #endif
2086 
2087 static int dm_early_init(void *handle)
2088 {
2089 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2090 
2091 	switch (adev->asic_type) {
2092 	case CHIP_BONAIRE:
2093 	case CHIP_HAWAII:
2094 		adev->mode_info.num_crtc = 6;
2095 		adev->mode_info.num_hpd = 6;
2096 		adev->mode_info.num_dig = 6;
2097 		adev->mode_info.plane_type = dm_plane_type_default;
2098 		break;
2099 	case CHIP_KAVERI:
2100 		adev->mode_info.num_crtc = 4;
2101 		adev->mode_info.num_hpd = 6;
2102 		adev->mode_info.num_dig = 7;
2103 		adev->mode_info.plane_type = dm_plane_type_default;
2104 		break;
2105 	case CHIP_KABINI:
2106 	case CHIP_MULLINS:
2107 		adev->mode_info.num_crtc = 2;
2108 		adev->mode_info.num_hpd = 6;
2109 		adev->mode_info.num_dig = 6;
2110 		adev->mode_info.plane_type = dm_plane_type_default;
2111 		break;
2112 	case CHIP_FIJI:
2113 	case CHIP_TONGA:
2114 		adev->mode_info.num_crtc = 6;
2115 		adev->mode_info.num_hpd = 6;
2116 		adev->mode_info.num_dig = 7;
2117 		adev->mode_info.plane_type = dm_plane_type_default;
2118 		break;
2119 	case CHIP_CARRIZO:
2120 		adev->mode_info.num_crtc = 3;
2121 		adev->mode_info.num_hpd = 6;
2122 		adev->mode_info.num_dig = 9;
2123 		adev->mode_info.plane_type = dm_plane_type_carizzo;
2124 		break;
2125 	case CHIP_STONEY:
2126 		adev->mode_info.num_crtc = 2;
2127 		adev->mode_info.num_hpd = 6;
2128 		adev->mode_info.num_dig = 9;
2129 		adev->mode_info.plane_type = dm_plane_type_stoney;
2130 		break;
2131 	case CHIP_POLARIS11:
2132 	case CHIP_POLARIS12:
2133 		adev->mode_info.num_crtc = 5;
2134 		adev->mode_info.num_hpd = 5;
2135 		adev->mode_info.num_dig = 5;
2136 		adev->mode_info.plane_type = dm_plane_type_default;
2137 		break;
2138 	case CHIP_POLARIS10:
2139 	case CHIP_VEGAM:
2140 		adev->mode_info.num_crtc = 6;
2141 		adev->mode_info.num_hpd = 6;
2142 		adev->mode_info.num_dig = 6;
2143 		adev->mode_info.plane_type = dm_plane_type_default;
2144 		break;
2145 	case CHIP_VEGA10:
2146 	case CHIP_VEGA12:
2147 	case CHIP_VEGA20:
2148 		adev->mode_info.num_crtc = 6;
2149 		adev->mode_info.num_hpd = 6;
2150 		adev->mode_info.num_dig = 6;
2151 		adev->mode_info.plane_type = dm_plane_type_default;
2152 		break;
2153 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2154 	case CHIP_RAVEN:
2155 		adev->mode_info.num_crtc = 4;
2156 		adev->mode_info.num_hpd = 4;
2157 		adev->mode_info.num_dig = 4;
2158 		adev->mode_info.plane_type = dm_plane_type_default;
2159 		break;
2160 #endif
2161 	default:
2162 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2163 		return -EINVAL;
2164 	}
2165 
2166 	amdgpu_dm_set_irq_funcs(adev);
2167 
2168 	if (adev->mode_info.funcs == NULL)
2169 		adev->mode_info.funcs = &dm_display_funcs;
2170 
2171 	/*
2172 	 * Note: Do NOT change adev->audio_endpt_rreg and
2173 	 * adev->audio_endpt_wreg because they are initialised in
2174 	 * amdgpu_device_init()
2175 	 */
2176 #if defined(CONFIG_DEBUG_KERNEL_DC)
2177 	device_create_file(
2178 		adev->ddev->dev,
2179 		&dev_attr_s3_debug);
2180 #endif
2181 
2182 	return 0;
2183 }
2184 
2185 static bool modeset_required(struct drm_crtc_state *crtc_state,
2186 			     struct dc_stream_state *new_stream,
2187 			     struct dc_stream_state *old_stream)
2188 {
2189 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
2190 		return false;
2191 
2192 	if (!crtc_state->enable)
2193 		return false;
2194 
2195 	return crtc_state->active;
2196 }
2197 
2198 static bool modereset_required(struct drm_crtc_state *crtc_state)
2199 {
2200 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
2201 		return false;
2202 
2203 	return !crtc_state->enable || !crtc_state->active;
2204 }
2205 
2206 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
2207 {
2208 	drm_encoder_cleanup(encoder);
2209 	kfree(encoder);
2210 }
2211 
2212 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
2213 	.destroy = amdgpu_dm_encoder_destroy,
2214 };
2215 
2216 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
2217 					struct dc_plane_state *plane_state)
2218 {
2219 	plane_state->src_rect.x = state->src_x >> 16;
2220 	plane_state->src_rect.y = state->src_y >> 16;
2221 	/* we ignore the mantissa for now and do not deal with floating pixels :( */
2222 	plane_state->src_rect.width = state->src_w >> 16;
2223 
2224 	if (plane_state->src_rect.width == 0)
2225 		return false;
2226 
2227 	plane_state->src_rect.height = state->src_h >> 16;
2228 	if (plane_state->src_rect.height == 0)
2229 		return false;
2230 
2231 	plane_state->dst_rect.x = state->crtc_x;
2232 	plane_state->dst_rect.y = state->crtc_y;
2233 
2234 	if (state->crtc_w == 0)
2235 		return false;
2236 
2237 	plane_state->dst_rect.width = state->crtc_w;
2238 
2239 	if (state->crtc_h == 0)
2240 		return false;
2241 
2242 	plane_state->dst_rect.height = state->crtc_h;
2243 
2244 	plane_state->clip_rect = plane_state->dst_rect;
2245 
2246 	switch (state->rotation & DRM_MODE_ROTATE_MASK) {
2247 	case DRM_MODE_ROTATE_0:
2248 		plane_state->rotation = ROTATION_ANGLE_0;
2249 		break;
2250 	case DRM_MODE_ROTATE_90:
2251 		plane_state->rotation = ROTATION_ANGLE_90;
2252 		break;
2253 	case DRM_MODE_ROTATE_180:
2254 		plane_state->rotation = ROTATION_ANGLE_180;
2255 		break;
2256 	case DRM_MODE_ROTATE_270:
2257 		plane_state->rotation = ROTATION_ANGLE_270;
2258 		break;
2259 	default:
2260 		plane_state->rotation = ROTATION_ANGLE_0;
2261 		break;
2262 	}
2263 
2264 	return true;
2265 }
2266 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
2267 		       uint64_t *tiling_flags)
2268 {
2269 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
2270 	int r = amdgpu_bo_reserve(rbo, false);
2271 
2272 	if (unlikely(r)) {
2273 		/* Don't show error message when returning -ERESTARTSYS */
2274 		if (r != -ERESTARTSYS)
2275 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
2276 		return r;
2277 	}
2278 
2279 	if (tiling_flags)
2280 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
2281 
2282 	amdgpu_bo_unreserve(rbo);
2283 
2284 	return r;
2285 }
2286 
2287 static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
2288 					 struct dc_plane_state *plane_state,
2289 					 const struct amdgpu_framebuffer *amdgpu_fb)
2290 {
2291 	uint64_t tiling_flags;
2292 	unsigned int awidth;
2293 	const struct drm_framebuffer *fb = &amdgpu_fb->base;
2294 	int ret = 0;
2295 	struct drm_format_name_buf format_name;
2296 
2297 	ret = get_fb_info(
2298 		amdgpu_fb,
2299 		&tiling_flags);
2300 
2301 	if (ret)
2302 		return ret;
2303 
2304 	switch (fb->format->format) {
2305 	case DRM_FORMAT_C8:
2306 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
2307 		break;
2308 	case DRM_FORMAT_RGB565:
2309 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
2310 		break;
2311 	case DRM_FORMAT_XRGB8888:
2312 	case DRM_FORMAT_ARGB8888:
2313 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
2314 		break;
2315 	case DRM_FORMAT_XRGB2101010:
2316 	case DRM_FORMAT_ARGB2101010:
2317 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
2318 		break;
2319 	case DRM_FORMAT_XBGR2101010:
2320 	case DRM_FORMAT_ABGR2101010:
2321 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
2322 		break;
2323 	case DRM_FORMAT_XBGR8888:
2324 	case DRM_FORMAT_ABGR8888:
2325 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
2326 		break;
2327 	case DRM_FORMAT_NV21:
2328 		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
2329 		break;
2330 	case DRM_FORMAT_NV12:
2331 		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
2332 		break;
2333 	default:
2334 		DRM_ERROR("Unsupported screen format %s\n",
2335 			  drm_get_format_name(fb->format->format, &format_name));
2336 		return -EINVAL;
2337 	}
2338 
2339 	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2340 		plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
2341 		plane_state->plane_size.grph.surface_size.x = 0;
2342 		plane_state->plane_size.grph.surface_size.y = 0;
2343 		plane_state->plane_size.grph.surface_size.width = fb->width;
2344 		plane_state->plane_size.grph.surface_size.height = fb->height;
2345 		plane_state->plane_size.grph.surface_pitch =
2346 				fb->pitches[0] / fb->format->cpp[0];
2347 		/* TODO: unhardcode */
2348 		plane_state->color_space = COLOR_SPACE_SRGB;
2349 
2350 	} else {
2351 		awidth = ALIGN(fb->width, 64);
2352 		plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
2353 		plane_state->plane_size.video.luma_size.x = 0;
2354 		plane_state->plane_size.video.luma_size.y = 0;
2355 		plane_state->plane_size.video.luma_size.width = awidth;
2356 		plane_state->plane_size.video.luma_size.height = fb->height;
2357 		/* TODO: unhardcode */
2358 		plane_state->plane_size.video.luma_pitch = awidth;
2359 
2360 		plane_state->plane_size.video.chroma_size.x = 0;
2361 		plane_state->plane_size.video.chroma_size.y = 0;
2362 		plane_state->plane_size.video.chroma_size.width = awidth;
2363 		plane_state->plane_size.video.chroma_size.height = fb->height;
2364 		plane_state->plane_size.video.chroma_pitch = awidth / 2;
2365 
2366 		/* TODO: unhardcode */
2367 		plane_state->color_space = COLOR_SPACE_YCBCR709;
2368 	}
2369 
2370 	memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
2371 
2372 	/* Fill GFX8 params */
2373 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
2374 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
2375 
2376 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2377 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2378 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2379 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2380 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2381 
2382 		/* XXX fix me for VI */
2383 		plane_state->tiling_info.gfx8.num_banks = num_banks;
2384 		plane_state->tiling_info.gfx8.array_mode =
2385 				DC_ARRAY_2D_TILED_THIN1;
2386 		plane_state->tiling_info.gfx8.tile_split = tile_split;
2387 		plane_state->tiling_info.gfx8.bank_width = bankw;
2388 		plane_state->tiling_info.gfx8.bank_height = bankh;
2389 		plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
2390 		plane_state->tiling_info.gfx8.tile_mode =
2391 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
2392 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
2393 			== DC_ARRAY_1D_TILED_THIN1) {
2394 		plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
2395 	}
2396 
2397 	plane_state->tiling_info.gfx8.pipe_config =
2398 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2399 
2400 	if (adev->asic_type == CHIP_VEGA10 ||
2401 	    adev->asic_type == CHIP_VEGA12 ||
2402 	    adev->asic_type == CHIP_VEGA20 ||
2403 	    adev->asic_type == CHIP_RAVEN) {
2404 		/* Fill GFX9 params */
2405 		plane_state->tiling_info.gfx9.num_pipes =
2406 			adev->gfx.config.gb_addr_config_fields.num_pipes;
2407 		plane_state->tiling_info.gfx9.num_banks =
2408 			adev->gfx.config.gb_addr_config_fields.num_banks;
2409 		plane_state->tiling_info.gfx9.pipe_interleave =
2410 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
2411 		plane_state->tiling_info.gfx9.num_shader_engines =
2412 			adev->gfx.config.gb_addr_config_fields.num_se;
2413 		plane_state->tiling_info.gfx9.max_compressed_frags =
2414 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
2415 		plane_state->tiling_info.gfx9.num_rb_per_se =
2416 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
2417 		plane_state->tiling_info.gfx9.swizzle =
2418 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2419 		plane_state->tiling_info.gfx9.shaderEnable = 1;
2420 	}
2421 
2422 	plane_state->visible = true;
2423 	plane_state->scaling_quality.h_taps_c = 0;
2424 	plane_state->scaling_quality.v_taps_c = 0;
2425 
2426 	/* is this needed? is plane_state zeroed at allocation? */
2427 	plane_state->scaling_quality.h_taps = 0;
2428 	plane_state->scaling_quality.v_taps = 0;
2429 	plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
2430 
2431 	return ret;
2432 
2433 }
2434 
2435 static int fill_plane_attributes(struct amdgpu_device *adev,
2436 				 struct dc_plane_state *dc_plane_state,
2437 				 struct drm_plane_state *plane_state,
2438 				 struct drm_crtc_state *crtc_state)
2439 {
2440 	const struct amdgpu_framebuffer *amdgpu_fb =
2441 		to_amdgpu_framebuffer(plane_state->fb);
2442 	const struct drm_crtc *crtc = plane_state->crtc;
2443 	int ret = 0;
2444 
2445 	if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
2446 		return -EINVAL;
2447 
2448 	ret = fill_plane_attributes_from_fb(
2449 		crtc->dev->dev_private,
2450 		dc_plane_state,
2451 		amdgpu_fb);
2452 
2453 	if (ret)
2454 		return ret;
2455 
2456 	/*
2457 	 * Always set input transfer function, since plane state is refreshed
2458 	 * every time.
2459 	 */
2460 	ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
2461 	if (ret) {
2462 		dc_transfer_func_release(dc_plane_state->in_transfer_func);
2463 		dc_plane_state->in_transfer_func = NULL;
2464 	}
2465 
2466 	return ret;
2467 }
2468 
2469 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2470 					   const struct dm_connector_state *dm_state,
2471 					   struct dc_stream_state *stream)
2472 {
2473 	enum amdgpu_rmx_type rmx_type;
2474 
2475 	struct rect src = { 0 }; /* viewport in composition space*/
2476 	struct rect dst = { 0 }; /* stream addressable area */
2477 
2478 	/* no mode. nothing to be done */
2479 	if (!mode)
2480 		return;
2481 
2482 	/* Full screen scaling by default */
2483 	src.width = mode->hdisplay;
2484 	src.height = mode->vdisplay;
2485 	dst.width = stream->timing.h_addressable;
2486 	dst.height = stream->timing.v_addressable;
2487 
2488 	if (dm_state) {
2489 		rmx_type = dm_state->scaling;
2490 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2491 			if (src.width * dst.height <
2492 					src.height * dst.width) {
2493 				/* height needs less upscaling/more downscaling */
2494 				dst.width = src.width *
2495 						dst.height / src.height;
2496 			} else {
2497 				/* width needs less upscaling/more downscaling */
2498 				dst.height = src.height *
2499 						dst.width / src.width;
2500 			}
2501 		} else if (rmx_type == RMX_CENTER) {
2502 			dst = src;
2503 		}
2504 
2505 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
2506 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
2507 
2508 		if (dm_state->underscan_enable) {
2509 			dst.x += dm_state->underscan_hborder / 2;
2510 			dst.y += dm_state->underscan_vborder / 2;
2511 			dst.width -= dm_state->underscan_hborder;
2512 			dst.height -= dm_state->underscan_vborder;
2513 		}
2514 	}
2515 
2516 	stream->src = src;
2517 	stream->dst = dst;
2518 
2519 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
2520 			dst.x, dst.y, dst.width, dst.height);
2521 
2522 }
2523 
2524 static enum dc_color_depth
2525 convert_color_depth_from_display_info(const struct drm_connector *connector)
2526 {
2527 	struct dm_connector_state *dm_conn_state =
2528 		to_dm_connector_state(connector->state);
2529 	uint32_t bpc = connector->display_info.bpc;
2530 
2531 	/* TODO: Remove this when there's support for max_bpc in drm */
2532 	if (dm_conn_state && bpc > dm_conn_state->max_bpc)
2533 		/* Round down to nearest even number. */
2534 		bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
2535 
2536 	switch (bpc) {
2537 	case 0:
2538 		/*
2539 		 * Temporary Work around, DRM doesn't parse color depth for
2540 		 * EDID revision before 1.4
2541 		 * TODO: Fix edid parsing
2542 		 */
2543 		return COLOR_DEPTH_888;
2544 	case 6:
2545 		return COLOR_DEPTH_666;
2546 	case 8:
2547 		return COLOR_DEPTH_888;
2548 	case 10:
2549 		return COLOR_DEPTH_101010;
2550 	case 12:
2551 		return COLOR_DEPTH_121212;
2552 	case 14:
2553 		return COLOR_DEPTH_141414;
2554 	case 16:
2555 		return COLOR_DEPTH_161616;
2556 	default:
2557 		return COLOR_DEPTH_UNDEFINED;
2558 	}
2559 }
2560 
2561 static enum dc_aspect_ratio
2562 get_aspect_ratio(const struct drm_display_mode *mode_in)
2563 {
2564 	/* 1-1 mapping, since both enums follow the HDMI spec. */
2565 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
2566 }
2567 
2568 static enum dc_color_space
2569 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2570 {
2571 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
2572 
2573 	switch (dc_crtc_timing->pixel_encoding)	{
2574 	case PIXEL_ENCODING_YCBCR422:
2575 	case PIXEL_ENCODING_YCBCR444:
2576 	case PIXEL_ENCODING_YCBCR420:
2577 	{
2578 		/*
2579 		 * 27030khz is the separation point between HDTV and SDTV
2580 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
2581 		 * respectively
2582 		 */
2583 		if (dc_crtc_timing->pix_clk_khz > 27030) {
2584 			if (dc_crtc_timing->flags.Y_ONLY)
2585 				color_space =
2586 					COLOR_SPACE_YCBCR709_LIMITED;
2587 			else
2588 				color_space = COLOR_SPACE_YCBCR709;
2589 		} else {
2590 			if (dc_crtc_timing->flags.Y_ONLY)
2591 				color_space =
2592 					COLOR_SPACE_YCBCR601_LIMITED;
2593 			else
2594 				color_space = COLOR_SPACE_YCBCR601;
2595 		}
2596 
2597 	}
2598 	break;
2599 	case PIXEL_ENCODING_RGB:
2600 		color_space = COLOR_SPACE_SRGB;
2601 		break;
2602 
2603 	default:
2604 		WARN_ON(1);
2605 		break;
2606 	}
2607 
2608 	return color_space;
2609 }
2610 
2611 static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
2612 {
2613 	if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2614 		return;
2615 
2616 	timing_out->display_color_depth--;
2617 }
2618 
2619 static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
2620 						const struct drm_display_info *info)
2621 {
2622 	int normalized_clk;
2623 	if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2624 		return;
2625 	do {
2626 		normalized_clk = timing_out->pix_clk_khz;
2627 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
2628 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
2629 			normalized_clk /= 2;
2630 		/* Adjusting pix clock following on HDMI spec based on colour depth */
2631 		switch (timing_out->display_color_depth) {
2632 		case COLOR_DEPTH_101010:
2633 			normalized_clk = (normalized_clk * 30) / 24;
2634 			break;
2635 		case COLOR_DEPTH_121212:
2636 			normalized_clk = (normalized_clk * 36) / 24;
2637 			break;
2638 		case COLOR_DEPTH_161616:
2639 			normalized_clk = (normalized_clk * 48) / 24;
2640 			break;
2641 		default:
2642 			return;
2643 		}
2644 		if (normalized_clk <= info->max_tmds_clock)
2645 			return;
2646 		reduce_mode_colour_depth(timing_out);
2647 
2648 	} while (timing_out->display_color_depth > COLOR_DEPTH_888);
2649 
2650 }
2651 
2652 static void
2653 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2654 					     const struct drm_display_mode *mode_in,
2655 					     const struct drm_connector *connector,
2656 					     const struct dc_stream_state *old_stream)
2657 {
2658 	struct dc_crtc_timing *timing_out = &stream->timing;
2659 	const struct drm_display_info *info = &connector->display_info;
2660 
2661 	memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2662 
2663 	timing_out->h_border_left = 0;
2664 	timing_out->h_border_right = 0;
2665 	timing_out->v_border_top = 0;
2666 	timing_out->v_border_bottom = 0;
2667 	/* TODO: un-hardcode */
2668 	if (drm_mode_is_420_only(info, mode_in)
2669 			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2670 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
2671 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2672 			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2673 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2674 	else
2675 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2676 
2677 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2678 	timing_out->display_color_depth = convert_color_depth_from_display_info(
2679 			connector);
2680 	timing_out->scan_type = SCANNING_TYPE_NODATA;
2681 	timing_out->hdmi_vic = 0;
2682 
2683 	if(old_stream) {
2684 		timing_out->vic = old_stream->timing.vic;
2685 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
2686 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
2687 	} else {
2688 		timing_out->vic = drm_match_cea_mode(mode_in);
2689 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2690 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2691 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2692 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2693 	}
2694 
2695 	timing_out->h_addressable = mode_in->crtc_hdisplay;
2696 	timing_out->h_total = mode_in->crtc_htotal;
2697 	timing_out->h_sync_width =
2698 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
2699 	timing_out->h_front_porch =
2700 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
2701 	timing_out->v_total = mode_in->crtc_vtotal;
2702 	timing_out->v_addressable = mode_in->crtc_vdisplay;
2703 	timing_out->v_front_porch =
2704 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2705 	timing_out->v_sync_width =
2706 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2707 	timing_out->pix_clk_khz = mode_in->crtc_clock;
2708 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2709 
2710 	stream->output_color_space = get_output_color_space(timing_out);
2711 
2712 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
2713 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
2714 	if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2715 		adjust_colour_depth_from_display_info(timing_out, info);
2716 }
2717 
2718 static void fill_audio_info(struct audio_info *audio_info,
2719 			    const struct drm_connector *drm_connector,
2720 			    const struct dc_sink *dc_sink)
2721 {
2722 	int i = 0;
2723 	int cea_revision = 0;
2724 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
2725 
2726 	audio_info->manufacture_id = edid_caps->manufacturer_id;
2727 	audio_info->product_id = edid_caps->product_id;
2728 
2729 	cea_revision = drm_connector->display_info.cea_rev;
2730 
2731 	strscpy(audio_info->display_name,
2732 		edid_caps->display_name,
2733 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
2734 
2735 	if (cea_revision >= 3) {
2736 		audio_info->mode_count = edid_caps->audio_mode_count;
2737 
2738 		for (i = 0; i < audio_info->mode_count; ++i) {
2739 			audio_info->modes[i].format_code =
2740 					(enum audio_format_code)
2741 					(edid_caps->audio_modes[i].format_code);
2742 			audio_info->modes[i].channel_count =
2743 					edid_caps->audio_modes[i].channel_count;
2744 			audio_info->modes[i].sample_rates.all =
2745 					edid_caps->audio_modes[i].sample_rate;
2746 			audio_info->modes[i].sample_size =
2747 					edid_caps->audio_modes[i].sample_size;
2748 		}
2749 	}
2750 
2751 	audio_info->flags.all = edid_caps->speaker_flags;
2752 
2753 	/* TODO: We only check for the progressive mode, check for interlace mode too */
2754 	if (drm_connector->latency_present[0]) {
2755 		audio_info->video_latency = drm_connector->video_latency[0];
2756 		audio_info->audio_latency = drm_connector->audio_latency[0];
2757 	}
2758 
2759 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2760 
2761 }
2762 
2763 static void
2764 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
2765 				      struct drm_display_mode *dst_mode)
2766 {
2767 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
2768 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
2769 	dst_mode->crtc_clock = src_mode->crtc_clock;
2770 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
2771 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
2772 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
2773 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
2774 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
2775 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
2776 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
2777 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
2778 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
2779 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
2780 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
2781 }
2782 
2783 static void
2784 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2785 					const struct drm_display_mode *native_mode,
2786 					bool scale_enabled)
2787 {
2788 	if (scale_enabled) {
2789 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2790 	} else if (native_mode->clock == drm_mode->clock &&
2791 			native_mode->htotal == drm_mode->htotal &&
2792 			native_mode->vtotal == drm_mode->vtotal) {
2793 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2794 	} else {
2795 		/* no scaling nor amdgpu inserted, no need to patch */
2796 	}
2797 }
2798 
2799 static struct dc_sink *
2800 create_fake_sink(struct amdgpu_dm_connector *aconnector)
2801 {
2802 	struct dc_sink_init_data sink_init_data = { 0 };
2803 	struct dc_sink *sink = NULL;
2804 	sink_init_data.link = aconnector->dc_link;
2805 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2806 
2807 	sink = dc_sink_create(&sink_init_data);
2808 	if (!sink) {
2809 		DRM_ERROR("Failed to create sink!\n");
2810 		return NULL;
2811 	}
2812 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2813 
2814 	return sink;
2815 }
2816 
2817 static void set_multisync_trigger_params(
2818 		struct dc_stream_state *stream)
2819 {
2820 	if (stream->triggered_crtc_reset.enabled) {
2821 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
2822 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
2823 	}
2824 }
2825 
2826 static void set_master_stream(struct dc_stream_state *stream_set[],
2827 			      int stream_count)
2828 {
2829 	int j, highest_rfr = 0, master_stream = 0;
2830 
2831 	for (j = 0;  j < stream_count; j++) {
2832 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
2833 			int refresh_rate = 0;
2834 
2835 			refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
2836 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
2837 			if (refresh_rate > highest_rfr) {
2838 				highest_rfr = refresh_rate;
2839 				master_stream = j;
2840 			}
2841 		}
2842 	}
2843 	for (j = 0;  j < stream_count; j++) {
2844 		if (stream_set[j])
2845 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
2846 	}
2847 }
2848 
2849 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
2850 {
2851 	int i = 0;
2852 
2853 	if (context->stream_count < 2)
2854 		return;
2855 	for (i = 0; i < context->stream_count ; i++) {
2856 		if (!context->streams[i])
2857 			continue;
2858 		/*
2859 		 * TODO: add a function to read AMD VSDB bits and set
2860 		 * crtc_sync_master.multi_sync_enabled flag
2861 		 * For now it's set to false
2862 		 */
2863 		set_multisync_trigger_params(context->streams[i]);
2864 	}
2865 	set_master_stream(context->streams, context->stream_count);
2866 }
2867 
2868 static struct dc_stream_state *
2869 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2870 		       const struct drm_display_mode *drm_mode,
2871 		       const struct dm_connector_state *dm_state,
2872 		       const struct dc_stream_state *old_stream)
2873 {
2874 	struct drm_display_mode *preferred_mode = NULL;
2875 	struct drm_connector *drm_connector;
2876 	struct dc_stream_state *stream = NULL;
2877 	struct drm_display_mode mode = *drm_mode;
2878 	bool native_mode_found = false;
2879 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
2880 	int mode_refresh;
2881 	int preferred_refresh = 0;
2882 
2883 	struct dc_sink *sink = NULL;
2884 	if (aconnector == NULL) {
2885 		DRM_ERROR("aconnector is NULL!\n");
2886 		return stream;
2887 	}
2888 
2889 	drm_connector = &aconnector->base;
2890 
2891 	if (!aconnector->dc_sink) {
2892 		if (!aconnector->mst_port) {
2893 			sink = create_fake_sink(aconnector);
2894 			if (!sink)
2895 				return stream;
2896 		}
2897 	} else {
2898 		sink = aconnector->dc_sink;
2899 	}
2900 
2901 	stream = dc_create_stream_for_sink(sink);
2902 
2903 	if (stream == NULL) {
2904 		DRM_ERROR("Failed to create stream for sink!\n");
2905 		goto finish;
2906 	}
2907 
2908 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
2909 		/* Search for preferred mode */
2910 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
2911 			native_mode_found = true;
2912 			break;
2913 		}
2914 	}
2915 	if (!native_mode_found)
2916 		preferred_mode = list_first_entry_or_null(
2917 				&aconnector->base.modes,
2918 				struct drm_display_mode,
2919 				head);
2920 
2921 	mode_refresh = drm_mode_vrefresh(&mode);
2922 
2923 	if (preferred_mode == NULL) {
2924 		/*
2925 		 * This may not be an error, the use case is when we have no
2926 		 * usermode calls to reset and set mode upon hotplug. In this
2927 		 * case, we call set mode ourselves to restore the previous mode
2928 		 * and the modelist may not be filled in in time.
2929 		 */
2930 		DRM_DEBUG_DRIVER("No preferred mode found\n");
2931 	} else {
2932 		decide_crtc_timing_for_drm_display_mode(
2933 				&mode, preferred_mode,
2934 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
2935 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
2936 	}
2937 
2938 	if (!dm_state)
2939 		drm_mode_set_crtcinfo(&mode, 0);
2940 
2941 	/*
2942 	* If scaling is enabled and refresh rate didn't change
2943 	* we copy the vic and polarities of the old timings
2944 	*/
2945 	if (!scale || mode_refresh != preferred_refresh)
2946 		fill_stream_properties_from_drm_display_mode(stream,
2947 			&mode, &aconnector->base, NULL);
2948 	else
2949 		fill_stream_properties_from_drm_display_mode(stream,
2950 			&mode, &aconnector->base, old_stream);
2951 
2952 	update_stream_scaling_settings(&mode, dm_state, stream);
2953 
2954 	fill_audio_info(
2955 		&stream->audio_info,
2956 		drm_connector,
2957 		sink);
2958 
2959 	update_stream_signal(stream);
2960 
2961 	if (dm_state && dm_state->freesync_capable)
2962 		stream->ignore_msa_timing_param = true;
2963 
2964 finish:
2965 	if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
2966 		dc_sink_release(sink);
2967 
2968 	return stream;
2969 }
2970 
2971 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
2972 {
2973 	drm_crtc_cleanup(crtc);
2974 	kfree(crtc);
2975 }
2976 
2977 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
2978 				  struct drm_crtc_state *state)
2979 {
2980 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
2981 
2982 	/* TODO Destroy dc_stream objects are stream object is flattened */
2983 	if (cur->stream)
2984 		dc_stream_release(cur->stream);
2985 
2986 
2987 	__drm_atomic_helper_crtc_destroy_state(state);
2988 
2989 
2990 	kfree(state);
2991 }
2992 
2993 static void dm_crtc_reset_state(struct drm_crtc *crtc)
2994 {
2995 	struct dm_crtc_state *state;
2996 
2997 	if (crtc->state)
2998 		dm_crtc_destroy_state(crtc, crtc->state);
2999 
3000 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3001 	if (WARN_ON(!state))
3002 		return;
3003 
3004 	crtc->state = &state->base;
3005 	crtc->state->crtc = crtc;
3006 
3007 }
3008 
3009 static struct drm_crtc_state *
3010 dm_crtc_duplicate_state(struct drm_crtc *crtc)
3011 {
3012 	struct dm_crtc_state *state, *cur;
3013 
3014 	cur = to_dm_crtc_state(crtc->state);
3015 
3016 	if (WARN_ON(!crtc->state))
3017 		return NULL;
3018 
3019 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3020 	if (!state)
3021 		return NULL;
3022 
3023 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
3024 
3025 	if (cur->stream) {
3026 		state->stream = cur->stream;
3027 		dc_stream_retain(state->stream);
3028 	}
3029 
3030 	state->vrr_params = cur->vrr_params;
3031 	state->vrr_infopacket = cur->vrr_infopacket;
3032 	state->abm_level = cur->abm_level;
3033 	state->vrr_supported = cur->vrr_supported;
3034 	state->freesync_config = cur->freesync_config;
3035 	state->crc_enabled = cur->crc_enabled;
3036 
3037 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
3038 
3039 	return &state->base;
3040 }
3041 
3042 
3043 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
3044 {
3045 	enum dc_irq_source irq_source;
3046 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3047 	struct amdgpu_device *adev = crtc->dev->dev_private;
3048 
3049 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
3050 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3051 }
3052 
3053 static int dm_enable_vblank(struct drm_crtc *crtc)
3054 {
3055 	return dm_set_vblank(crtc, true);
3056 }
3057 
3058 static void dm_disable_vblank(struct drm_crtc *crtc)
3059 {
3060 	dm_set_vblank(crtc, false);
3061 }
3062 
3063 /* Implemented only the options currently availible for the driver */
3064 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
3065 	.reset = dm_crtc_reset_state,
3066 	.destroy = amdgpu_dm_crtc_destroy,
3067 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
3068 	.set_config = drm_atomic_helper_set_config,
3069 	.page_flip = drm_atomic_helper_page_flip,
3070 	.atomic_duplicate_state = dm_crtc_duplicate_state,
3071 	.atomic_destroy_state = dm_crtc_destroy_state,
3072 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
3073 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
3074 	.enable_vblank = dm_enable_vblank,
3075 	.disable_vblank = dm_disable_vblank,
3076 };
3077 
3078 static enum drm_connector_status
3079 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
3080 {
3081 	bool connected;
3082 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3083 
3084 	/*
3085 	 * Notes:
3086 	 * 1. This interface is NOT called in context of HPD irq.
3087 	 * 2. This interface *is called* in context of user-mode ioctl. Which
3088 	 * makes it a bad place for *any* MST-related activity.
3089 	 */
3090 
3091 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
3092 	    !aconnector->fake_enable)
3093 		connected = (aconnector->dc_sink != NULL);
3094 	else
3095 		connected = (aconnector->base.force == DRM_FORCE_ON);
3096 
3097 	return (connected ? connector_status_connected :
3098 			connector_status_disconnected);
3099 }
3100 
3101 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
3102 					    struct drm_connector_state *connector_state,
3103 					    struct drm_property *property,
3104 					    uint64_t val)
3105 {
3106 	struct drm_device *dev = connector->dev;
3107 	struct amdgpu_device *adev = dev->dev_private;
3108 	struct dm_connector_state *dm_old_state =
3109 		to_dm_connector_state(connector->state);
3110 	struct dm_connector_state *dm_new_state =
3111 		to_dm_connector_state(connector_state);
3112 
3113 	int ret = -EINVAL;
3114 
3115 	if (property == dev->mode_config.scaling_mode_property) {
3116 		enum amdgpu_rmx_type rmx_type;
3117 
3118 		switch (val) {
3119 		case DRM_MODE_SCALE_CENTER:
3120 			rmx_type = RMX_CENTER;
3121 			break;
3122 		case DRM_MODE_SCALE_ASPECT:
3123 			rmx_type = RMX_ASPECT;
3124 			break;
3125 		case DRM_MODE_SCALE_FULLSCREEN:
3126 			rmx_type = RMX_FULL;
3127 			break;
3128 		case DRM_MODE_SCALE_NONE:
3129 		default:
3130 			rmx_type = RMX_OFF;
3131 			break;
3132 		}
3133 
3134 		if (dm_old_state->scaling == rmx_type)
3135 			return 0;
3136 
3137 		dm_new_state->scaling = rmx_type;
3138 		ret = 0;
3139 	} else if (property == adev->mode_info.underscan_hborder_property) {
3140 		dm_new_state->underscan_hborder = val;
3141 		ret = 0;
3142 	} else if (property == adev->mode_info.underscan_vborder_property) {
3143 		dm_new_state->underscan_vborder = val;
3144 		ret = 0;
3145 	} else if (property == adev->mode_info.underscan_property) {
3146 		dm_new_state->underscan_enable = val;
3147 		ret = 0;
3148 	} else if (property == adev->mode_info.max_bpc_property) {
3149 		dm_new_state->max_bpc = val;
3150 		ret = 0;
3151 	} else if (property == adev->mode_info.abm_level_property) {
3152 		dm_new_state->abm_level = val;
3153 		ret = 0;
3154 	}
3155 
3156 	return ret;
3157 }
3158 
3159 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
3160 					    const struct drm_connector_state *state,
3161 					    struct drm_property *property,
3162 					    uint64_t *val)
3163 {
3164 	struct drm_device *dev = connector->dev;
3165 	struct amdgpu_device *adev = dev->dev_private;
3166 	struct dm_connector_state *dm_state =
3167 		to_dm_connector_state(state);
3168 	int ret = -EINVAL;
3169 
3170 	if (property == dev->mode_config.scaling_mode_property) {
3171 		switch (dm_state->scaling) {
3172 		case RMX_CENTER:
3173 			*val = DRM_MODE_SCALE_CENTER;
3174 			break;
3175 		case RMX_ASPECT:
3176 			*val = DRM_MODE_SCALE_ASPECT;
3177 			break;
3178 		case RMX_FULL:
3179 			*val = DRM_MODE_SCALE_FULLSCREEN;
3180 			break;
3181 		case RMX_OFF:
3182 		default:
3183 			*val = DRM_MODE_SCALE_NONE;
3184 			break;
3185 		}
3186 		ret = 0;
3187 	} else if (property == adev->mode_info.underscan_hborder_property) {
3188 		*val = dm_state->underscan_hborder;
3189 		ret = 0;
3190 	} else if (property == adev->mode_info.underscan_vborder_property) {
3191 		*val = dm_state->underscan_vborder;
3192 		ret = 0;
3193 	} else if (property == adev->mode_info.underscan_property) {
3194 		*val = dm_state->underscan_enable;
3195 		ret = 0;
3196 	} else if (property == adev->mode_info.max_bpc_property) {
3197 		*val = dm_state->max_bpc;
3198 		ret = 0;
3199 	} else if (property == adev->mode_info.abm_level_property) {
3200 		*val = dm_state->abm_level;
3201 		ret = 0;
3202 	}
3203 
3204 	return ret;
3205 }
3206 
3207 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
3208 {
3209 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3210 	const struct dc_link *link = aconnector->dc_link;
3211 	struct amdgpu_device *adev = connector->dev->dev_private;
3212 	struct amdgpu_display_manager *dm = &adev->dm;
3213 
3214 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3215 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3216 
3217 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3218 	    link->type != dc_connection_none &&
3219 	    dm->backlight_dev) {
3220 		backlight_device_unregister(dm->backlight_dev);
3221 		dm->backlight_dev = NULL;
3222 	}
3223 #endif
3224 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
3225 	drm_connector_unregister(connector);
3226 	drm_connector_cleanup(connector);
3227 	kfree(connector);
3228 }
3229 
3230 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
3231 {
3232 	struct dm_connector_state *state =
3233 		to_dm_connector_state(connector->state);
3234 
3235 	if (connector->state)
3236 		__drm_atomic_helper_connector_destroy_state(connector->state);
3237 
3238 	kfree(state);
3239 
3240 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3241 
3242 	if (state) {
3243 		state->scaling = RMX_OFF;
3244 		state->underscan_enable = false;
3245 		state->underscan_hborder = 0;
3246 		state->underscan_vborder = 0;
3247 		state->max_bpc = 8;
3248 
3249 		__drm_atomic_helper_connector_reset(connector, &state->base);
3250 	}
3251 }
3252 
3253 struct drm_connector_state *
3254 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
3255 {
3256 	struct dm_connector_state *state =
3257 		to_dm_connector_state(connector->state);
3258 
3259 	struct dm_connector_state *new_state =
3260 			kmemdup(state, sizeof(*state), GFP_KERNEL);
3261 
3262 	if (!new_state)
3263 		return NULL;
3264 
3265 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
3266 
3267 	new_state->freesync_capable = state->freesync_capable;
3268 	new_state->abm_level = state->abm_level;
3269 	new_state->scaling = state->scaling;
3270 	new_state->underscan_enable = state->underscan_enable;
3271 	new_state->underscan_hborder = state->underscan_hborder;
3272 	new_state->underscan_vborder = state->underscan_vborder;
3273 	new_state->max_bpc = state->max_bpc;
3274 
3275 	return &new_state->base;
3276 }
3277 
3278 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
3279 	.reset = amdgpu_dm_connector_funcs_reset,
3280 	.detect = amdgpu_dm_connector_detect,
3281 	.fill_modes = drm_helper_probe_single_connector_modes,
3282 	.destroy = amdgpu_dm_connector_destroy,
3283 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
3284 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
3285 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
3286 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
3287 };
3288 
3289 static int get_modes(struct drm_connector *connector)
3290 {
3291 	return amdgpu_dm_connector_get_modes(connector);
3292 }
3293 
3294 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
3295 {
3296 	struct dc_sink_init_data init_params = {
3297 			.link = aconnector->dc_link,
3298 			.sink_signal = SIGNAL_TYPE_VIRTUAL
3299 	};
3300 	struct edid *edid;
3301 
3302 	if (!aconnector->base.edid_blob_ptr) {
3303 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
3304 				aconnector->base.name);
3305 
3306 		aconnector->base.force = DRM_FORCE_OFF;
3307 		aconnector->base.override_edid = false;
3308 		return;
3309 	}
3310 
3311 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
3312 
3313 	aconnector->edid = edid;
3314 
3315 	aconnector->dc_em_sink = dc_link_add_remote_sink(
3316 		aconnector->dc_link,
3317 		(uint8_t *)edid,
3318 		(edid->extensions + 1) * EDID_LENGTH,
3319 		&init_params);
3320 
3321 	if (aconnector->base.force == DRM_FORCE_ON)
3322 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
3323 		aconnector->dc_link->local_sink :
3324 		aconnector->dc_em_sink;
3325 }
3326 
3327 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
3328 {
3329 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
3330 
3331 	/*
3332 	 * In case of headless boot with force on for DP managed connector
3333 	 * Those settings have to be != 0 to get initial modeset
3334 	 */
3335 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
3336 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
3337 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
3338 	}
3339 
3340 
3341 	aconnector->base.override_edid = true;
3342 	create_eml_sink(aconnector);
3343 }
3344 
3345 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3346 				   struct drm_display_mode *mode)
3347 {
3348 	int result = MODE_ERROR;
3349 	struct dc_sink *dc_sink;
3350 	struct amdgpu_device *adev = connector->dev->dev_private;
3351 	/* TODO: Unhardcode stream count */
3352 	struct dc_stream_state *stream;
3353 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3354 	enum dc_status dc_result = DC_OK;
3355 
3356 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
3357 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
3358 		return result;
3359 
3360 	/*
3361 	 * Only run this the first time mode_valid is called to initilialize
3362 	 * EDID mgmt
3363 	 */
3364 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
3365 		!aconnector->dc_em_sink)
3366 		handle_edid_mgmt(aconnector);
3367 
3368 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
3369 
3370 	if (dc_sink == NULL) {
3371 		DRM_ERROR("dc_sink is NULL!\n");
3372 		goto fail;
3373 	}
3374 
3375 	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
3376 	if (stream == NULL) {
3377 		DRM_ERROR("Failed to create stream for sink!\n");
3378 		goto fail;
3379 	}
3380 
3381 	dc_result = dc_validate_stream(adev->dm.dc, stream);
3382 
3383 	if (dc_result == DC_OK)
3384 		result = MODE_OK;
3385 	else
3386 		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
3387 			      mode->vdisplay,
3388 			      mode->hdisplay,
3389 			      mode->clock,
3390 			      dc_result);
3391 
3392 	dc_stream_release(stream);
3393 
3394 fail:
3395 	/* TODO: error handling*/
3396 	return result;
3397 }
3398 
3399 static const struct drm_connector_helper_funcs
3400 amdgpu_dm_connector_helper_funcs = {
3401 	/*
3402 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
3403 	 * modes will be filtered by drm_mode_validate_size(), and those modes
3404 	 * are missing after user start lightdm. So we need to renew modes list.
3405 	 * in get_modes call back, not just return the modes count
3406 	 */
3407 	.get_modes = get_modes,
3408 	.mode_valid = amdgpu_dm_connector_mode_valid,
3409 };
3410 
3411 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
3412 {
3413 }
3414 
3415 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
3416 				       struct drm_crtc_state *state)
3417 {
3418 	struct amdgpu_device *adev = crtc->dev->dev_private;
3419 	struct dc *dc = adev->dm.dc;
3420 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
3421 	int ret = -EINVAL;
3422 
3423 	if (unlikely(!dm_crtc_state->stream &&
3424 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
3425 		WARN_ON(1);
3426 		return ret;
3427 	}
3428 
3429 	/* In some use cases, like reset, no stream is attached */
3430 	if (!dm_crtc_state->stream)
3431 		return 0;
3432 
3433 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
3434 		return 0;
3435 
3436 	return ret;
3437 }
3438 
3439 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
3440 				      const struct drm_display_mode *mode,
3441 				      struct drm_display_mode *adjusted_mode)
3442 {
3443 	return true;
3444 }
3445 
3446 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
3447 	.disable = dm_crtc_helper_disable,
3448 	.atomic_check = dm_crtc_helper_atomic_check,
3449 	.mode_fixup = dm_crtc_helper_mode_fixup
3450 };
3451 
3452 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
3453 {
3454 
3455 }
3456 
3457 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
3458 					  struct drm_crtc_state *crtc_state,
3459 					  struct drm_connector_state *conn_state)
3460 {
3461 	return 0;
3462 }
3463 
3464 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
3465 	.disable = dm_encoder_helper_disable,
3466 	.atomic_check = dm_encoder_helper_atomic_check
3467 };
3468 
3469 static void dm_drm_plane_reset(struct drm_plane *plane)
3470 {
3471 	struct dm_plane_state *amdgpu_state = NULL;
3472 
3473 	if (plane->state)
3474 		plane->funcs->atomic_destroy_state(plane, plane->state);
3475 
3476 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
3477 	WARN_ON(amdgpu_state == NULL);
3478 
3479 	if (amdgpu_state) {
3480 		plane->state = &amdgpu_state->base;
3481 		plane->state->plane = plane;
3482 		plane->state->rotation = DRM_MODE_ROTATE_0;
3483 	}
3484 }
3485 
3486 static struct drm_plane_state *
3487 dm_drm_plane_duplicate_state(struct drm_plane *plane)
3488 {
3489 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
3490 
3491 	old_dm_plane_state = to_dm_plane_state(plane->state);
3492 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
3493 	if (!dm_plane_state)
3494 		return NULL;
3495 
3496 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
3497 
3498 	if (old_dm_plane_state->dc_state) {
3499 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
3500 		dc_plane_state_retain(dm_plane_state->dc_state);
3501 	}
3502 
3503 	return &dm_plane_state->base;
3504 }
3505 
3506 void dm_drm_plane_destroy_state(struct drm_plane *plane,
3507 				struct drm_plane_state *state)
3508 {
3509 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3510 
3511 	if (dm_plane_state->dc_state)
3512 		dc_plane_state_release(dm_plane_state->dc_state);
3513 
3514 	drm_atomic_helper_plane_destroy_state(plane, state);
3515 }
3516 
3517 static const struct drm_plane_funcs dm_plane_funcs = {
3518 	.update_plane	= drm_atomic_helper_update_plane,
3519 	.disable_plane	= drm_atomic_helper_disable_plane,
3520 	.destroy	= drm_primary_helper_destroy,
3521 	.reset = dm_drm_plane_reset,
3522 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
3523 	.atomic_destroy_state = dm_drm_plane_destroy_state,
3524 };
3525 
3526 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
3527 				      struct drm_plane_state *new_state)
3528 {
3529 	struct amdgpu_framebuffer *afb;
3530 	struct drm_gem_object *obj;
3531 	struct amdgpu_device *adev;
3532 	struct amdgpu_bo *rbo;
3533 	uint64_t chroma_addr = 0;
3534 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
3535 	unsigned int awidth;
3536 	uint32_t domain;
3537 	int r;
3538 
3539 	dm_plane_state_old = to_dm_plane_state(plane->state);
3540 	dm_plane_state_new = to_dm_plane_state(new_state);
3541 
3542 	if (!new_state->fb) {
3543 		DRM_DEBUG_DRIVER("No FB bound\n");
3544 		return 0;
3545 	}
3546 
3547 	afb = to_amdgpu_framebuffer(new_state->fb);
3548 	obj = new_state->fb->obj[0];
3549 	rbo = gem_to_amdgpu_bo(obj);
3550 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
3551 	r = amdgpu_bo_reserve(rbo, false);
3552 	if (unlikely(r != 0))
3553 		return r;
3554 
3555 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
3556 		domain = amdgpu_display_supported_domains(adev);
3557 	else
3558 		domain = AMDGPU_GEM_DOMAIN_VRAM;
3559 
3560 	r = amdgpu_bo_pin(rbo, domain);
3561 	if (unlikely(r != 0)) {
3562 		if (r != -ERESTARTSYS)
3563 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
3564 		amdgpu_bo_unreserve(rbo);
3565 		return r;
3566 	}
3567 
3568 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
3569 	if (unlikely(r != 0)) {
3570 		amdgpu_bo_unpin(rbo);
3571 		amdgpu_bo_unreserve(rbo);
3572 		DRM_ERROR("%p bind failed\n", rbo);
3573 		return r;
3574 	}
3575 	amdgpu_bo_unreserve(rbo);
3576 
3577 	afb->address = amdgpu_bo_gpu_offset(rbo);
3578 
3579 	amdgpu_bo_ref(rbo);
3580 
3581 	if (dm_plane_state_new->dc_state &&
3582 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
3583 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
3584 
3585 		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3586 			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
3587 			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
3588 		} else {
3589 			awidth = ALIGN(new_state->fb->width, 64);
3590 			plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3591 			plane_state->address.video_progressive.luma_addr.low_part
3592 							= lower_32_bits(afb->address);
3593 			plane_state->address.video_progressive.luma_addr.high_part
3594 							= upper_32_bits(afb->address);
3595 			chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
3596 			plane_state->address.video_progressive.chroma_addr.low_part
3597 							= lower_32_bits(chroma_addr);
3598 			plane_state->address.video_progressive.chroma_addr.high_part
3599 							= upper_32_bits(chroma_addr);
3600 		}
3601 	}
3602 
3603 	return 0;
3604 }
3605 
3606 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
3607 				       struct drm_plane_state *old_state)
3608 {
3609 	struct amdgpu_bo *rbo;
3610 	int r;
3611 
3612 	if (!old_state->fb)
3613 		return;
3614 
3615 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
3616 	r = amdgpu_bo_reserve(rbo, false);
3617 	if (unlikely(r)) {
3618 		DRM_ERROR("failed to reserve rbo before unpin\n");
3619 		return;
3620 	}
3621 
3622 	amdgpu_bo_unpin(rbo);
3623 	amdgpu_bo_unreserve(rbo);
3624 	amdgpu_bo_unref(&rbo);
3625 }
3626 
3627 static int dm_plane_atomic_check(struct drm_plane *plane,
3628 				 struct drm_plane_state *state)
3629 {
3630 	struct amdgpu_device *adev = plane->dev->dev_private;
3631 	struct dc *dc = adev->dm.dc;
3632 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3633 
3634 	if (!dm_plane_state->dc_state)
3635 		return 0;
3636 
3637 	if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
3638 		return -EINVAL;
3639 
3640 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3641 		return 0;
3642 
3643 	return -EINVAL;
3644 }
3645 
3646 static int dm_plane_atomic_async_check(struct drm_plane *plane,
3647 				       struct drm_plane_state *new_plane_state)
3648 {
3649 	struct drm_plane_state *old_plane_state =
3650 		drm_atomic_get_old_plane_state(new_plane_state->state, plane);
3651 
3652 	/* Only support async updates on cursor planes. */
3653 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
3654 		return -EINVAL;
3655 
3656 	/*
3657 	 * DRM calls prepare_fb and cleanup_fb on new_plane_state for
3658 	 * async commits so don't allow fb changes.
3659 	 */
3660 	if (old_plane_state->fb != new_plane_state->fb)
3661 		return -EINVAL;
3662 
3663 	return 0;
3664 }
3665 
3666 static void dm_plane_atomic_async_update(struct drm_plane *plane,
3667 					 struct drm_plane_state *new_state)
3668 {
3669 	struct drm_plane_state *old_state =
3670 		drm_atomic_get_old_plane_state(new_state->state, plane);
3671 
3672 	if (plane->state->fb != new_state->fb)
3673 		drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
3674 
3675 	plane->state->src_x = new_state->src_x;
3676 	plane->state->src_y = new_state->src_y;
3677 	plane->state->src_w = new_state->src_w;
3678 	plane->state->src_h = new_state->src_h;
3679 	plane->state->crtc_x = new_state->crtc_x;
3680 	plane->state->crtc_y = new_state->crtc_y;
3681 	plane->state->crtc_w = new_state->crtc_w;
3682 	plane->state->crtc_h = new_state->crtc_h;
3683 
3684 	handle_cursor_update(plane, old_state);
3685 }
3686 
3687 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
3688 	.prepare_fb = dm_plane_helper_prepare_fb,
3689 	.cleanup_fb = dm_plane_helper_cleanup_fb,
3690 	.atomic_check = dm_plane_atomic_check,
3691 	.atomic_async_check = dm_plane_atomic_async_check,
3692 	.atomic_async_update = dm_plane_atomic_async_update
3693 };
3694 
3695 /*
3696  * TODO: these are currently initialized to rgb formats only.
3697  * For future use cases we should either initialize them dynamically based on
3698  * plane capabilities, or initialize this array to all formats, so internal drm
3699  * check will succeed, and let DC implement proper check
3700  */
3701 static const uint32_t rgb_formats[] = {
3702 	DRM_FORMAT_RGB888,
3703 	DRM_FORMAT_XRGB8888,
3704 	DRM_FORMAT_ARGB8888,
3705 	DRM_FORMAT_RGBA8888,
3706 	DRM_FORMAT_XRGB2101010,
3707 	DRM_FORMAT_XBGR2101010,
3708 	DRM_FORMAT_ARGB2101010,
3709 	DRM_FORMAT_ABGR2101010,
3710 	DRM_FORMAT_XBGR8888,
3711 	DRM_FORMAT_ABGR8888,
3712 };
3713 
3714 static const uint32_t yuv_formats[] = {
3715 	DRM_FORMAT_NV12,
3716 	DRM_FORMAT_NV21,
3717 };
3718 
3719 static const u32 cursor_formats[] = {
3720 	DRM_FORMAT_ARGB8888
3721 };
3722 
3723 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3724 				struct drm_plane *plane,
3725 				unsigned long possible_crtcs)
3726 {
3727 	int res = -EPERM;
3728 
3729 	switch (plane->type) {
3730 	case DRM_PLANE_TYPE_PRIMARY:
3731 		res = drm_universal_plane_init(
3732 				dm->adev->ddev,
3733 				plane,
3734 				possible_crtcs,
3735 				&dm_plane_funcs,
3736 				rgb_formats,
3737 				ARRAY_SIZE(rgb_formats),
3738 				NULL, plane->type, NULL);
3739 		break;
3740 	case DRM_PLANE_TYPE_OVERLAY:
3741 		res = drm_universal_plane_init(
3742 				dm->adev->ddev,
3743 				plane,
3744 				possible_crtcs,
3745 				&dm_plane_funcs,
3746 				yuv_formats,
3747 				ARRAY_SIZE(yuv_formats),
3748 				NULL, plane->type, NULL);
3749 		break;
3750 	case DRM_PLANE_TYPE_CURSOR:
3751 		res = drm_universal_plane_init(
3752 				dm->adev->ddev,
3753 				plane,
3754 				possible_crtcs,
3755 				&dm_plane_funcs,
3756 				cursor_formats,
3757 				ARRAY_SIZE(cursor_formats),
3758 				NULL, plane->type, NULL);
3759 		break;
3760 	}
3761 
3762 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
3763 
3764 	/* Create (reset) the plane state */
3765 	if (plane->funcs->reset)
3766 		plane->funcs->reset(plane);
3767 
3768 
3769 	return res;
3770 }
3771 
3772 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3773 			       struct drm_plane *plane,
3774 			       uint32_t crtc_index)
3775 {
3776 	struct amdgpu_crtc *acrtc = NULL;
3777 	struct drm_plane *cursor_plane;
3778 
3779 	int res = -ENOMEM;
3780 
3781 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3782 	if (!cursor_plane)
3783 		goto fail;
3784 
3785 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
3786 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3787 
3788 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3789 	if (!acrtc)
3790 		goto fail;
3791 
3792 	res = drm_crtc_init_with_planes(
3793 			dm->ddev,
3794 			&acrtc->base,
3795 			plane,
3796 			cursor_plane,
3797 			&amdgpu_dm_crtc_funcs, NULL);
3798 
3799 	if (res)
3800 		goto fail;
3801 
3802 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3803 
3804 	/* Create (reset) the plane state */
3805 	if (acrtc->base.funcs->reset)
3806 		acrtc->base.funcs->reset(&acrtc->base);
3807 
3808 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3809 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3810 
3811 	acrtc->crtc_id = crtc_index;
3812 	acrtc->base.enabled = false;
3813 	acrtc->otg_inst = -1;
3814 
3815 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3816 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
3817 				   true, MAX_COLOR_LUT_ENTRIES);
3818 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
3819 
3820 	return 0;
3821 
3822 fail:
3823 	kfree(acrtc);
3824 	kfree(cursor_plane);
3825 	return res;
3826 }
3827 
3828 
3829 static int to_drm_connector_type(enum signal_type st)
3830 {
3831 	switch (st) {
3832 	case SIGNAL_TYPE_HDMI_TYPE_A:
3833 		return DRM_MODE_CONNECTOR_HDMIA;
3834 	case SIGNAL_TYPE_EDP:
3835 		return DRM_MODE_CONNECTOR_eDP;
3836 	case SIGNAL_TYPE_LVDS:
3837 		return DRM_MODE_CONNECTOR_LVDS;
3838 	case SIGNAL_TYPE_RGB:
3839 		return DRM_MODE_CONNECTOR_VGA;
3840 	case SIGNAL_TYPE_DISPLAY_PORT:
3841 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
3842 		return DRM_MODE_CONNECTOR_DisplayPort;
3843 	case SIGNAL_TYPE_DVI_DUAL_LINK:
3844 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
3845 		return DRM_MODE_CONNECTOR_DVID;
3846 	case SIGNAL_TYPE_VIRTUAL:
3847 		return DRM_MODE_CONNECTOR_VIRTUAL;
3848 
3849 	default:
3850 		return DRM_MODE_CONNECTOR_Unknown;
3851 	}
3852 }
3853 
3854 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
3855 {
3856 	return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
3857 }
3858 
3859 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
3860 {
3861 	struct drm_encoder *encoder;
3862 	struct amdgpu_encoder *amdgpu_encoder;
3863 
3864 	encoder = amdgpu_dm_connector_to_encoder(connector);
3865 
3866 	if (encoder == NULL)
3867 		return;
3868 
3869 	amdgpu_encoder = to_amdgpu_encoder(encoder);
3870 
3871 	amdgpu_encoder->native_mode.clock = 0;
3872 
3873 	if (!list_empty(&connector->probed_modes)) {
3874 		struct drm_display_mode *preferred_mode = NULL;
3875 
3876 		list_for_each_entry(preferred_mode,
3877 				    &connector->probed_modes,
3878 				    head) {
3879 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
3880 				amdgpu_encoder->native_mode = *preferred_mode;
3881 
3882 			break;
3883 		}
3884 
3885 	}
3886 }
3887 
3888 static struct drm_display_mode *
3889 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
3890 			     char *name,
3891 			     int hdisplay, int vdisplay)
3892 {
3893 	struct drm_device *dev = encoder->dev;
3894 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3895 	struct drm_display_mode *mode = NULL;
3896 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3897 
3898 	mode = drm_mode_duplicate(dev, native_mode);
3899 
3900 	if (mode == NULL)
3901 		return NULL;
3902 
3903 	mode->hdisplay = hdisplay;
3904 	mode->vdisplay = vdisplay;
3905 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3906 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
3907 
3908 	return mode;
3909 
3910 }
3911 
3912 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3913 						 struct drm_connector *connector)
3914 {
3915 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3916 	struct drm_display_mode *mode = NULL;
3917 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3918 	struct amdgpu_dm_connector *amdgpu_dm_connector =
3919 				to_amdgpu_dm_connector(connector);
3920 	int i;
3921 	int n;
3922 	struct mode_size {
3923 		char name[DRM_DISPLAY_MODE_LEN];
3924 		int w;
3925 		int h;
3926 	} common_modes[] = {
3927 		{  "640x480",  640,  480},
3928 		{  "800x600",  800,  600},
3929 		{ "1024x768", 1024,  768},
3930 		{ "1280x720", 1280,  720},
3931 		{ "1280x800", 1280,  800},
3932 		{"1280x1024", 1280, 1024},
3933 		{ "1440x900", 1440,  900},
3934 		{"1680x1050", 1680, 1050},
3935 		{"1600x1200", 1600, 1200},
3936 		{"1920x1080", 1920, 1080},
3937 		{"1920x1200", 1920, 1200}
3938 	};
3939 
3940 	n = ARRAY_SIZE(common_modes);
3941 
3942 	for (i = 0; i < n; i++) {
3943 		struct drm_display_mode *curmode = NULL;
3944 		bool mode_existed = false;
3945 
3946 		if (common_modes[i].w > native_mode->hdisplay ||
3947 		    common_modes[i].h > native_mode->vdisplay ||
3948 		   (common_modes[i].w == native_mode->hdisplay &&
3949 		    common_modes[i].h == native_mode->vdisplay))
3950 			continue;
3951 
3952 		list_for_each_entry(curmode, &connector->probed_modes, head) {
3953 			if (common_modes[i].w == curmode->hdisplay &&
3954 			    common_modes[i].h == curmode->vdisplay) {
3955 				mode_existed = true;
3956 				break;
3957 			}
3958 		}
3959 
3960 		if (mode_existed)
3961 			continue;
3962 
3963 		mode = amdgpu_dm_create_common_mode(encoder,
3964 				common_modes[i].name, common_modes[i].w,
3965 				common_modes[i].h);
3966 		drm_mode_probed_add(connector, mode);
3967 		amdgpu_dm_connector->num_modes++;
3968 	}
3969 }
3970 
3971 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
3972 					      struct edid *edid)
3973 {
3974 	struct amdgpu_dm_connector *amdgpu_dm_connector =
3975 			to_amdgpu_dm_connector(connector);
3976 
3977 	if (edid) {
3978 		/* empty probed_modes */
3979 		INIT_LIST_HEAD(&connector->probed_modes);
3980 		amdgpu_dm_connector->num_modes =
3981 				drm_add_edid_modes(connector, edid);
3982 
3983 		amdgpu_dm_get_native_mode(connector);
3984 	} else {
3985 		amdgpu_dm_connector->num_modes = 0;
3986 	}
3987 }
3988 
3989 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
3990 {
3991 	struct amdgpu_dm_connector *amdgpu_dm_connector =
3992 			to_amdgpu_dm_connector(connector);
3993 	struct drm_encoder *encoder;
3994 	struct edid *edid = amdgpu_dm_connector->edid;
3995 
3996 	encoder = amdgpu_dm_connector_to_encoder(connector);
3997 
3998 	if (!edid || !drm_edid_is_valid(edid)) {
3999 		amdgpu_dm_connector->num_modes =
4000 				drm_add_modes_noedid(connector, 640, 480);
4001 	} else {
4002 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
4003 		amdgpu_dm_connector_add_common_modes(encoder, connector);
4004 	}
4005 	amdgpu_dm_fbc_init(connector);
4006 
4007 	return amdgpu_dm_connector->num_modes;
4008 }
4009 
4010 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
4011 				     struct amdgpu_dm_connector *aconnector,
4012 				     int connector_type,
4013 				     struct dc_link *link,
4014 				     int link_index)
4015 {
4016 	struct amdgpu_device *adev = dm->ddev->dev_private;
4017 
4018 	aconnector->connector_id = link_index;
4019 	aconnector->dc_link = link;
4020 	aconnector->base.interlace_allowed = false;
4021 	aconnector->base.doublescan_allowed = false;
4022 	aconnector->base.stereo_allowed = false;
4023 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
4024 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
4025 	mutex_init(&aconnector->hpd_lock);
4026 
4027 	/*
4028 	 * configure support HPD hot plug connector_>polled default value is 0
4029 	 * which means HPD hot plug not supported
4030 	 */
4031 	switch (connector_type) {
4032 	case DRM_MODE_CONNECTOR_HDMIA:
4033 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4034 		aconnector->base.ycbcr_420_allowed =
4035 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
4036 		break;
4037 	case DRM_MODE_CONNECTOR_DisplayPort:
4038 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4039 		aconnector->base.ycbcr_420_allowed =
4040 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
4041 		break;
4042 	case DRM_MODE_CONNECTOR_DVID:
4043 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4044 		break;
4045 	default:
4046 		break;
4047 	}
4048 
4049 	drm_object_attach_property(&aconnector->base.base,
4050 				dm->ddev->mode_config.scaling_mode_property,
4051 				DRM_MODE_SCALE_NONE);
4052 
4053 	drm_object_attach_property(&aconnector->base.base,
4054 				adev->mode_info.underscan_property,
4055 				UNDERSCAN_OFF);
4056 	drm_object_attach_property(&aconnector->base.base,
4057 				adev->mode_info.underscan_hborder_property,
4058 				0);
4059 	drm_object_attach_property(&aconnector->base.base,
4060 				adev->mode_info.underscan_vborder_property,
4061 				0);
4062 	drm_object_attach_property(&aconnector->base.base,
4063 				adev->mode_info.max_bpc_property,
4064 				0);
4065 
4066 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
4067 	    dc_is_dmcu_initialized(adev->dm.dc)) {
4068 		drm_object_attach_property(&aconnector->base.base,
4069 				adev->mode_info.abm_level_property, 0);
4070 	}
4071 
4072 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4073 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
4074 		drm_connector_attach_vrr_capable_property(
4075 			&aconnector->base);
4076 	}
4077 }
4078 
4079 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
4080 			      struct i2c_msg *msgs, int num)
4081 {
4082 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
4083 	struct ddc_service *ddc_service = i2c->ddc_service;
4084 	struct i2c_command cmd;
4085 	int i;
4086 	int result = -EIO;
4087 
4088 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
4089 
4090 	if (!cmd.payloads)
4091 		return result;
4092 
4093 	cmd.number_of_payloads = num;
4094 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
4095 	cmd.speed = 100;
4096 
4097 	for (i = 0; i < num; i++) {
4098 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
4099 		cmd.payloads[i].address = msgs[i].addr;
4100 		cmd.payloads[i].length = msgs[i].len;
4101 		cmd.payloads[i].data = msgs[i].buf;
4102 	}
4103 
4104 	if (dc_submit_i2c(
4105 			ddc_service->ctx->dc,
4106 			ddc_service->ddc_pin->hw_info.ddc_channel,
4107 			&cmd))
4108 		result = num;
4109 
4110 	kfree(cmd.payloads);
4111 	return result;
4112 }
4113 
4114 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
4115 {
4116 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
4117 }
4118 
4119 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
4120 	.master_xfer = amdgpu_dm_i2c_xfer,
4121 	.functionality = amdgpu_dm_i2c_func,
4122 };
4123 
4124 static struct amdgpu_i2c_adapter *
4125 create_i2c(struct ddc_service *ddc_service,
4126 	   int link_index,
4127 	   int *res)
4128 {
4129 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
4130 	struct amdgpu_i2c_adapter *i2c;
4131 
4132 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
4133 	if (!i2c)
4134 		return NULL;
4135 	i2c->base.owner = THIS_MODULE;
4136 	i2c->base.class = I2C_CLASS_DDC;
4137 	i2c->base.dev.parent = &adev->pdev->dev;
4138 	i2c->base.algo = &amdgpu_dm_i2c_algo;
4139 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
4140 	i2c_set_adapdata(&i2c->base, i2c);
4141 	i2c->ddc_service = ddc_service;
4142 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
4143 
4144 	return i2c;
4145 }
4146 
4147 
4148 /*
4149  * Note: this function assumes that dc_link_detect() was called for the
4150  * dc_link which will be represented by this aconnector.
4151  */
4152 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
4153 				    struct amdgpu_dm_connector *aconnector,
4154 				    uint32_t link_index,
4155 				    struct amdgpu_encoder *aencoder)
4156 {
4157 	int res = 0;
4158 	int connector_type;
4159 	struct dc *dc = dm->dc;
4160 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
4161 	struct amdgpu_i2c_adapter *i2c;
4162 
4163 	link->priv = aconnector;
4164 
4165 	DRM_DEBUG_DRIVER("%s()\n", __func__);
4166 
4167 	i2c = create_i2c(link->ddc, link->link_index, &res);
4168 	if (!i2c) {
4169 		DRM_ERROR("Failed to create i2c adapter data\n");
4170 		return -ENOMEM;
4171 	}
4172 
4173 	aconnector->i2c = i2c;
4174 	res = i2c_add_adapter(&i2c->base);
4175 
4176 	if (res) {
4177 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
4178 		goto out_free;
4179 	}
4180 
4181 	connector_type = to_drm_connector_type(link->connector_signal);
4182 
4183 	res = drm_connector_init(
4184 			dm->ddev,
4185 			&aconnector->base,
4186 			&amdgpu_dm_connector_funcs,
4187 			connector_type);
4188 
4189 	if (res) {
4190 		DRM_ERROR("connector_init failed\n");
4191 		aconnector->connector_id = -1;
4192 		goto out_free;
4193 	}
4194 
4195 	drm_connector_helper_add(
4196 			&aconnector->base,
4197 			&amdgpu_dm_connector_helper_funcs);
4198 
4199 	if (aconnector->base.funcs->reset)
4200 		aconnector->base.funcs->reset(&aconnector->base);
4201 
4202 	amdgpu_dm_connector_init_helper(
4203 		dm,
4204 		aconnector,
4205 		connector_type,
4206 		link,
4207 		link_index);
4208 
4209 	drm_connector_attach_encoder(
4210 		&aconnector->base, &aencoder->base);
4211 
4212 	drm_connector_register(&aconnector->base);
4213 #if defined(CONFIG_DEBUG_FS)
4214 	res = connector_debugfs_init(aconnector);
4215 	if (res) {
4216 		DRM_ERROR("Failed to create debugfs for connector");
4217 		goto out_free;
4218 	}
4219 #endif
4220 
4221 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
4222 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
4223 		amdgpu_dm_initialize_dp_connector(dm, aconnector);
4224 
4225 out_free:
4226 	if (res) {
4227 		kfree(i2c);
4228 		aconnector->i2c = NULL;
4229 	}
4230 	return res;
4231 }
4232 
4233 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
4234 {
4235 	switch (adev->mode_info.num_crtc) {
4236 	case 1:
4237 		return 0x1;
4238 	case 2:
4239 		return 0x3;
4240 	case 3:
4241 		return 0x7;
4242 	case 4:
4243 		return 0xf;
4244 	case 5:
4245 		return 0x1f;
4246 	case 6:
4247 	default:
4248 		return 0x3f;
4249 	}
4250 }
4251 
4252 static int amdgpu_dm_encoder_init(struct drm_device *dev,
4253 				  struct amdgpu_encoder *aencoder,
4254 				  uint32_t link_index)
4255 {
4256 	struct amdgpu_device *adev = dev->dev_private;
4257 
4258 	int res = drm_encoder_init(dev,
4259 				   &aencoder->base,
4260 				   &amdgpu_dm_encoder_funcs,
4261 				   DRM_MODE_ENCODER_TMDS,
4262 				   NULL);
4263 
4264 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
4265 
4266 	if (!res)
4267 		aencoder->encoder_id = link_index;
4268 	else
4269 		aencoder->encoder_id = -1;
4270 
4271 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
4272 
4273 	return res;
4274 }
4275 
4276 static void manage_dm_interrupts(struct amdgpu_device *adev,
4277 				 struct amdgpu_crtc *acrtc,
4278 				 bool enable)
4279 {
4280 	/*
4281 	 * this is not correct translation but will work as soon as VBLANK
4282 	 * constant is the same as PFLIP
4283 	 */
4284 	int irq_type =
4285 		amdgpu_display_crtc_idx_to_irq_type(
4286 			adev,
4287 			acrtc->crtc_id);
4288 
4289 	if (enable) {
4290 		drm_crtc_vblank_on(&acrtc->base);
4291 		amdgpu_irq_get(
4292 			adev,
4293 			&adev->pageflip_irq,
4294 			irq_type);
4295 	} else {
4296 
4297 		amdgpu_irq_put(
4298 			adev,
4299 			&adev->pageflip_irq,
4300 			irq_type);
4301 		drm_crtc_vblank_off(&acrtc->base);
4302 	}
4303 }
4304 
4305 static bool
4306 is_scaling_state_different(const struct dm_connector_state *dm_state,
4307 			   const struct dm_connector_state *old_dm_state)
4308 {
4309 	if (dm_state->scaling != old_dm_state->scaling)
4310 		return true;
4311 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
4312 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
4313 			return true;
4314 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
4315 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
4316 			return true;
4317 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
4318 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
4319 		return true;
4320 	return false;
4321 }
4322 
4323 static void remove_stream(struct amdgpu_device *adev,
4324 			  struct amdgpu_crtc *acrtc,
4325 			  struct dc_stream_state *stream)
4326 {
4327 	/* this is the update mode case */
4328 
4329 	acrtc->otg_inst = -1;
4330 	acrtc->enabled = false;
4331 }
4332 
4333 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
4334 			       struct dc_cursor_position *position)
4335 {
4336 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
4337 	int x, y;
4338 	int xorigin = 0, yorigin = 0;
4339 
4340 	if (!crtc || !plane->state->fb) {
4341 		position->enable = false;
4342 		position->x = 0;
4343 		position->y = 0;
4344 		return 0;
4345 	}
4346 
4347 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
4348 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
4349 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
4350 			  __func__,
4351 			  plane->state->crtc_w,
4352 			  plane->state->crtc_h);
4353 		return -EINVAL;
4354 	}
4355 
4356 	x = plane->state->crtc_x;
4357 	y = plane->state->crtc_y;
4358 	/* avivo cursor are offset into the total surface */
4359 	x += crtc->primary->state->src_x >> 16;
4360 	y += crtc->primary->state->src_y >> 16;
4361 	if (x < 0) {
4362 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
4363 		x = 0;
4364 	}
4365 	if (y < 0) {
4366 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
4367 		y = 0;
4368 	}
4369 	position->enable = true;
4370 	position->x = x;
4371 	position->y = y;
4372 	position->x_hotspot = xorigin;
4373 	position->y_hotspot = yorigin;
4374 
4375 	return 0;
4376 }
4377 
4378 static void handle_cursor_update(struct drm_plane *plane,
4379 				 struct drm_plane_state *old_plane_state)
4380 {
4381 	struct amdgpu_device *adev = plane->dev->dev_private;
4382 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
4383 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
4384 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
4385 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
4386 	uint64_t address = afb ? afb->address : 0;
4387 	struct dc_cursor_position position;
4388 	struct dc_cursor_attributes attributes;
4389 	int ret;
4390 
4391 	if (!plane->state->fb && !old_plane_state->fb)
4392 		return;
4393 
4394 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
4395 			 __func__,
4396 			 amdgpu_crtc->crtc_id,
4397 			 plane->state->crtc_w,
4398 			 plane->state->crtc_h);
4399 
4400 	ret = get_cursor_position(plane, crtc, &position);
4401 	if (ret)
4402 		return;
4403 
4404 	if (!position.enable) {
4405 		/* turn off cursor */
4406 		if (crtc_state && crtc_state->stream) {
4407 			mutex_lock(&adev->dm.dc_lock);
4408 			dc_stream_set_cursor_position(crtc_state->stream,
4409 						      &position);
4410 			mutex_unlock(&adev->dm.dc_lock);
4411 		}
4412 		return;
4413 	}
4414 
4415 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
4416 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
4417 
4418 	attributes.address.high_part = upper_32_bits(address);
4419 	attributes.address.low_part  = lower_32_bits(address);
4420 	attributes.width             = plane->state->crtc_w;
4421 	attributes.height            = plane->state->crtc_h;
4422 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
4423 	attributes.rotation_angle    = 0;
4424 	attributes.attribute_flags.value = 0;
4425 
4426 	attributes.pitch = attributes.width;
4427 
4428 	if (crtc_state->stream) {
4429 		mutex_lock(&adev->dm.dc_lock);
4430 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
4431 							 &attributes))
4432 			DRM_ERROR("DC failed to set cursor attributes\n");
4433 
4434 		if (!dc_stream_set_cursor_position(crtc_state->stream,
4435 						   &position))
4436 			DRM_ERROR("DC failed to set cursor position\n");
4437 		mutex_unlock(&adev->dm.dc_lock);
4438 	}
4439 }
4440 
4441 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
4442 {
4443 
4444 	assert_spin_locked(&acrtc->base.dev->event_lock);
4445 	WARN_ON(acrtc->event);
4446 
4447 	acrtc->event = acrtc->base.state->event;
4448 
4449 	/* Set the flip status */
4450 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
4451 
4452 	/* Mark this event as consumed */
4453 	acrtc->base.state->event = NULL;
4454 
4455 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
4456 						 acrtc->crtc_id);
4457 }
4458 
4459 struct dc_stream_status *dc_state_get_stream_status(
4460 	struct dc_state *state,
4461 	struct dc_stream_state *stream)
4462 {
4463 	uint8_t i;
4464 
4465 	for (i = 0; i < state->stream_count; i++) {
4466 		if (stream == state->streams[i])
4467 			return &state->stream_status[i];
4468 	}
4469 
4470 	return NULL;
4471 }
4472 
4473 static void update_freesync_state_on_stream(
4474 	struct amdgpu_display_manager *dm,
4475 	struct dm_crtc_state *new_crtc_state,
4476 	struct dc_stream_state *new_stream,
4477 	struct dc_plane_state *surface,
4478 	u32 flip_timestamp_in_us)
4479 {
4480 	struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
4481 	struct dc_info_packet vrr_infopacket = {0};
4482 	struct mod_freesync_config config = new_crtc_state->freesync_config;
4483 
4484 	if (!new_stream)
4485 		return;
4486 
4487 	/*
4488 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
4489 	 * For now it's sufficient to just guard against these conditions.
4490 	 */
4491 
4492 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
4493 		return;
4494 
4495 	if (new_crtc_state->vrr_supported &&
4496 	    config.min_refresh_in_uhz &&
4497 	    config.max_refresh_in_uhz) {
4498 		config.state = new_crtc_state->base.vrr_enabled ?
4499 			VRR_STATE_ACTIVE_VARIABLE :
4500 			VRR_STATE_INACTIVE;
4501 	} else {
4502 		config.state = VRR_STATE_UNSUPPORTED;
4503 	}
4504 
4505 	mod_freesync_build_vrr_params(dm->freesync_module,
4506 				      new_stream,
4507 				      &config, &vrr_params);
4508 
4509 	if (surface) {
4510 		mod_freesync_handle_preflip(
4511 			dm->freesync_module,
4512 			surface,
4513 			new_stream,
4514 			flip_timestamp_in_us,
4515 			&vrr_params);
4516 	}
4517 
4518 	mod_freesync_build_vrr_infopacket(
4519 		dm->freesync_module,
4520 		new_stream,
4521 		&vrr_params,
4522 		PACKET_TYPE_VRR,
4523 		TRANSFER_FUNC_UNKNOWN,
4524 		&vrr_infopacket);
4525 
4526 	new_crtc_state->freesync_timing_changed =
4527 		(memcmp(&new_crtc_state->vrr_params.adjust,
4528 			&vrr_params.adjust,
4529 			sizeof(vrr_params.adjust)) != 0);
4530 
4531 	new_crtc_state->freesync_vrr_info_changed =
4532 		(memcmp(&new_crtc_state->vrr_infopacket,
4533 			&vrr_infopacket,
4534 			sizeof(vrr_infopacket)) != 0);
4535 
4536 	new_crtc_state->vrr_params = vrr_params;
4537 	new_crtc_state->vrr_infopacket = vrr_infopacket;
4538 
4539 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
4540 	new_stream->vrr_infopacket = vrr_infopacket;
4541 
4542 	if (new_crtc_state->freesync_vrr_info_changed)
4543 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
4544 			      new_crtc_state->base.crtc->base.id,
4545 			      (int)new_crtc_state->base.vrr_enabled,
4546 			      (int)vrr_params.state);
4547 
4548 	if (new_crtc_state->freesync_timing_changed)
4549 		DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
4550 			      new_crtc_state->base.crtc->base.id,
4551 				  vrr_params.adjust.v_total_min,
4552 				  vrr_params.adjust.v_total_max);
4553 }
4554 
4555 /*
4556  * Executes flip
4557  *
4558  * Waits on all BO's fences and for proper vblank count
4559  */
4560 static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
4561 			      struct drm_framebuffer *fb,
4562 			      uint32_t target,
4563 			      struct dc_state *state)
4564 {
4565 	unsigned long flags;
4566 	uint64_t timestamp_ns;
4567 	uint32_t target_vblank;
4568 	int r, vpos, hpos;
4569 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4570 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
4571 	struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
4572 	struct amdgpu_device *adev = crtc->dev->dev_private;
4573 	bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
4574 	struct dc_flip_addrs addr = { {0} };
4575 	/* TODO eliminate or rename surface_update */
4576 	struct dc_surface_update surface_updates[1] = { {0} };
4577 	struct dc_stream_update stream_update = {0};
4578 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4579 	struct dc_stream_status *stream_status;
4580 	struct dc_plane_state *surface;
4581 
4582 
4583 	/* Prepare wait for target vblank early - before the fence-waits */
4584 	target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
4585 			amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
4586 
4587 	/*
4588 	 * TODO This might fail and hence better not used, wait
4589 	 * explicitly on fences instead
4590 	 * and in general should be called for
4591 	 * blocking commit to as per framework helpers
4592 	 */
4593 	r = amdgpu_bo_reserve(abo, true);
4594 	if (unlikely(r != 0)) {
4595 		DRM_ERROR("failed to reserve buffer before flip\n");
4596 		WARN_ON(1);
4597 	}
4598 
4599 	/* Wait for all fences on this FB */
4600 	WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
4601 								    MAX_SCHEDULE_TIMEOUT) < 0);
4602 
4603 	amdgpu_bo_unreserve(abo);
4604 
4605 	/*
4606 	 * Wait until we're out of the vertical blank period before the one
4607 	 * targeted by the flip
4608 	 */
4609 	while ((acrtc->enabled &&
4610 		(amdgpu_display_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id,
4611 						    0, &vpos, &hpos, NULL,
4612 						    NULL, &crtc->hwmode)
4613 		 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
4614 		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
4615 		(int)(target_vblank -
4616 		  amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
4617 		usleep_range(1000, 1100);
4618 	}
4619 
4620 	/* Flip */
4621 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
4622 
4623 	WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
4624 	WARN_ON(!acrtc_state->stream);
4625 
4626 	addr.address.grph.addr.low_part = lower_32_bits(afb->address);
4627 	addr.address.grph.addr.high_part = upper_32_bits(afb->address);
4628 	addr.flip_immediate = async_flip;
4629 
4630 	timestamp_ns = ktime_get_ns();
4631 	addr.flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
4632 
4633 
4634 	if (acrtc->base.state->event)
4635 		prepare_flip_isr(acrtc);
4636 
4637 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4638 
4639 	stream_status = dc_stream_get_status(acrtc_state->stream);
4640 	if (!stream_status) {
4641 		DRM_ERROR("No stream status for CRTC: id=%d\n",
4642 			acrtc->crtc_id);
4643 		return;
4644 	}
4645 
4646 	surface = stream_status->plane_states[0];
4647 	surface_updates->surface = surface;
4648 
4649 	if (!surface) {
4650 		DRM_ERROR("No surface for CRTC: id=%d\n",
4651 			acrtc->crtc_id);
4652 		return;
4653 	}
4654 	surface_updates->flip_addr = &addr;
4655 
4656 	if (acrtc_state->stream) {
4657 		update_freesync_state_on_stream(
4658 			&adev->dm,
4659 			acrtc_state,
4660 			acrtc_state->stream,
4661 			surface,
4662 			addr.flip_timestamp_in_us);
4663 
4664 		if (acrtc_state->freesync_timing_changed)
4665 			stream_update.adjust =
4666 				&acrtc_state->stream->adjust;
4667 
4668 		if (acrtc_state->freesync_vrr_info_changed)
4669 			stream_update.vrr_infopacket =
4670 				&acrtc_state->stream->vrr_infopacket;
4671 	}
4672 
4673 	/* Update surface timing information. */
4674 	surface->time.time_elapsed_in_us[surface->time.index] =
4675 		addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us;
4676 	surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us;
4677 	surface->time.index++;
4678 	if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
4679 		surface->time.index = 0;
4680 
4681 	mutex_lock(&adev->dm.dc_lock);
4682 
4683 	dc_commit_updates_for_stream(adev->dm.dc,
4684 					     surface_updates,
4685 					     1,
4686 					     acrtc_state->stream,
4687 					     &stream_update,
4688 					     &surface_updates->surface,
4689 					     state);
4690 	mutex_unlock(&adev->dm.dc_lock);
4691 
4692 	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
4693 			 __func__,
4694 			 addr.address.grph.addr.high_part,
4695 			 addr.address.grph.addr.low_part);
4696 }
4697 
4698 /*
4699  * TODO this whole function needs to go
4700  *
4701  * dc_surface_update is needlessly complex. See if we can just replace this
4702  * with a dc_plane_state and follow the atomic model a bit more closely here.
4703  */
4704 static bool commit_planes_to_stream(
4705 		struct amdgpu_display_manager *dm,
4706 		struct dc *dc,
4707 		struct dc_plane_state **plane_states,
4708 		uint8_t new_plane_count,
4709 		struct dm_crtc_state *dm_new_crtc_state,
4710 		struct dm_crtc_state *dm_old_crtc_state,
4711 		struct dc_state *state)
4712 {
4713 	/* no need to dynamically allocate this. it's pretty small */
4714 	struct dc_surface_update updates[MAX_SURFACES];
4715 	struct dc_flip_addrs *flip_addr;
4716 	struct dc_plane_info *plane_info;
4717 	struct dc_scaling_info *scaling_info;
4718 	int i;
4719 	struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
4720 	struct dc_stream_update *stream_update =
4721 			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
4722 	unsigned int abm_level;
4723 
4724 	if (!stream_update) {
4725 		BREAK_TO_DEBUGGER();
4726 		return false;
4727 	}
4728 
4729 	flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
4730 			    GFP_KERNEL);
4731 	plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
4732 			     GFP_KERNEL);
4733 	scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
4734 			       GFP_KERNEL);
4735 
4736 	if (!flip_addr || !plane_info || !scaling_info) {
4737 		kfree(flip_addr);
4738 		kfree(plane_info);
4739 		kfree(scaling_info);
4740 		kfree(stream_update);
4741 		return false;
4742 	}
4743 
4744 	memset(updates, 0, sizeof(updates));
4745 
4746 	stream_update->src = dc_stream->src;
4747 	stream_update->dst = dc_stream->dst;
4748 	stream_update->out_transfer_func = dc_stream->out_transfer_func;
4749 
4750 	if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
4751 		abm_level = dm_new_crtc_state->abm_level;
4752 		stream_update->abm_level = &abm_level;
4753 	}
4754 
4755 	for (i = 0; i < new_plane_count; i++) {
4756 		updates[i].surface = plane_states[i];
4757 		updates[i].gamma =
4758 			(struct dc_gamma *)plane_states[i]->gamma_correction;
4759 		updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
4760 		flip_addr[i].address = plane_states[i]->address;
4761 		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
4762 		plane_info[i].color_space = plane_states[i]->color_space;
4763 		plane_info[i].format = plane_states[i]->format;
4764 		plane_info[i].plane_size = plane_states[i]->plane_size;
4765 		plane_info[i].rotation = plane_states[i]->rotation;
4766 		plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
4767 		plane_info[i].stereo_format = plane_states[i]->stereo_format;
4768 		plane_info[i].tiling_info = plane_states[i]->tiling_info;
4769 		plane_info[i].visible = plane_states[i]->visible;
4770 		plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
4771 		plane_info[i].dcc = plane_states[i]->dcc;
4772 		scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
4773 		scaling_info[i].src_rect = plane_states[i]->src_rect;
4774 		scaling_info[i].dst_rect = plane_states[i]->dst_rect;
4775 		scaling_info[i].clip_rect = plane_states[i]->clip_rect;
4776 
4777 		updates[i].flip_addr = &flip_addr[i];
4778 		updates[i].plane_info = &plane_info[i];
4779 		updates[i].scaling_info = &scaling_info[i];
4780 	}
4781 
4782 	mutex_lock(&dm->dc_lock);
4783 	dc_commit_updates_for_stream(
4784 			dc,
4785 			updates,
4786 			new_plane_count,
4787 			dc_stream, stream_update, plane_states, state);
4788 	mutex_unlock(&dm->dc_lock);
4789 
4790 	kfree(flip_addr);
4791 	kfree(plane_info);
4792 	kfree(scaling_info);
4793 	kfree(stream_update);
4794 	return true;
4795 }
4796 
4797 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
4798 				    struct dc_state *dc_state,
4799 				    struct drm_device *dev,
4800 				    struct amdgpu_display_manager *dm,
4801 				    struct drm_crtc *pcrtc,
4802 				    bool *wait_for_vblank)
4803 {
4804 	uint32_t i;
4805 	struct drm_plane *plane;
4806 	struct drm_plane_state *old_plane_state, *new_plane_state;
4807 	struct dc_stream_state *dc_stream_attach;
4808 	struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
4809 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
4810 	struct drm_crtc_state *new_pcrtc_state =
4811 			drm_atomic_get_new_crtc_state(state, pcrtc);
4812 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
4813 	struct dm_crtc_state *dm_old_crtc_state =
4814 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
4815 	int planes_count = 0;
4816 	unsigned long flags;
4817 
4818 	/* update planes when needed */
4819 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4820 		struct drm_crtc *crtc = new_plane_state->crtc;
4821 		struct drm_crtc_state *new_crtc_state;
4822 		struct drm_framebuffer *fb = new_plane_state->fb;
4823 		bool pflip_needed;
4824 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
4825 
4826 		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
4827 			handle_cursor_update(plane, old_plane_state);
4828 			continue;
4829 		}
4830 
4831 		if (!fb || !crtc || pcrtc != crtc)
4832 			continue;
4833 
4834 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
4835 		if (!new_crtc_state->active)
4836 			continue;
4837 
4838 		pflip_needed = !state->allow_modeset;
4839 
4840 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
4841 		if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
4842 			DRM_ERROR("%s: acrtc %d, already busy\n",
4843 				  __func__,
4844 				  acrtc_attach->crtc_id);
4845 			/* In commit tail framework this cannot happen */
4846 			WARN_ON(1);
4847 		}
4848 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4849 
4850 		if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
4851 			WARN_ON(!dm_new_plane_state->dc_state);
4852 
4853 			plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
4854 
4855 			dc_stream_attach = acrtc_state->stream;
4856 			planes_count++;
4857 
4858 		} else if (new_crtc_state->planes_changed) {
4859 			/* Assume even ONE crtc with immediate flip means
4860 			 * entire can't wait for VBLANK
4861 			 * TODO Check if it's correct
4862 			 */
4863 			*wait_for_vblank =
4864 					new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
4865 				false : true;
4866 
4867 			/* TODO: Needs rework for multiplane flip */
4868 			if (plane->type == DRM_PLANE_TYPE_PRIMARY)
4869 				drm_crtc_vblank_get(crtc);
4870 
4871 			amdgpu_dm_do_flip(
4872 				crtc,
4873 				fb,
4874 				(uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
4875 				dc_state);
4876 		}
4877 
4878 	}
4879 
4880 	if (planes_count) {
4881 		unsigned long flags;
4882 
4883 		if (new_pcrtc_state->event) {
4884 
4885 			drm_crtc_vblank_get(pcrtc);
4886 
4887 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
4888 			prepare_flip_isr(acrtc_attach);
4889 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
4890 		}
4891 
4892 		dc_stream_attach->abm_level = acrtc_state->abm_level;
4893 
4894 		if (false == commit_planes_to_stream(dm,
4895 							dm->dc,
4896 							plane_states_constructed,
4897 							planes_count,
4898 							acrtc_state,
4899 							dm_old_crtc_state,
4900 							dc_state))
4901 			dm_error("%s: Failed to attach plane!\n", __func__);
4902 	} else {
4903 		/*TODO BUG Here should go disable planes on CRTC. */
4904 	}
4905 }
4906 
4907 /*
4908  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4909  * @crtc_state: the DRM CRTC state
4910  * @stream_state: the DC stream state.
4911  *
4912  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4913  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
4914  */
4915 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
4916 						struct dc_stream_state *stream_state)
4917 {
4918 	stream_state->mode_changed = crtc_state->mode_changed;
4919 }
4920 
4921 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
4922 				   struct drm_atomic_state *state,
4923 				   bool nonblock)
4924 {
4925 	struct drm_crtc *crtc;
4926 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4927 	struct amdgpu_device *adev = dev->dev_private;
4928 	int i;
4929 
4930 	/*
4931 	 * We evade vblanks and pflips on crtc that
4932 	 * should be changed. We do it here to flush & disable
4933 	 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4934 	 * it will update crtc->dm_crtc_state->stream pointer which is used in
4935 	 * the ISRs.
4936 	 */
4937 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4938 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4939 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4940 
4941 		if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
4942 			manage_dm_interrupts(adev, acrtc, false);
4943 	}
4944 	/*
4945 	 * Add check here for SoC's that support hardware cursor plane, to
4946 	 * unset legacy_cursor_update
4947 	 */
4948 
4949 	return drm_atomic_helper_commit(dev, state, nonblock);
4950 
4951 	/*TODO Handle EINTR, reenable IRQ*/
4952 }
4953 
4954 /**
4955  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
4956  * @state: The atomic state to commit
4957  *
4958  * This will tell DC to commit the constructed DC state from atomic_check,
4959  * programming the hardware. Any failures here implies a hardware failure, since
4960  * atomic check should have filtered anything non-kosher.
4961  */
4962 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
4963 {
4964 	struct drm_device *dev = state->dev;
4965 	struct amdgpu_device *adev = dev->dev_private;
4966 	struct amdgpu_display_manager *dm = &adev->dm;
4967 	struct dm_atomic_state *dm_state;
4968 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
4969 	uint32_t i, j;
4970 	struct drm_crtc *crtc;
4971 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4972 	unsigned long flags;
4973 	bool wait_for_vblank = true;
4974 	struct drm_connector *connector;
4975 	struct drm_connector_state *old_con_state, *new_con_state;
4976 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4977 	int crtc_disable_count = 0;
4978 
4979 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
4980 
4981 	dm_state = dm_atomic_get_new_state(state);
4982 	if (dm_state && dm_state->context) {
4983 		dc_state = dm_state->context;
4984 	} else {
4985 		/* No state changes, retain current state. */
4986 		dc_state_temp = dc_create_state();
4987 		ASSERT(dc_state_temp);
4988 		dc_state = dc_state_temp;
4989 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
4990 	}
4991 
4992 	/* update changed items */
4993 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4994 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4995 
4996 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4997 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4998 
4999 		DRM_DEBUG_DRIVER(
5000 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
5001 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
5002 			"connectors_changed:%d\n",
5003 			acrtc->crtc_id,
5004 			new_crtc_state->enable,
5005 			new_crtc_state->active,
5006 			new_crtc_state->planes_changed,
5007 			new_crtc_state->mode_changed,
5008 			new_crtc_state->active_changed,
5009 			new_crtc_state->connectors_changed);
5010 
5011 		/* Copy all transient state flags into dc state */
5012 		if (dm_new_crtc_state->stream) {
5013 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
5014 							    dm_new_crtc_state->stream);
5015 		}
5016 
5017 		/* handles headless hotplug case, updating new_state and
5018 		 * aconnector as needed
5019 		 */
5020 
5021 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
5022 
5023 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
5024 
5025 			if (!dm_new_crtc_state->stream) {
5026 				/*
5027 				 * this could happen because of issues with
5028 				 * userspace notifications delivery.
5029 				 * In this case userspace tries to set mode on
5030 				 * display which is disconnected in fact.
5031 				 * dc_sink is NULL in this case on aconnector.
5032 				 * We expect reset mode will come soon.
5033 				 *
5034 				 * This can also happen when unplug is done
5035 				 * during resume sequence ended
5036 				 *
5037 				 * In this case, we want to pretend we still
5038 				 * have a sink to keep the pipe running so that
5039 				 * hw state is consistent with the sw state
5040 				 */
5041 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5042 						__func__, acrtc->base.base.id);
5043 				continue;
5044 			}
5045 
5046 			if (dm_old_crtc_state->stream)
5047 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
5048 
5049 			pm_runtime_get_noresume(dev->dev);
5050 
5051 			acrtc->enabled = true;
5052 			acrtc->hw_mode = new_crtc_state->mode;
5053 			crtc->hwmode = new_crtc_state->mode;
5054 		} else if (modereset_required(new_crtc_state)) {
5055 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
5056 
5057 			/* i.e. reset mode */
5058 			if (dm_old_crtc_state->stream)
5059 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
5060 		}
5061 	} /* for_each_crtc_in_state() */
5062 
5063 	if (dc_state) {
5064 		dm_enable_per_frame_crtc_master_sync(dc_state);
5065 		mutex_lock(&dm->dc_lock);
5066 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
5067 		mutex_unlock(&dm->dc_lock);
5068 	}
5069 
5070 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
5071 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5072 
5073 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5074 
5075 		if (dm_new_crtc_state->stream != NULL) {
5076 			const struct dc_stream_status *status =
5077 					dc_stream_get_status(dm_new_crtc_state->stream);
5078 
5079 			if (!status)
5080 				status = dc_state_get_stream_status(dc_state,
5081 								    dm_new_crtc_state->stream);
5082 
5083 			if (!status)
5084 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
5085 			else
5086 				acrtc->otg_inst = status->primary_otg_inst;
5087 		}
5088 	}
5089 
5090 	/* Handle scaling, underscan, and abm changes*/
5091 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5092 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
5093 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
5094 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
5095 		struct dc_stream_status *status = NULL;
5096 
5097 		if (acrtc) {
5098 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
5099 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
5100 		}
5101 
5102 		/* Skip any modesets/resets */
5103 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
5104 			continue;
5105 
5106 
5107 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5108 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5109 
5110 		/* Skip anything that is not scaling or underscan changes */
5111 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
5112 				(dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
5113 			continue;
5114 
5115 		update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
5116 				dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
5117 
5118 		if (!dm_new_crtc_state->stream)
5119 			continue;
5120 
5121 		status = dc_stream_get_status(dm_new_crtc_state->stream);
5122 		WARN_ON(!status);
5123 		WARN_ON(!status->plane_count);
5124 
5125 		dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
5126 
5127 		/*TODO How it works with MPO ?*/
5128 		if (!commit_planes_to_stream(
5129 				dm,
5130 				dm->dc,
5131 				status->plane_states,
5132 				status->plane_count,
5133 				dm_new_crtc_state,
5134 				to_dm_crtc_state(old_crtc_state),
5135 				dc_state))
5136 			dm_error("%s: Failed to update stream scaling!\n", __func__);
5137 	}
5138 
5139 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
5140 			new_crtc_state, i) {
5141 		/*
5142 		 * loop to enable interrupts on newly arrived crtc
5143 		 */
5144 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5145 		bool modeset_needed;
5146 
5147 		if (old_crtc_state->active && !new_crtc_state->active)
5148 			crtc_disable_count++;
5149 
5150 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5151 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5152 		modeset_needed = modeset_required(
5153 				new_crtc_state,
5154 				dm_new_crtc_state->stream,
5155 				dm_old_crtc_state->stream);
5156 
5157 		if (dm_new_crtc_state->stream == NULL || !modeset_needed)
5158 			continue;
5159 
5160 		manage_dm_interrupts(adev, acrtc, true);
5161 	}
5162 
5163 	/* update planes when needed per crtc*/
5164 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
5165 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5166 
5167 		if (dm_new_crtc_state->stream)
5168 			amdgpu_dm_commit_planes(state, dc_state, dev,
5169 						dm, crtc, &wait_for_vblank);
5170 	}
5171 
5172 
5173 	/*
5174 	 * send vblank event on all events not handled in flip and
5175 	 * mark consumed event for drm_atomic_helper_commit_hw_done
5176 	 */
5177 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
5178 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
5179 
5180 		if (new_crtc_state->event)
5181 			drm_send_event_locked(dev, &new_crtc_state->event->base);
5182 
5183 		new_crtc_state->event = NULL;
5184 	}
5185 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
5186 
5187 
5188 	if (wait_for_vblank)
5189 		drm_atomic_helper_wait_for_flip_done(dev, state);
5190 
5191 	/*
5192 	 * FIXME:
5193 	 * Delay hw_done() until flip_done() is signaled. This is to block
5194 	 * another commit from freeing the CRTC state while we're still
5195 	 * waiting on flip_done.
5196 	 */
5197 	drm_atomic_helper_commit_hw_done(state);
5198 
5199 	drm_atomic_helper_cleanup_planes(dev, state);
5200 
5201 	/*
5202 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
5203 	 * so we can put the GPU into runtime suspend if we're not driving any
5204 	 * displays anymore
5205 	 */
5206 	for (i = 0; i < crtc_disable_count; i++)
5207 		pm_runtime_put_autosuspend(dev->dev);
5208 	pm_runtime_mark_last_busy(dev->dev);
5209 
5210 	if (dc_state_temp)
5211 		dc_release_state(dc_state_temp);
5212 }
5213 
5214 
5215 static int dm_force_atomic_commit(struct drm_connector *connector)
5216 {
5217 	int ret = 0;
5218 	struct drm_device *ddev = connector->dev;
5219 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
5220 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
5221 	struct drm_plane *plane = disconnected_acrtc->base.primary;
5222 	struct drm_connector_state *conn_state;
5223 	struct drm_crtc_state *crtc_state;
5224 	struct drm_plane_state *plane_state;
5225 
5226 	if (!state)
5227 		return -ENOMEM;
5228 
5229 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
5230 
5231 	/* Construct an atomic state to restore previous display setting */
5232 
5233 	/*
5234 	 * Attach connectors to drm_atomic_state
5235 	 */
5236 	conn_state = drm_atomic_get_connector_state(state, connector);
5237 
5238 	ret = PTR_ERR_OR_ZERO(conn_state);
5239 	if (ret)
5240 		goto err;
5241 
5242 	/* Attach crtc to drm_atomic_state*/
5243 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
5244 
5245 	ret = PTR_ERR_OR_ZERO(crtc_state);
5246 	if (ret)
5247 		goto err;
5248 
5249 	/* force a restore */
5250 	crtc_state->mode_changed = true;
5251 
5252 	/* Attach plane to drm_atomic_state */
5253 	plane_state = drm_atomic_get_plane_state(state, plane);
5254 
5255 	ret = PTR_ERR_OR_ZERO(plane_state);
5256 	if (ret)
5257 		goto err;
5258 
5259 
5260 	/* Call commit internally with the state we just constructed */
5261 	ret = drm_atomic_commit(state);
5262 	if (!ret)
5263 		return 0;
5264 
5265 err:
5266 	DRM_ERROR("Restoring old state failed with %i\n", ret);
5267 	drm_atomic_state_put(state);
5268 
5269 	return ret;
5270 }
5271 
5272 /*
5273  * This function handles all cases when set mode does not come upon hotplug.
5274  * This includes when a display is unplugged then plugged back into the
5275  * same port and when running without usermode desktop manager supprot
5276  */
5277 void dm_restore_drm_connector_state(struct drm_device *dev,
5278 				    struct drm_connector *connector)
5279 {
5280 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5281 	struct amdgpu_crtc *disconnected_acrtc;
5282 	struct dm_crtc_state *acrtc_state;
5283 
5284 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
5285 		return;
5286 
5287 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
5288 	if (!disconnected_acrtc)
5289 		return;
5290 
5291 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
5292 	if (!acrtc_state->stream)
5293 		return;
5294 
5295 	/*
5296 	 * If the previous sink is not released and different from the current,
5297 	 * we deduce we are in a state where we can not rely on usermode call
5298 	 * to turn on the display, so we do it here
5299 	 */
5300 	if (acrtc_state->stream->sink != aconnector->dc_sink)
5301 		dm_force_atomic_commit(&aconnector->base);
5302 }
5303 
5304 /*
5305  * Grabs all modesetting locks to serialize against any blocking commits,
5306  * Waits for completion of all non blocking commits.
5307  */
5308 static int do_aquire_global_lock(struct drm_device *dev,
5309 				 struct drm_atomic_state *state)
5310 {
5311 	struct drm_crtc *crtc;
5312 	struct drm_crtc_commit *commit;
5313 	long ret;
5314 
5315 	/*
5316 	 * Adding all modeset locks to aquire_ctx will
5317 	 * ensure that when the framework release it the
5318 	 * extra locks we are locking here will get released to
5319 	 */
5320 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
5321 	if (ret)
5322 		return ret;
5323 
5324 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5325 		spin_lock(&crtc->commit_lock);
5326 		commit = list_first_entry_or_null(&crtc->commit_list,
5327 				struct drm_crtc_commit, commit_entry);
5328 		if (commit)
5329 			drm_crtc_commit_get(commit);
5330 		spin_unlock(&crtc->commit_lock);
5331 
5332 		if (!commit)
5333 			continue;
5334 
5335 		/*
5336 		 * Make sure all pending HW programming completed and
5337 		 * page flips done
5338 		 */
5339 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
5340 
5341 		if (ret > 0)
5342 			ret = wait_for_completion_interruptible_timeout(
5343 					&commit->flip_done, 10*HZ);
5344 
5345 		if (ret == 0)
5346 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
5347 				  "timed out\n", crtc->base.id, crtc->name);
5348 
5349 		drm_crtc_commit_put(commit);
5350 	}
5351 
5352 	return ret < 0 ? ret : 0;
5353 }
5354 
5355 static void get_freesync_config_for_crtc(
5356 	struct dm_crtc_state *new_crtc_state,
5357 	struct dm_connector_state *new_con_state)
5358 {
5359 	struct mod_freesync_config config = {0};
5360 	struct amdgpu_dm_connector *aconnector =
5361 			to_amdgpu_dm_connector(new_con_state->base.connector);
5362 
5363 	new_crtc_state->vrr_supported = new_con_state->freesync_capable;
5364 
5365 	if (new_con_state->freesync_capable) {
5366 		config.state = new_crtc_state->base.vrr_enabled ?
5367 				VRR_STATE_ACTIVE_VARIABLE :
5368 				VRR_STATE_INACTIVE;
5369 		config.min_refresh_in_uhz =
5370 				aconnector->min_vfreq * 1000000;
5371 		config.max_refresh_in_uhz =
5372 				aconnector->max_vfreq * 1000000;
5373 		config.vsif_supported = true;
5374 		config.btr = true;
5375 	}
5376 
5377 	new_crtc_state->freesync_config = config;
5378 }
5379 
5380 static void reset_freesync_config_for_crtc(
5381 	struct dm_crtc_state *new_crtc_state)
5382 {
5383 	new_crtc_state->vrr_supported = false;
5384 
5385 	memset(&new_crtc_state->vrr_params, 0,
5386 	       sizeof(new_crtc_state->vrr_params));
5387 	memset(&new_crtc_state->vrr_infopacket, 0,
5388 	       sizeof(new_crtc_state->vrr_infopacket));
5389 }
5390 
5391 static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
5392 				 struct drm_atomic_state *state,
5393 				 bool enable,
5394 				 bool *lock_and_validation_needed)
5395 {
5396 	struct dm_atomic_state *dm_state = NULL;
5397 	struct drm_crtc *crtc;
5398 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5399 	int i;
5400 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
5401 	struct dc_stream_state *new_stream;
5402 	int ret = 0;
5403 
5404 	/*
5405 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
5406 	 * update changed items
5407 	 */
5408 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5409 		struct amdgpu_crtc *acrtc = NULL;
5410 		struct amdgpu_dm_connector *aconnector = NULL;
5411 		struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
5412 		struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
5413 		struct drm_plane_state *new_plane_state = NULL;
5414 
5415 		new_stream = NULL;
5416 
5417 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5418 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5419 		acrtc = to_amdgpu_crtc(crtc);
5420 
5421 		new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
5422 
5423 		if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
5424 			ret = -EINVAL;
5425 			goto fail;
5426 		}
5427 
5428 		aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
5429 
5430 		/* TODO This hack should go away */
5431 		if (aconnector && enable) {
5432 			/* Make sure fake sink is created in plug-in scenario */
5433 			drm_new_conn_state = drm_atomic_get_new_connector_state(state,
5434  								    &aconnector->base);
5435 			drm_old_conn_state = drm_atomic_get_old_connector_state(state,
5436 								    &aconnector->base);
5437 
5438 			if (IS_ERR(drm_new_conn_state)) {
5439 				ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
5440 				break;
5441 			}
5442 
5443 			dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
5444 			dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
5445 
5446 			new_stream = create_stream_for_sink(aconnector,
5447 							     &new_crtc_state->mode,
5448 							    dm_new_conn_state,
5449 							    dm_old_crtc_state->stream);
5450 
5451 			/*
5452 			 * we can have no stream on ACTION_SET if a display
5453 			 * was disconnected during S3, in this case it is not an
5454 			 * error, the OS will be updated after detection, and
5455 			 * will do the right thing on next atomic commit
5456 			 */
5457 
5458 			if (!new_stream) {
5459 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5460 						__func__, acrtc->base.base.id);
5461 				break;
5462 			}
5463 
5464 			dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
5465 
5466 			if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
5467 			    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
5468 				new_crtc_state->mode_changed = false;
5469 				DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
5470 						 new_crtc_state->mode_changed);
5471 			}
5472 		}
5473 
5474 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
5475 			goto next_crtc;
5476 
5477 		DRM_DEBUG_DRIVER(
5478 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
5479 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
5480 			"connectors_changed:%d\n",
5481 			acrtc->crtc_id,
5482 			new_crtc_state->enable,
5483 			new_crtc_state->active,
5484 			new_crtc_state->planes_changed,
5485 			new_crtc_state->mode_changed,
5486 			new_crtc_state->active_changed,
5487 			new_crtc_state->connectors_changed);
5488 
5489 		/* Remove stream for any changed/disabled CRTC */
5490 		if (!enable) {
5491 
5492 			if (!dm_old_crtc_state->stream)
5493 				goto next_crtc;
5494 
5495 			ret = dm_atomic_get_state(state, &dm_state);
5496 			if (ret)
5497 				goto fail;
5498 
5499 			DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
5500 					crtc->base.id);
5501 
5502 			/* i.e. reset mode */
5503 			if (dc_remove_stream_from_ctx(
5504 					dm->dc,
5505 					dm_state->context,
5506 					dm_old_crtc_state->stream) != DC_OK) {
5507 				ret = -EINVAL;
5508 				goto fail;
5509 			}
5510 
5511 			dc_stream_release(dm_old_crtc_state->stream);
5512 			dm_new_crtc_state->stream = NULL;
5513 
5514 			reset_freesync_config_for_crtc(dm_new_crtc_state);
5515 
5516 			*lock_and_validation_needed = true;
5517 
5518 		} else {/* Add stream for any updated/enabled CRTC */
5519 			/*
5520 			 * Quick fix to prevent NULL pointer on new_stream when
5521 			 * added MST connectors not found in existing crtc_state in the chained mode
5522 			 * TODO: need to dig out the root cause of that
5523 			 */
5524 			if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
5525 				goto next_crtc;
5526 
5527 			if (modereset_required(new_crtc_state))
5528 				goto next_crtc;
5529 
5530 			if (modeset_required(new_crtc_state, new_stream,
5531 					     dm_old_crtc_state->stream)) {
5532 
5533 				WARN_ON(dm_new_crtc_state->stream);
5534 
5535 				ret = dm_atomic_get_state(state, &dm_state);
5536 				if (ret)
5537 					goto fail;
5538 
5539 				dm_new_crtc_state->stream = new_stream;
5540 
5541 				dc_stream_retain(new_stream);
5542 
5543 				DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
5544 							crtc->base.id);
5545 
5546 				if (dc_add_stream_to_ctx(
5547 						dm->dc,
5548 						dm_state->context,
5549 						dm_new_crtc_state->stream) != DC_OK) {
5550 					ret = -EINVAL;
5551 					goto fail;
5552 				}
5553 
5554 				*lock_and_validation_needed = true;
5555 			}
5556 		}
5557 
5558 next_crtc:
5559 		/* Release extra reference */
5560 		if (new_stream)
5561 			 dc_stream_release(new_stream);
5562 
5563 		/*
5564 		 * We want to do dc stream updates that do not require a
5565 		 * full modeset below.
5566 		 */
5567 		if (!(enable && aconnector && new_crtc_state->enable &&
5568 		      new_crtc_state->active))
5569 			continue;
5570 		/*
5571 		 * Given above conditions, the dc state cannot be NULL because:
5572 		 * 1. We're in the process of enabling CRTCs (just been added
5573 		 *    to the dc context, or already is on the context)
5574 		 * 2. Has a valid connector attached, and
5575 		 * 3. Is currently active and enabled.
5576 		 * => The dc stream state currently exists.
5577 		 */
5578 		BUG_ON(dm_new_crtc_state->stream == NULL);
5579 
5580 		/* Scaling or underscan settings */
5581 		if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
5582 			update_stream_scaling_settings(
5583 				&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
5584 
5585 		/*
5586 		 * Color management settings. We also update color properties
5587 		 * when a modeset is needed, to ensure it gets reprogrammed.
5588 		 */
5589 		if (dm_new_crtc_state->base.color_mgmt_changed ||
5590 		    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
5591 			ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
5592 			if (ret)
5593 				goto fail;
5594 			amdgpu_dm_set_ctm(dm_new_crtc_state);
5595 		}
5596 
5597 		/* Update Freesync settings. */
5598 		get_freesync_config_for_crtc(dm_new_crtc_state,
5599 					     dm_new_conn_state);
5600 	}
5601 
5602 	return ret;
5603 
5604 fail:
5605 	if (new_stream)
5606 		dc_stream_release(new_stream);
5607 	return ret;
5608 }
5609 
5610 static int dm_update_planes_state(struct dc *dc,
5611 				  struct drm_atomic_state *state,
5612 				  bool enable,
5613 				  bool *lock_and_validation_needed)
5614 {
5615 
5616 	struct dm_atomic_state *dm_state = NULL;
5617 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
5618 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5619 	struct drm_plane *plane;
5620 	struct drm_plane_state *old_plane_state, *new_plane_state;
5621 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
5622 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
5623 	int i ;
5624 	/* TODO return page_flip_needed() function */
5625 	bool pflip_needed  = !state->allow_modeset;
5626 	int ret = 0;
5627 
5628 
5629 	/* Add new planes, in reverse order as DC expectation */
5630 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
5631 		new_plane_crtc = new_plane_state->crtc;
5632 		old_plane_crtc = old_plane_state->crtc;
5633 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
5634 		dm_old_plane_state = to_dm_plane_state(old_plane_state);
5635 
5636 		/*TODO Implement atomic check for cursor plane */
5637 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5638 			continue;
5639 
5640 		/* Remove any changed/removed planes */
5641 		if (!enable) {
5642 			if (pflip_needed &&
5643 			    plane->type != DRM_PLANE_TYPE_OVERLAY)
5644 				continue;
5645 
5646 			if (!old_plane_crtc)
5647 				continue;
5648 
5649 			old_crtc_state = drm_atomic_get_old_crtc_state(
5650 					state, old_plane_crtc);
5651 			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5652 
5653 			if (!dm_old_crtc_state->stream)
5654 				continue;
5655 
5656 			DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
5657 					plane->base.id, old_plane_crtc->base.id);
5658 
5659 			ret = dm_atomic_get_state(state, &dm_state);
5660 			if (ret)
5661 				return ret;
5662 
5663 			if (!dc_remove_plane_from_context(
5664 					dc,
5665 					dm_old_crtc_state->stream,
5666 					dm_old_plane_state->dc_state,
5667 					dm_state->context)) {
5668 
5669 				ret = EINVAL;
5670 				return ret;
5671 			}
5672 
5673 
5674 			dc_plane_state_release(dm_old_plane_state->dc_state);
5675 			dm_new_plane_state->dc_state = NULL;
5676 
5677 			*lock_and_validation_needed = true;
5678 
5679 		} else { /* Add new planes */
5680 			struct dc_plane_state *dc_new_plane_state;
5681 
5682 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
5683 				continue;
5684 
5685 			if (!new_plane_crtc)
5686 				continue;
5687 
5688 			new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
5689 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5690 
5691 			if (!dm_new_crtc_state->stream)
5692 				continue;
5693 
5694 			if (pflip_needed &&
5695 			    plane->type != DRM_PLANE_TYPE_OVERLAY)
5696 				continue;
5697 
5698 			WARN_ON(dm_new_plane_state->dc_state);
5699 
5700 			dc_new_plane_state = dc_create_plane_state(dc);
5701 			if (!dc_new_plane_state)
5702 				return -ENOMEM;
5703 
5704 			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
5705 					plane->base.id, new_plane_crtc->base.id);
5706 
5707 			ret = fill_plane_attributes(
5708 				new_plane_crtc->dev->dev_private,
5709 				dc_new_plane_state,
5710 				new_plane_state,
5711 				new_crtc_state);
5712 			if (ret) {
5713 				dc_plane_state_release(dc_new_plane_state);
5714 				return ret;
5715 			}
5716 
5717 			ret = dm_atomic_get_state(state, &dm_state);
5718 			if (ret) {
5719 				dc_plane_state_release(dc_new_plane_state);
5720 				return ret;
5721 			}
5722 
5723 			/*
5724 			 * Any atomic check errors that occur after this will
5725 			 * not need a release. The plane state will be attached
5726 			 * to the stream, and therefore part of the atomic
5727 			 * state. It'll be released when the atomic state is
5728 			 * cleaned.
5729 			 */
5730 			if (!dc_add_plane_to_context(
5731 					dc,
5732 					dm_new_crtc_state->stream,
5733 					dc_new_plane_state,
5734 					dm_state->context)) {
5735 
5736 				dc_plane_state_release(dc_new_plane_state);
5737 				return -EINVAL;
5738 			}
5739 
5740 			dm_new_plane_state->dc_state = dc_new_plane_state;
5741 
5742 			/* Tell DC to do a full surface update every time there
5743 			 * is a plane change. Inefficient, but works for now.
5744 			 */
5745 			dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
5746 
5747 			*lock_and_validation_needed = true;
5748 		}
5749 	}
5750 
5751 
5752 	return ret;
5753 }
5754 
5755 static int
5756 dm_determine_update_type_for_commit(struct dc *dc,
5757 				    struct drm_atomic_state *state,
5758 				    enum surface_update_type *out_type)
5759 {
5760 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
5761 	int i, j, num_plane, ret = 0;
5762 	struct drm_plane_state *old_plane_state, *new_plane_state;
5763 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
5764 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
5765 	struct drm_plane *plane;
5766 
5767 	struct drm_crtc *crtc;
5768 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
5769 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
5770 	struct dc_stream_status *status = NULL;
5771 
5772 	struct dc_surface_update *updates = kzalloc(MAX_SURFACES * sizeof(struct dc_surface_update), GFP_KERNEL);
5773 	struct dc_plane_state *surface = kzalloc(MAX_SURFACES * sizeof(struct dc_plane_state), GFP_KERNEL);
5774 	struct dc_stream_update stream_update;
5775 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
5776 
5777 	if (!updates || !surface) {
5778 		DRM_ERROR("Plane or surface update failed to allocate");
5779 		/* Set type to FULL to avoid crashing in DC*/
5780 		update_type = UPDATE_TYPE_FULL;
5781 		goto cleanup;
5782 	}
5783 
5784 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5785 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
5786 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
5787 		num_plane = 0;
5788 
5789 		if (new_dm_crtc_state->stream) {
5790 
5791 			for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
5792 				new_plane_crtc = new_plane_state->crtc;
5793 				old_plane_crtc = old_plane_state->crtc;
5794 				new_dm_plane_state = to_dm_plane_state(new_plane_state);
5795 				old_dm_plane_state = to_dm_plane_state(old_plane_state);
5796 
5797 				if (plane->type == DRM_PLANE_TYPE_CURSOR)
5798 					continue;
5799 
5800 				if (!state->allow_modeset)
5801 					continue;
5802 
5803 				if (crtc == new_plane_crtc) {
5804 					updates[num_plane].surface = &surface[num_plane];
5805 
5806 					if (new_crtc_state->mode_changed) {
5807 						updates[num_plane].surface->src_rect =
5808 									new_dm_plane_state->dc_state->src_rect;
5809 						updates[num_plane].surface->dst_rect =
5810 									new_dm_plane_state->dc_state->dst_rect;
5811 						updates[num_plane].surface->rotation =
5812 									new_dm_plane_state->dc_state->rotation;
5813 						updates[num_plane].surface->in_transfer_func =
5814 									new_dm_plane_state->dc_state->in_transfer_func;
5815 						stream_update.dst = new_dm_crtc_state->stream->dst;
5816 						stream_update.src = new_dm_crtc_state->stream->src;
5817 					}
5818 
5819 					if (new_crtc_state->color_mgmt_changed) {
5820 						updates[num_plane].gamma =
5821 								new_dm_plane_state->dc_state->gamma_correction;
5822 						updates[num_plane].in_transfer_func =
5823 								new_dm_plane_state->dc_state->in_transfer_func;
5824 						stream_update.gamut_remap =
5825 								&new_dm_crtc_state->stream->gamut_remap_matrix;
5826 						stream_update.out_transfer_func =
5827 								new_dm_crtc_state->stream->out_transfer_func;
5828 					}
5829 
5830 					num_plane++;
5831 				}
5832 			}
5833 
5834 			if (num_plane > 0) {
5835 				ret = dm_atomic_get_state(state, &dm_state);
5836 				if (ret)
5837 					goto cleanup;
5838 
5839 				old_dm_state = dm_atomic_get_old_state(state);
5840 				if (!old_dm_state) {
5841 					ret = -EINVAL;
5842 					goto cleanup;
5843 				}
5844 
5845 				status = dc_state_get_stream_status(old_dm_state->context,
5846 								    new_dm_crtc_state->stream);
5847 
5848 				update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
5849 										  &stream_update, status);
5850 
5851 				if (update_type > UPDATE_TYPE_MED) {
5852 					update_type = UPDATE_TYPE_FULL;
5853 					goto cleanup;
5854 				}
5855 			}
5856 
5857 		} else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
5858 			update_type = UPDATE_TYPE_FULL;
5859 			goto cleanup;
5860 		}
5861 	}
5862 
5863 cleanup:
5864 	kfree(updates);
5865 	kfree(surface);
5866 
5867 	*out_type = update_type;
5868 	return ret;
5869 }
5870 
5871 /**
5872  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
5873  * @dev: The DRM device
5874  * @state: The atomic state to commit
5875  *
5876  * Validate that the given atomic state is programmable by DC into hardware.
5877  * This involves constructing a &struct dc_state reflecting the new hardware
5878  * state we wish to commit, then querying DC to see if it is programmable. It's
5879  * important not to modify the existing DC state. Otherwise, atomic_check
5880  * may unexpectedly commit hardware changes.
5881  *
5882  * When validating the DC state, it's important that the right locks are
5883  * acquired. For full updates case which removes/adds/updates streams on one
5884  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
5885  * that any such full update commit will wait for completion of any outstanding
5886  * flip using DRMs synchronization events. See
5887  * dm_determine_update_type_for_commit()
5888  *
5889  * Note that DM adds the affected connectors for all CRTCs in state, when that
5890  * might not seem necessary. This is because DC stream creation requires the
5891  * DC sink, which is tied to the DRM connector state. Cleaning this up should
5892  * be possible but non-trivial - a possible TODO item.
5893  *
5894  * Return: -Error code if validation failed.
5895  */
5896 static int amdgpu_dm_atomic_check(struct drm_device *dev,
5897 				  struct drm_atomic_state *state)
5898 {
5899 	struct amdgpu_device *adev = dev->dev_private;
5900 	struct dm_atomic_state *dm_state = NULL;
5901 	struct dc *dc = adev->dm.dc;
5902 	struct drm_connector *connector;
5903 	struct drm_connector_state *old_con_state, *new_con_state;
5904 	struct drm_crtc *crtc;
5905 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5906 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
5907 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
5908 
5909 	int ret, i;
5910 
5911 	/*
5912 	 * This bool will be set for true for any modeset/reset
5913 	 * or plane update which implies non fast surface update.
5914 	 */
5915 	bool lock_and_validation_needed = false;
5916 
5917 	ret = drm_atomic_helper_check_modeset(dev, state);
5918 	if (ret)
5919 		goto fail;
5920 
5921 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5922 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
5923 		    !new_crtc_state->color_mgmt_changed &&
5924 		    !new_crtc_state->vrr_enabled)
5925 			continue;
5926 
5927 		if (!new_crtc_state->enable)
5928 			continue;
5929 
5930 		ret = drm_atomic_add_affected_connectors(state, crtc);
5931 		if (ret)
5932 			return ret;
5933 
5934 		ret = drm_atomic_add_affected_planes(state, crtc);
5935 		if (ret)
5936 			goto fail;
5937 	}
5938 
5939 	/* Remove exiting planes if they are modified */
5940 	ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
5941 	if (ret) {
5942 		goto fail;
5943 	}
5944 
5945 	/* Disable all crtcs which require disable */
5946 	ret = dm_update_crtcs_state(&adev->dm, state, false, &lock_and_validation_needed);
5947 	if (ret) {
5948 		goto fail;
5949 	}
5950 
5951 	/* Enable all crtcs which require enable */
5952 	ret = dm_update_crtcs_state(&adev->dm, state, true, &lock_and_validation_needed);
5953 	if (ret) {
5954 		goto fail;
5955 	}
5956 
5957 	/* Add new/modified planes */
5958 	ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
5959 	if (ret) {
5960 		goto fail;
5961 	}
5962 
5963 	/* Run this here since we want to validate the streams we created */
5964 	ret = drm_atomic_helper_check_planes(dev, state);
5965 	if (ret)
5966 		goto fail;
5967 
5968 	/* Check scaling and underscan changes*/
5969 	/* TODO Removed scaling changes validation due to inability to commit
5970 	 * new stream into context w\o causing full reset. Need to
5971 	 * decide how to handle.
5972 	 */
5973 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5974 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
5975 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
5976 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
5977 
5978 		/* Skip any modesets/resets */
5979 		if (!acrtc || drm_atomic_crtc_needs_modeset(
5980 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
5981 			continue;
5982 
5983 		/* Skip any thing not scale or underscan changes */
5984 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
5985 			continue;
5986 
5987 		overall_update_type = UPDATE_TYPE_FULL;
5988 		lock_and_validation_needed = true;
5989 	}
5990 
5991 	ret = dm_determine_update_type_for_commit(dc, state, &update_type);
5992 	if (ret)
5993 		goto fail;
5994 
5995 	if (overall_update_type < update_type)
5996 		overall_update_type = update_type;
5997 
5998 	/*
5999 	 * lock_and_validation_needed was an old way to determine if we need to set
6000 	 * the global lock. Leaving it in to check if we broke any corner cases
6001 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
6002 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
6003 	 */
6004 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
6005 		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
6006 	else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST)
6007 		WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST");
6008 
6009 
6010 	if (overall_update_type > UPDATE_TYPE_FAST) {
6011 		ret = dm_atomic_get_state(state, &dm_state);
6012 		if (ret)
6013 			goto fail;
6014 
6015 		ret = do_aquire_global_lock(dev, state);
6016 		if (ret)
6017 			goto fail;
6018 
6019 		if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
6020 			ret = -EINVAL;
6021 			goto fail;
6022 		}
6023 	} else if (state->legacy_cursor_update) {
6024 		/*
6025 		 * This is a fast cursor update coming from the plane update
6026 		 * helper, check if it can be done asynchronously for better
6027 		 * performance.
6028 		 */
6029 		state->async_update = !drm_atomic_helper_async_check(dev, state);
6030 	}
6031 
6032 	/* Must be success */
6033 	WARN_ON(ret);
6034 	return ret;
6035 
6036 fail:
6037 	if (ret == -EDEADLK)
6038 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
6039 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
6040 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
6041 	else
6042 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
6043 
6044 	return ret;
6045 }
6046 
6047 static bool is_dp_capable_without_timing_msa(struct dc *dc,
6048 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
6049 {
6050 	uint8_t dpcd_data;
6051 	bool capable = false;
6052 
6053 	if (amdgpu_dm_connector->dc_link &&
6054 		dm_helpers_dp_read_dpcd(
6055 				NULL,
6056 				amdgpu_dm_connector->dc_link,
6057 				DP_DOWN_STREAM_PORT_COUNT,
6058 				&dpcd_data,
6059 				sizeof(dpcd_data))) {
6060 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
6061 	}
6062 
6063 	return capable;
6064 }
6065 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
6066 					struct edid *edid)
6067 {
6068 	int i;
6069 	bool edid_check_required;
6070 	struct detailed_timing *timing;
6071 	struct detailed_non_pixel *data;
6072 	struct detailed_data_monitor_range *range;
6073 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6074 			to_amdgpu_dm_connector(connector);
6075 	struct dm_connector_state *dm_con_state = NULL;
6076 
6077 	struct drm_device *dev = connector->dev;
6078 	struct amdgpu_device *adev = dev->dev_private;
6079 	bool freesync_capable = false;
6080 
6081 	if (!connector->state) {
6082 		DRM_ERROR("%s - Connector has no state", __func__);
6083 		goto update;
6084 	}
6085 
6086 	if (!edid) {
6087 		dm_con_state = to_dm_connector_state(connector->state);
6088 
6089 		amdgpu_dm_connector->min_vfreq = 0;
6090 		amdgpu_dm_connector->max_vfreq = 0;
6091 		amdgpu_dm_connector->pixel_clock_mhz = 0;
6092 
6093 		goto update;
6094 	}
6095 
6096 	dm_con_state = to_dm_connector_state(connector->state);
6097 
6098 	edid_check_required = false;
6099 	if (!amdgpu_dm_connector->dc_sink) {
6100 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
6101 		goto update;
6102 	}
6103 	if (!adev->dm.freesync_module)
6104 		goto update;
6105 	/*
6106 	 * if edid non zero restrict freesync only for dp and edp
6107 	 */
6108 	if (edid) {
6109 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
6110 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
6111 			edid_check_required = is_dp_capable_without_timing_msa(
6112 						adev->dm.dc,
6113 						amdgpu_dm_connector);
6114 		}
6115 	}
6116 	if (edid_check_required == true && (edid->version > 1 ||
6117 	   (edid->version == 1 && edid->revision > 1))) {
6118 		for (i = 0; i < 4; i++) {
6119 
6120 			timing	= &edid->detailed_timings[i];
6121 			data	= &timing->data.other_data;
6122 			range	= &data->data.range;
6123 			/*
6124 			 * Check if monitor has continuous frequency mode
6125 			 */
6126 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
6127 				continue;
6128 			/*
6129 			 * Check for flag range limits only. If flag == 1 then
6130 			 * no additional timing information provided.
6131 			 * Default GTF, GTF Secondary curve and CVT are not
6132 			 * supported
6133 			 */
6134 			if (range->flags != 1)
6135 				continue;
6136 
6137 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
6138 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
6139 			amdgpu_dm_connector->pixel_clock_mhz =
6140 				range->pixel_clock_mhz * 10;
6141 			break;
6142 		}
6143 
6144 		if (amdgpu_dm_connector->max_vfreq -
6145 		    amdgpu_dm_connector->min_vfreq > 10) {
6146 
6147 			freesync_capable = true;
6148 		}
6149 	}
6150 
6151 update:
6152 	if (dm_con_state)
6153 		dm_con_state->freesync_capable = freesync_capable;
6154 
6155 	if (connector->vrr_capable_property)
6156 		drm_connector_set_vrr_capable_property(connector,
6157 						       freesync_capable);
6158 }
6159 
6160