1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 
33 #include "vid.h"
34 #include "amdgpu.h"
35 #include "amdgpu_display.h"
36 #include "amdgpu_ucode.h"
37 #include "atom.h"
38 #include "amdgpu_dm.h"
39 #include "amdgpu_pm.h"
40 
41 #include "amd_shared.h"
42 #include "amdgpu_dm_irq.h"
43 #include "dm_helpers.h"
44 #include "amdgpu_dm_mst_types.h"
45 #if defined(CONFIG_DEBUG_FS)
46 #include "amdgpu_dm_debugfs.h"
47 #endif
48 
49 #include "ivsrcid/ivsrcid_vislands30.h"
50 
51 #include <linux/module.h>
52 #include <linux/moduleparam.h>
53 #include <linux/version.h>
54 #include <linux/types.h>
55 #include <linux/pm_runtime.h>
56 #include <linux/firmware.h>
57 
58 #include <drm/drmP.h>
59 #include <drm/drm_atomic.h>
60 #include <drm/drm_atomic_uapi.h>
61 #include <drm/drm_atomic_helper.h>
62 #include <drm/drm_dp_mst_helper.h>
63 #include <drm/drm_fb_helper.h>
64 #include <drm/drm_edid.h>
65 
66 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
67 #include "ivsrcid/irqsrcs_dcn_1_0.h"
68 
69 #include "dcn/dcn_1_0_offset.h"
70 #include "dcn/dcn_1_0_sh_mask.h"
71 #include "soc15_hw_ip.h"
72 #include "vega10_ip_offset.h"
73 
74 #include "soc15_common.h"
75 #endif
76 
77 #include "modules/inc/mod_freesync.h"
78 #include "modules/power/power_helpers.h"
79 #include "modules/inc/mod_info_packet.h"
80 
81 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
82 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
83 
84 /**
85  * DOC: overview
86  *
87  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
89  * requests into DC requests, and DC responses into DRM responses.
90  *
91  * The root control structure is &struct amdgpu_display_manager.
92  */
93 
94 /* basic init/fini API */
95 static int amdgpu_dm_init(struct amdgpu_device *adev);
96 static void amdgpu_dm_fini(struct amdgpu_device *adev);
97 
98 /*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
102  *
103  * Returns 0 on success
104  */
105 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
106 /* removes and deallocates the drm structures, created by the above function */
107 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
108 
109 static void
110 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
111 
112 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
113 				struct drm_plane *plane,
114 				unsigned long possible_crtcs);
115 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
116 			       struct drm_plane *plane,
117 			       uint32_t link_index);
118 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
119 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
120 				    uint32_t link_index,
121 				    struct amdgpu_encoder *amdgpu_encoder);
122 static int amdgpu_dm_encoder_init(struct drm_device *dev,
123 				  struct amdgpu_encoder *aencoder,
124 				  uint32_t link_index);
125 
126 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
127 
128 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
129 				   struct drm_atomic_state *state,
130 				   bool nonblock);
131 
132 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
133 
134 static int amdgpu_dm_atomic_check(struct drm_device *dev,
135 				  struct drm_atomic_state *state);
136 
137 static void handle_cursor_update(struct drm_plane *plane,
138 				 struct drm_plane_state *old_plane_state);
139 
140 
141 
142 static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
143 	DRM_PLANE_TYPE_PRIMARY,
144 	DRM_PLANE_TYPE_PRIMARY,
145 	DRM_PLANE_TYPE_PRIMARY,
146 	DRM_PLANE_TYPE_PRIMARY,
147 	DRM_PLANE_TYPE_PRIMARY,
148 	DRM_PLANE_TYPE_PRIMARY,
149 };
150 
151 static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
152 	DRM_PLANE_TYPE_PRIMARY,
153 	DRM_PLANE_TYPE_PRIMARY,
154 	DRM_PLANE_TYPE_PRIMARY,
155 	DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
156 };
157 
158 static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
159 	DRM_PLANE_TYPE_PRIMARY,
160 	DRM_PLANE_TYPE_PRIMARY,
161 	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
162 };
163 
164 /*
165  * dm_vblank_get_counter
166  *
167  * @brief
168  * Get counter for number of vertical blanks
169  *
170  * @param
171  * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
173  *
174  * @return
175  * Counter for vertical blanks
176  */
177 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
178 {
179 	if (crtc >= adev->mode_info.num_crtc)
180 		return 0;
181 	else {
182 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
183 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
184 				acrtc->base.state);
185 
186 
187 		if (acrtc_state->stream == NULL) {
188 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
189 				  crtc);
190 			return 0;
191 		}
192 
193 		return dc_stream_get_vblank_counter(acrtc_state->stream);
194 	}
195 }
196 
197 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
198 				  u32 *vbl, u32 *position)
199 {
200 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
201 
202 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
203 		return -EINVAL;
204 	else {
205 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
206 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
207 						acrtc->base.state);
208 
		if (acrtc_state->stream == NULL) {
210 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
211 				  crtc);
212 			return 0;
213 		}
214 
215 		/*
216 		 * TODO rework base driver to use values directly.
217 		 * for now parse it back into reg-format
218 		 */
219 		dc_stream_get_scanoutpos(acrtc_state->stream,
220 					 &v_blank_start,
221 					 &v_blank_end,
222 					 &h_position,
223 					 &v_position);
224 
225 		*position = v_position | (h_position << 16);
226 		*vbl = v_blank_start | (v_blank_end << 16);
227 	}
228 
229 	return 0;
230 }
231 
232 static bool dm_is_idle(void *handle)
233 {
234 	/* XXX todo */
235 	return true;
236 }
237 
238 static int dm_wait_for_idle(void *handle)
239 {
240 	/* XXX todo */
241 	return 0;
242 }
243 
244 static bool dm_check_soft_reset(void *handle)
245 {
246 	return false;
247 }
248 
249 static int dm_soft_reset(void *handle)
250 {
251 	/* XXX todo */
252 	return 0;
253 }
254 
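/*
 * Look up the amdgpu_crtc whose OTG (output timing generator) instance
 * matches otg_inst. Falls back to the first CRTC, with a warning, when the
 * instance is invalid.
 */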
255 static struct amdgpu_crtc *
256 get_crtc_by_otg_inst(struct amdgpu_device *adev,
257 		     int otg_inst)
258 {
259 	struct drm_device *dev = adev->ddev;
260 	struct drm_crtc *crtc;
261 	struct amdgpu_crtc *amdgpu_crtc;
262 
263 	if (otg_inst == -1) {
264 		WARN_ON(1);
265 		return adev->mode_info.crtcs[0];
266 	}
267 
268 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
269 		amdgpu_crtc = to_amdgpu_crtc(crtc);
270 
271 		if (amdgpu_crtc->otg_inst == otg_inst)
272 			return amdgpu_crtc;
273 	}
274 
275 	return NULL;
276 }
277 
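/*
 * Page-flip completion handler: sends the pending vblank event for the
 * completed flip, clears the flip status and drops a vblank reference.
 */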
278 static void dm_pflip_high_irq(void *interrupt_params)
279 {
280 	struct amdgpu_crtc *amdgpu_crtc;
281 	struct common_irq_params *irq_params = interrupt_params;
282 	struct amdgpu_device *adev = irq_params->adev;
283 	unsigned long flags;
284 
285 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
286 
287 	/* IRQ could occur when in initial stage */
288 	/* TODO work and BO cleanup */
289 	if (amdgpu_crtc == NULL) {
290 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
291 		return;
292 	}
293 
294 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
295 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
298 						 amdgpu_crtc->pflip_status,
299 						 AMDGPU_FLIP_SUBMITTED,
300 						 amdgpu_crtc->crtc_id,
301 						 amdgpu_crtc);
302 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
303 		return;
304 	}
305 
306 	/* Update to correct count(s) if racing with vblank irq */
307 	amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
308 
309 	/* wake up userspace */
310 	if (amdgpu_crtc->event) {
311 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
312 
313 		/* page flip completed. clean up */
314 		amdgpu_crtc->event = NULL;
315 
	} else {
		WARN_ON(1);
	}
318 
319 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
320 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
321 
322 	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
323 					__func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
324 
325 	drm_crtc_vblank_put(&amdgpu_crtc->base);
326 }
327 
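/*
 * Vertical blank handler: forwards the vblank to DRM, services CRC capture
 * and, while variable refresh rate is active, updates the stream's
 * vmin/vmax adjustment through the freesync module.
 */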
328 static void dm_crtc_high_irq(void *interrupt_params)
329 {
330 	struct common_irq_params *irq_params = interrupt_params;
331 	struct amdgpu_device *adev = irq_params->adev;
332 	struct amdgpu_crtc *acrtc;
333 	struct dm_crtc_state *acrtc_state;
334 
335 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
336 
337 	if (acrtc) {
338 		drm_crtc_handle_vblank(&acrtc->base);
339 		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
340 
341 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
342 
343 		if (acrtc_state->stream &&
344 		    acrtc_state->vrr_params.supported &&
345 		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
346 			mod_freesync_handle_v_update(
347 				adev->dm.freesync_module,
348 				acrtc_state->stream,
349 				&acrtc_state->vrr_params);
350 
351 			dc_stream_adjust_vmin_vmax(
352 				adev->dm.dc,
353 				acrtc_state->stream,
354 				&acrtc_state->vrr_params.adjust);
355 		}
356 	}
357 }
358 
359 static int dm_set_clockgating_state(void *handle,
360 		  enum amd_clockgating_state state)
361 {
362 	return 0;
363 }
364 
365 static int dm_set_powergating_state(void *handle,
366 		  enum amd_powergating_state state)
367 {
368 	return 0;
369 }
370 
371 /* Prototypes of private functions */
372 static int dm_early_init(void* handle);
373 
374 /* Allocate memory for FBC compressed data  */
375 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
376 {
377 	struct drm_device *dev = connector->dev;
378 	struct amdgpu_device *adev = dev->dev_private;
379 	struct dm_comressor_info *compressor = &adev->dm.compressor;
380 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
381 	struct drm_display_mode *mode;
382 	unsigned long max_size = 0;
383 
384 	if (adev->dm.dc->fbc_compressor == NULL)
385 		return;
386 
387 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
388 		return;
389 
390 	if (compressor->bo_ptr)
391 		return;
392 
393 
394 	list_for_each_entry(mode, &connector->modes, head) {
395 		if (max_size < mode->htotal * mode->vtotal)
396 			max_size = mode->htotal * mode->vtotal;
397 	}
398 
399 	if (max_size) {
400 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
401 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
402 			    &compressor->gpu_addr, &compressor->cpu_addr);
403 
		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
407 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
408 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
409 		}
410 
411 	}
412 
413 }
414 
415 static int amdgpu_dm_init(struct amdgpu_device *adev)
416 {
417 	struct dc_init_data init_data;
418 	adev->dm.ddev = adev->ddev;
419 	adev->dm.adev = adev;
420 
421 	/* Zero all the fields */
422 	memset(&init_data, 0, sizeof(init_data));
423 
424 	mutex_init(&adev->dm.dc_lock);
425 
	if (amdgpu_dm_irq_init(adev)) {
427 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
428 		goto error;
429 	}
430 
431 	init_data.asic_id.chip_family = adev->family;
432 
433 	init_data.asic_id.pci_revision_id = adev->rev_id;
434 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
435 
436 	init_data.asic_id.vram_width = adev->gmc.vram_width;
437 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
438 	init_data.asic_id.atombios_base_address =
439 		adev->mode_info.atom_context->bios;
440 
441 	init_data.driver = adev;
442 
443 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
444 
445 	if (!adev->dm.cgs_device) {
446 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
447 		goto error;
448 	}
449 
450 	init_data.cgs_device = adev->dm.cgs_device;
451 
452 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
453 
454 	/*
455 	 * TODO debug why this doesn't work on Raven
456 	 */
457 	if (adev->flags & AMD_IS_APU &&
458 	    adev->asic_type >= CHIP_CARRIZO &&
459 	    adev->asic_type < CHIP_RAVEN)
460 		init_data.flags.gpu_vm_support = true;
461 
462 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
463 		init_data.flags.fbc_support = true;
464 
465 	/* Display Core create. */
466 	adev->dm.dc = dc_create(&init_data);
467 
468 	if (adev->dm.dc) {
469 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
470 	} else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
472 		goto error;
473 	}
474 
475 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
476 	if (!adev->dm.freesync_module) {
477 		DRM_ERROR(
478 		"amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);
	}
482 
483 	amdgpu_dm_init_color_mod();
484 
485 	if (amdgpu_dm_initialize_drm_device(adev)) {
486 		DRM_ERROR(
487 		"amdgpu: failed to initialize sw for display support.\n");
488 		goto error;
489 	}
490 
491 	/* Update the actual used number of crtc */
492 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
493 
494 	/* TODO: Add_display_info? */
495 
496 	/* TODO use dynamic cursor width */
497 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
498 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
499 
500 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
501 		DRM_ERROR(
		"amdgpu: failed to initialize vblank for display support.\n");
503 		goto error;
504 	}
505 
506 #if defined(CONFIG_DEBUG_FS)
507 	if (dtn_debugfs_init(adev))
508 		DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
509 #endif
510 
511 	DRM_DEBUG_DRIVER("KMS initialized.\n");
512 
513 	return 0;
514 error:
515 	amdgpu_dm_fini(adev);
516 
517 	return -EINVAL;
518 }
519 
520 static void amdgpu_dm_fini(struct amdgpu_device *adev)
521 {
522 	amdgpu_dm_destroy_drm_device(&adev->dm);
523 	/*
	 * TODO: pageflip, vblank interrupt
525 	 *
526 	 * amdgpu_dm_irq_fini(adev);
527 	 */
528 
529 	if (adev->dm.cgs_device) {
530 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
531 		adev->dm.cgs_device = NULL;
532 	}
533 	if (adev->dm.freesync_module) {
534 		mod_freesync_destroy(adev->dm.freesync_module);
535 		adev->dm.freesync_module = NULL;
536 	}
537 	/* DC Destroy TODO: Replace destroy DAL */
538 	if (adev->dm.dc)
539 		dc_destroy(&adev->dm.dc);
540 
	mutex_destroy(&adev->dm.dc_lock);
544 }
545 
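/*
 * Request and validate the DMCU firmware for ASICs that ship one (currently
 * only Raven) and register its ERAM and interrupt-vector regions for PSP
 * loading. A missing DMCU firmware is not treated as fatal.
 */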
546 static int load_dmcu_fw(struct amdgpu_device *adev)
547 {
548 	const char *fw_name_dmcu;
549 	int r;
550 	const struct dmcu_firmware_header_v1_0 *hdr;
551 
	switch (adev->asic_type) {
553 	case CHIP_BONAIRE:
554 	case CHIP_HAWAII:
555 	case CHIP_KAVERI:
556 	case CHIP_KABINI:
557 	case CHIP_MULLINS:
558 	case CHIP_TONGA:
559 	case CHIP_FIJI:
560 	case CHIP_CARRIZO:
561 	case CHIP_STONEY:
562 	case CHIP_POLARIS11:
563 	case CHIP_POLARIS10:
564 	case CHIP_POLARIS12:
565 	case CHIP_VEGAM:
566 	case CHIP_VEGA10:
567 	case CHIP_VEGA12:
568 	case CHIP_VEGA20:
569 		return 0;
570 	case CHIP_RAVEN:
571 		fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
572 		break;
573 	default:
574 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
575 		return -EINVAL;
576 	}
577 
578 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
579 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
580 		return 0;
581 	}
582 
583 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
584 	if (r == -ENOENT) {
585 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
586 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
587 		adev->dm.fw_dmcu = NULL;
588 		return 0;
589 	}
590 	if (r) {
591 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
592 			fw_name_dmcu);
593 		return r;
594 	}
595 
596 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
597 	if (r) {
598 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
599 			fw_name_dmcu);
600 		release_firmware(adev->dm.fw_dmcu);
601 		adev->dm.fw_dmcu = NULL;
602 		return r;
603 	}
604 
605 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
606 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
607 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
608 	adev->firmware.fw_size +=
609 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
610 
611 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
612 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
613 	adev->firmware.fw_size +=
614 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
615 
616 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
617 
618 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
619 
620 	return 0;
621 }
622 
623 static int dm_sw_init(void *handle)
624 {
625 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
626 
627 	return load_dmcu_fw(adev);
628 }
629 
630 static int dm_sw_fini(void *handle)
631 {
632 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
633 
	if (adev->dm.fw_dmcu) {
635 		release_firmware(adev->dm.fw_dmcu);
636 		adev->dm.fw_dmcu = NULL;
637 	}
638 
639 	return 0;
640 }
641 
642 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
643 {
644 	struct amdgpu_dm_connector *aconnector;
645 	struct drm_connector *connector;
646 	int ret = 0;
647 
648 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
649 
650 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
651 		aconnector = to_amdgpu_dm_connector(connector);
652 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
653 		    aconnector->mst_mgr.aux) {
654 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
655 					aconnector, aconnector->base.base.id);
656 
			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
				/* break, not return, so connection_mutex is released below */
				break;
			}
		}
664 	}
665 
666 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
667 	return ret;
668 }
669 
670 static int dm_late_init(void *handle)
671 {
672 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
673 
674 	struct dmcu_iram_parameters params;
675 	unsigned int linear_lut[16];
676 	int i;
677 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
678 	bool ret;
679 
680 	for (i = 0; i < 16; i++)
681 		linear_lut[i] = 0xFFFF * i / 15;
682 
683 	params.set = 0;
684 	params.backlight_ramping_start = 0xCCCC;
685 	params.backlight_ramping_reduction = 0xCCCCCCCC;
686 	params.backlight_lut_array_size = 16;
687 	params.backlight_lut_array = linear_lut;
688 
689 	ret = dmcu_load_iram(dmcu, params);
690 
691 	if (!ret)
692 		return -EINVAL;
693 
694 	return detect_mst_link_for_all_connectors(adev->ddev);
695 }
696 
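/*
 * Suspend or resume the DP MST topology managers of all root MST connectors
 * around S3. If a topology fails to resume, MST is torn down on that link
 * and a hotplug event is sent so userspace can re-probe.
 */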
697 static void s3_handle_mst(struct drm_device *dev, bool suspend)
698 {
699 	struct amdgpu_dm_connector *aconnector;
700 	struct drm_connector *connector;
701 	struct drm_dp_mst_topology_mgr *mgr;
702 	int ret;
703 	bool need_hotplug = false;
704 
705 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
706 
707 	list_for_each_entry(connector, &dev->mode_config.connector_list,
708 			    head) {
709 		aconnector = to_amdgpu_dm_connector(connector);
710 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
711 		    aconnector->mst_port)
712 			continue;
713 
714 		mgr = &aconnector->mst_mgr;
715 
716 		if (suspend) {
717 			drm_dp_mst_topology_mgr_suspend(mgr);
718 		} else {
719 			ret = drm_dp_mst_topology_mgr_resume(mgr);
720 			if (ret < 0) {
721 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
722 				need_hotplug = true;
723 			}
724 		}
725 	}
726 
727 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
728 
729 	if (need_hotplug)
730 		drm_kms_helper_hotplug_event(dev);
731 }
732 
733 /**
734  * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
736  *
737  * Initialize the &struct amdgpu_display_manager device. This involves calling
738  * the initializers of each DM component, then populating the struct with them.
739  *
740  * Although the function implies hardware initialization, both hardware and
741  * software are initialized here. Splitting them out to their relevant init
742  * hooks is a future TODO item.
743  *
744  * Some notable things that are initialized here:
745  *
746  * - Display Core, both software and hardware
747  * - DC modules that we need (freesync and color management)
748  * - DRM software states
749  * - Interrupt sources and handlers
750  * - Vblank support
751  * - Debug FS entries, if enabled
752  */
753 static int dm_hw_init(void *handle)
754 {
755 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
756 	/* Create DAL display manager */
757 	amdgpu_dm_init(adev);
758 	amdgpu_dm_hpd_init(adev);
759 
760 	return 0;
761 }
762 
763 /**
764  * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
766  *
767  * Teardown components within &struct amdgpu_display_manager that require
768  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
769  * were loaded. Also flush IRQ workqueues and disable them.
770  */
771 static int dm_hw_fini(void *handle)
772 {
773 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
774 
775 	amdgpu_dm_hpd_fini(adev);
776 
777 	amdgpu_dm_irq_fini(adev);
778 	amdgpu_dm_fini(adev);
779 	return 0;
780 }
781 
782 static int dm_suspend(void *handle)
783 {
784 	struct amdgpu_device *adev = handle;
785 	struct amdgpu_display_manager *dm = &adev->dm;
786 	int ret = 0;
787 
788 	WARN_ON(adev->dm.cached_state);
789 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
790 
791 	s3_handle_mst(adev->ddev, true);
792 
793 	amdgpu_dm_irq_suspend(adev);
794 
795 
796 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
797 
798 	return ret;
799 }
800 
801 static struct amdgpu_dm_connector *
802 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
803 					     struct drm_crtc *crtc)
804 {
805 	uint32_t i;
806 	struct drm_connector_state *new_con_state;
807 	struct drm_connector *connector;
808 	struct drm_crtc *crtc_from_state;
809 
810 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
811 		crtc_from_state = new_con_state->crtc;
812 
813 		if (crtc_from_state == crtc)
814 			return to_amdgpu_dm_connector(connector);
815 	}
816 
817 	return NULL;
818 }
819 
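/*
 * Create an emulated (virtual) sink for a link on which no physical sink was
 * detected. Used for connectors that have been forced on, so that a stream
 * can still be created and an EDID read attempted.
 */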
820 static void emulated_link_detect(struct dc_link *link)
821 {
822 	struct dc_sink_init_data sink_init_data = { 0 };
823 	struct display_sink_capability sink_caps = { 0 };
824 	enum dc_edid_status edid_status;
825 	struct dc_context *dc_ctx = link->ctx;
826 	struct dc_sink *sink = NULL;
827 	struct dc_sink *prev_sink = NULL;
828 
829 	link->type = dc_connection_none;
830 	prev_sink = link->local_sink;
831 
832 	if (prev_sink != NULL)
833 		dc_sink_retain(prev_sink);
834 
835 	switch (link->connector_signal) {
836 	case SIGNAL_TYPE_HDMI_TYPE_A: {
837 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
838 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
839 		break;
840 	}
841 
842 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
843 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
844 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
845 		break;
846 	}
847 
848 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
849 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
850 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
851 		break;
852 	}
853 
854 	case SIGNAL_TYPE_LVDS: {
855 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
856 		sink_caps.signal = SIGNAL_TYPE_LVDS;
857 		break;
858 	}
859 
860 	case SIGNAL_TYPE_EDP: {
861 		sink_caps.transaction_type =
862 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
863 		sink_caps.signal = SIGNAL_TYPE_EDP;
864 		break;
865 	}
866 
867 	case SIGNAL_TYPE_DISPLAY_PORT: {
868 		sink_caps.transaction_type =
869 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
870 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
871 		break;
872 	}
873 
874 	default:
875 		DC_ERROR("Invalid connector type! signal:%d\n",
876 			link->connector_signal);
877 		return;
878 	}
879 
880 	sink_init_data.link = link;
881 	sink_init_data.sink_signal = sink_caps.signal;
882 
883 	sink = dc_sink_create(&sink_init_data);
884 	if (!sink) {
885 		DC_ERROR("Failed to create sink!\n");
886 		return;
887 	}
888 
889 	link->local_sink = sink;
890 
891 	edid_status = dm_helpers_read_local_edid(
892 			link->ctx,
893 			link,
894 			sink);
895 
896 	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
898 
899 }
900 
901 static int dm_resume(void *handle)
902 {
903 	struct amdgpu_device *adev = handle;
904 	struct drm_device *ddev = adev->ddev;
905 	struct amdgpu_display_manager *dm = &adev->dm;
906 	struct amdgpu_dm_connector *aconnector;
907 	struct drm_connector *connector;
908 	struct drm_crtc *crtc;
909 	struct drm_crtc_state *new_crtc_state;
910 	struct dm_crtc_state *dm_new_crtc_state;
911 	struct drm_plane *plane;
912 	struct drm_plane_state *new_plane_state;
913 	struct dm_plane_state *dm_new_plane_state;
914 	enum dc_connection_type new_connection_type = dc_connection_none;
915 	int i;
916 
917 	/* power on hardware */
918 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
919 
920 	/* program HPD filter */
921 	dc_resume(dm->dc);
922 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
924 	s3_handle_mst(ddev, false);
925 
926 	/*
927 	 * early enable HPD Rx IRQ, should be done before set mode as short
928 	 * pulse interrupts are used for MST
929 	 */
930 	amdgpu_dm_irq_resume_early(adev);
931 
	/* Do detection */
933 	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
934 		aconnector = to_amdgpu_dm_connector(connector);
935 
936 		/*
937 		 * this is the case when traversing through already created
938 		 * MST connectors, should be skipped
939 		 */
940 		if (aconnector->mst_port)
941 			continue;
942 
943 		mutex_lock(&aconnector->hpd_lock);
944 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
945 			DRM_ERROR("KMS: Failed to detect connector\n");
946 
947 		if (aconnector->base.force && new_connection_type == dc_connection_none)
948 			emulated_link_detect(aconnector->dc_link);
949 		else
950 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
951 
952 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
953 			aconnector->fake_enable = false;
954 
955 		aconnector->dc_sink = NULL;
956 		amdgpu_dm_update_connector_after_detect(aconnector);
957 		mutex_unlock(&aconnector->hpd_lock);
958 	}
959 
960 	/* Force mode set in atomic commit */
961 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
962 		new_crtc_state->active_changed = true;
963 
964 	/*
965 	 * atomic_check is expected to create the dc states. We need to release
966 	 * them here, since they were duplicated as part of the suspend
967 	 * procedure.
968 	 */
969 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
970 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
971 		if (dm_new_crtc_state->stream) {
972 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
973 			dc_stream_release(dm_new_crtc_state->stream);
974 			dm_new_crtc_state->stream = NULL;
975 		}
976 	}
977 
978 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
979 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
980 		if (dm_new_plane_state->dc_state) {
981 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
982 			dc_plane_state_release(dm_new_plane_state->dc_state);
983 			dm_new_plane_state->dc_state = NULL;
984 		}
985 	}
986 
987 	drm_atomic_helper_resume(ddev, dm->cached_state);
988 
989 	dm->cached_state = NULL;
990 
991 	amdgpu_dm_irq_resume_late(adev);
992 
993 	return 0;
994 }
995 
996 /**
997  * DOC: DM Lifecycle
998  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1000  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1001  * the base driver's device list to be initialized and torn down accordingly.
1002  *
1003  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1004  */
1005 
1006 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1007 	.name = "dm",
1008 	.early_init = dm_early_init,
1009 	.late_init = dm_late_init,
1010 	.sw_init = dm_sw_init,
1011 	.sw_fini = dm_sw_fini,
1012 	.hw_init = dm_hw_init,
1013 	.hw_fini = dm_hw_fini,
1014 	.suspend = dm_suspend,
1015 	.resume = dm_resume,
1016 	.is_idle = dm_is_idle,
1017 	.wait_for_idle = dm_wait_for_idle,
1018 	.check_soft_reset = dm_check_soft_reset,
1019 	.soft_reset = dm_soft_reset,
1020 	.set_clockgating_state = dm_set_clockgating_state,
1021 	.set_powergating_state = dm_set_powergating_state,
1022 };
1023 
1024 const struct amdgpu_ip_block_version dm_ip_block =
1025 {
1026 	.type = AMD_IP_BLOCK_TYPE_DCE,
1027 	.major = 1,
1028 	.minor = 0,
1029 	.rev = 0,
1030 	.funcs = &amdgpu_dm_funcs,
1031 };
1032 
1033 
1034 /**
1035  * DOC: atomic
1036  *
1037  * *WIP*
1038  */
1039 
1040 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1041 	.fb_create = amdgpu_display_user_framebuffer_create,
1042 	.output_poll_changed = drm_fb_helper_output_poll_changed,
1043 	.atomic_check = amdgpu_dm_atomic_check,
1044 	.atomic_commit = amdgpu_dm_atomic_commit,
1045 };
1046 
1047 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1048 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1049 };
1050 
1051 static void
1052 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1053 {
1054 	struct drm_connector *connector = &aconnector->base;
1055 	struct drm_device *dev = connector->dev;
1056 	struct dc_sink *sink;
1057 
1058 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
1060 		return;
1061 
1062 
1063 	sink = aconnector->dc_link->local_sink;
1064 
1065 	/*
	 * An EDID-managed connector gets its first update only in the mode_valid
	 * hook; after that the connector sink is set to either the fake or the
	 * physical sink, depending on link status. Skip if this was already done
	 * during boot.
1069 	 */
1070 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1071 			&& aconnector->dc_em_sink) {
1072 
1073 		/*
		 * For a headless S3 resume, use the emulated sink (dc_em_sink) to
		 * fake a stream, because on resume connector->sink is set to NULL.
1076 		 */
1077 		mutex_lock(&dev->mode_config.mutex);
1078 
1079 		if (sink) {
1080 			if (aconnector->dc_sink) {
1081 				amdgpu_dm_update_freesync_caps(connector, NULL);
1082 				/*
				 * The retain and release below bump up the sink's
				 * refcount, because the link no longer points to it
				 * after disconnect. Otherwise the next crtc-to-connector
				 * reshuffle by the UMD would trigger an unwanted
				 * dc_sink release.
1087 				 */
1088 				if (aconnector->dc_sink != aconnector->dc_em_sink)
1089 					dc_sink_release(aconnector->dc_sink);
1090 			}
1091 			aconnector->dc_sink = sink;
1092 			amdgpu_dm_update_freesync_caps(connector,
1093 					aconnector->edid);
1094 		} else {
1095 			amdgpu_dm_update_freesync_caps(connector, NULL);
1096 			if (!aconnector->dc_sink)
1097 				aconnector->dc_sink = aconnector->dc_em_sink;
1098 			else if (aconnector->dc_sink != aconnector->dc_em_sink)
1099 				dc_sink_retain(aconnector->dc_sink);
1100 		}
1101 
1102 		mutex_unlock(&dev->mode_config.mutex);
1103 		return;
1104 	}
1105 
1106 	/*
1107 	 * TODO: temporary guard to look for proper fix
1108 	 * if this sink is MST sink, we should not do anything
1109 	 */
1110 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
1111 		return;
1112 
1113 	if (aconnector->dc_sink == sink) {
1114 		/*
1115 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1116 		 * Do nothing!!
1117 		 */
1118 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1119 				aconnector->connector_id);
1120 		return;
1121 	}
1122 
1123 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1124 		aconnector->connector_id, aconnector->dc_sink, sink);
1125 
1126 	mutex_lock(&dev->mode_config.mutex);
1127 
1128 	/*
1129 	 * 1. Update status of the drm connector
1130 	 * 2. Send an event and let userspace tell us what to do
1131 	 */
1132 	if (sink) {
1133 		/*
1134 		 * TODO: check if we still need the S3 mode update workaround.
1135 		 * If yes, put it here.
1136 		 */
1137 		if (aconnector->dc_sink)
1138 			amdgpu_dm_update_freesync_caps(connector, NULL);
1139 
1140 		aconnector->dc_sink = sink;
1141 		if (sink->dc_edid.length == 0) {
1142 			aconnector->edid = NULL;
1143 			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1144 		} else {
1145 			aconnector->edid =
1146 				(struct edid *) sink->dc_edid.raw_edid;
1147 
1148 
1149 			drm_connector_update_edid_property(connector,
1150 					aconnector->edid);
1151 			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
1152 					    aconnector->edid);
1153 		}
1154 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
1155 
1156 	} else {
1157 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1158 		amdgpu_dm_update_freesync_caps(connector, NULL);
1159 		drm_connector_update_edid_property(connector, NULL);
1160 		aconnector->num_modes = 0;
1161 		aconnector->dc_sink = NULL;
1162 		aconnector->edid = NULL;
1163 	}
1164 
1165 	mutex_unlock(&dev->mode_config.mutex);
1166 }
1167 
1168 static void handle_hpd_irq(void *param)
1169 {
1170 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1171 	struct drm_connector *connector = &aconnector->base;
1172 	struct drm_device *dev = connector->dev;
1173 	enum dc_connection_type new_connection_type = dc_connection_none;
1174 
1175 	/*
	 * In case of failure, or for MST, there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in its
	 * own context.
1178 	 */
1179 	mutex_lock(&aconnector->hpd_lock);
1180 
1181 	if (aconnector->fake_enable)
1182 		aconnector->fake_enable = false;
1183 
1184 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1185 		DRM_ERROR("KMS: Failed to detect connector\n");
1186 
1187 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
1188 		emulated_link_detect(aconnector->dc_link);
1189 
1190 
1191 		drm_modeset_lock_all(dev);
1192 		dm_restore_drm_connector_state(dev, connector);
1193 		drm_modeset_unlock_all(dev);
1194 
1195 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1196 			drm_kms_helper_hotplug_event(dev);
1197 
1198 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
1199 		amdgpu_dm_update_connector_after_detect(aconnector);
1200 
1201 
1202 		drm_modeset_lock_all(dev);
1203 		dm_restore_drm_connector_state(dev, connector);
1204 		drm_modeset_unlock_all(dev);
1205 
1206 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1207 			drm_kms_helper_hotplug_event(dev);
1208 	}
1209 	mutex_unlock(&aconnector->hpd_lock);
1210 
1211 }
1212 
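/*
 * Service a DP short-pulse (HPD RX) interrupt for an MST link: read the
 * ESI/sink-count DPCD registers, let the MST manager handle any pending
 * messages, ACK them back at DPCD, and loop until no new IRQ is reported or
 * the retry budget is exhausted.
 */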
1213 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
1214 {
1215 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
1216 	uint8_t dret;
1217 	bool new_irq_handled = false;
1218 	int dpcd_addr;
1219 	int dpcd_bytes_to_read;
1220 
1221 	const int max_process_count = 30;
1222 	int process_count = 0;
1223 
1224 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
1225 
1226 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
1227 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
1228 		/* DPCD 0x200 - 0x201 for downstream IRQ */
1229 		dpcd_addr = DP_SINK_COUNT;
1230 	} else {
1231 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
1232 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
1233 		dpcd_addr = DP_SINK_COUNT_ESI;
1234 	}
1235 
1236 	dret = drm_dp_dpcd_read(
1237 		&aconnector->dm_dp_aux.aux,
1238 		dpcd_addr,
1239 		esi,
1240 		dpcd_bytes_to_read);
1241 
1242 	while (dret == dpcd_bytes_to_read &&
1243 		process_count < max_process_count) {
1244 		uint8_t retry;
1245 		dret = 0;
1246 
1247 		process_count++;
1248 
1249 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
1250 		/* handle HPD short pulse irq */
1251 		if (aconnector->mst_mgr.mst_state)
1252 			drm_dp_mst_hpd_irq(
1253 				&aconnector->mst_mgr,
1254 				esi,
1255 				&new_irq_handled);
1256 
1257 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
1259 			const int ack_dpcd_bytes_to_write =
1260 				dpcd_bytes_to_read - 1;
1261 
1262 			for (retry = 0; retry < 3; retry++) {
1263 				uint8_t wret;
1264 
1265 				wret = drm_dp_dpcd_write(
1266 					&aconnector->dm_dp_aux.aux,
1267 					dpcd_addr + 1,
1268 					&esi[1],
1269 					ack_dpcd_bytes_to_write);
1270 				if (wret == ack_dpcd_bytes_to_write)
1271 					break;
1272 			}
1273 
1274 			/* check if there is new irq to be handled */
1275 			dret = drm_dp_dpcd_read(
1276 				&aconnector->dm_dp_aux.aux,
1277 				dpcd_addr,
1278 				esi,
1279 				dpcd_bytes_to_read);
1280 
1281 			new_irq_handled = false;
1282 		} else {
1283 			break;
1284 		}
1285 	}
1286 
1287 	if (process_count == max_process_count)
1288 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
1289 }
1290 
1291 static void handle_hpd_rx_irq(void *param)
1292 {
1293 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1294 	struct drm_connector *connector = &aconnector->base;
1295 	struct drm_device *dev = connector->dev;
1296 	struct dc_link *dc_link = aconnector->dc_link;
1297 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
1298 	enum dc_connection_type new_connection_type = dc_connection_none;
1299 
1300 	/*
	 * TODO: This mutex temporarily protects the hpd interrupt from a gpio
	 * conflict; once an i2c helper is implemented, it should be
	 * retired.
1304 	 */
1305 	if (dc_link->type != dc_connection_mst_branch)
1306 		mutex_lock(&aconnector->hpd_lock);
1307 
1308 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
1309 			!is_mst_root_connector) {
1310 		/* Downstream Port status changed. */
1311 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
1312 			DRM_ERROR("KMS: Failed to detect connector\n");
1313 
1314 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
1315 			emulated_link_detect(dc_link);
1316 
1317 			if (aconnector->fake_enable)
1318 				aconnector->fake_enable = false;
1319 
1320 			amdgpu_dm_update_connector_after_detect(aconnector);
1321 
1322 
1323 			drm_modeset_lock_all(dev);
1324 			dm_restore_drm_connector_state(dev, connector);
1325 			drm_modeset_unlock_all(dev);
1326 
1327 			drm_kms_helper_hotplug_event(dev);
1328 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1329 
1330 			if (aconnector->fake_enable)
1331 				aconnector->fake_enable = false;
1332 
1333 			amdgpu_dm_update_connector_after_detect(aconnector);
1334 
1335 
1336 			drm_modeset_lock_all(dev);
1337 			dm_restore_drm_connector_state(dev, connector);
1338 			drm_modeset_unlock_all(dev);
1339 
1340 			drm_kms_helper_hotplug_event(dev);
1341 		}
1342 	}
1343 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
1344 	    (dc_link->type == dc_connection_mst_branch))
1345 		dm_handle_hpd_rx_irq(aconnector);
1346 
1347 	if (dc_link->type != dc_connection_mst_branch) {
1348 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
1349 		mutex_unlock(&aconnector->hpd_lock);
1350 	}
1351 }
1352 
1353 static void register_hpd_handlers(struct amdgpu_device *adev)
1354 {
1355 	struct drm_device *dev = adev->ddev;
1356 	struct drm_connector *connector;
1357 	struct amdgpu_dm_connector *aconnector;
1358 	const struct dc_link *dc_link;
1359 	struct dc_interrupt_params int_params = {0};
1360 
1361 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1362 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1363 
1364 	list_for_each_entry(connector,
1365 			&dev->mode_config.connector_list, head)	{
1366 
1367 		aconnector = to_amdgpu_dm_connector(connector);
1368 		dc_link = aconnector->dc_link;
1369 
1370 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1371 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1372 			int_params.irq_source = dc_link->irq_source_hpd;
1373 
1374 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
1375 					handle_hpd_irq,
1376 					(void *) aconnector);
1377 		}
1378 
1379 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1380 
1381 			/* Also register for DP short pulse (hpd_rx). */
1382 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1383 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
1384 
1385 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
1386 					handle_hpd_rx_irq,
1387 					(void *) aconnector);
1388 		}
1389 	}
1390 }
1391 
1392 /* Register IRQ sources and initialize IRQ callbacks */
1393 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1394 {
1395 	struct dc *dc = adev->dm.dc;
1396 	struct common_irq_params *c_irq_params;
1397 	struct dc_interrupt_params int_params = {0};
1398 	int r;
1399 	int i;
1400 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
1401 
1402 	if (adev->asic_type == CHIP_VEGA10 ||
1403 	    adev->asic_type == CHIP_VEGA12 ||
1404 	    adev->asic_type == CHIP_VEGA20 ||
1405 	    adev->asic_type == CHIP_RAVEN)
1406 		client_id = SOC15_IH_CLIENTID_DCE;
1407 
1408 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1409 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1410 
1411 	/*
1412 	 * Actions of amdgpu_irq_add_id():
1413 	 * 1. Register a set() function with base driver.
1414 	 *    Base driver will call set() function to enable/disable an
1415 	 *    interrupt in DC hardware.
1416 	 * 2. Register amdgpu_dm_irq_handler().
1417 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1418 	 *    coming from DC hardware.
1419 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
1421 
1422 	/* Use VBLANK interrupt */
1423 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
1424 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
1425 		if (r) {
1426 			DRM_ERROR("Failed to add crtc irq id!\n");
1427 			return r;
1428 		}
1429 
1430 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1431 		int_params.irq_source =
1432 			dc_interrupt_to_irq_source(dc, i, 0);
1433 
1434 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1435 
1436 		c_irq_params->adev = adev;
1437 		c_irq_params->irq_src = int_params.irq_source;
1438 
1439 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1440 				dm_crtc_high_irq, c_irq_params);
1441 	}
1442 
1443 	/* Use GRPH_PFLIP interrupt */
1444 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
1445 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
1446 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
1447 		if (r) {
1448 			DRM_ERROR("Failed to add page flip irq id!\n");
1449 			return r;
1450 		}
1451 
1452 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1453 		int_params.irq_source =
1454 			dc_interrupt_to_irq_source(dc, i, 0);
1455 
1456 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1457 
1458 		c_irq_params->adev = adev;
1459 		c_irq_params->irq_src = int_params.irq_source;
1460 
1461 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1462 				dm_pflip_high_irq, c_irq_params);
1463 
1464 	}
1465 
1466 	/* HPD */
1467 	r = amdgpu_irq_add_id(adev, client_id,
1468 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
1469 	if (r) {
1470 		DRM_ERROR("Failed to add hpd irq id!\n");
1471 		return r;
1472 	}
1473 
1474 	register_hpd_handlers(adev);
1475 
1476 	return 0;
1477 }
1478 
1479 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1480 /* Register IRQ sources and initialize IRQ callbacks */
1481 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1482 {
1483 	struct dc *dc = adev->dm.dc;
1484 	struct common_irq_params *c_irq_params;
1485 	struct dc_interrupt_params int_params = {0};
1486 	int r;
1487 	int i;
1488 
1489 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1490 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1491 
1492 	/*
1493 	 * Actions of amdgpu_irq_add_id():
1494 	 * 1. Register a set() function with base driver.
1495 	 *    Base driver will call set() function to enable/disable an
1496 	 *    interrupt in DC hardware.
1497 	 * 2. Register amdgpu_dm_irq_handler().
1498 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1499 	 *    coming from DC hardware.
1500 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1501 	 *    for acknowledging and handling.
1502 	 */
1503 
1504 	/* Use VSTARTUP interrupt */
1505 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
1506 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
1507 			i++) {
1508 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
1509 
1510 		if (r) {
1511 			DRM_ERROR("Failed to add crtc irq id!\n");
1512 			return r;
1513 		}
1514 
1515 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1516 		int_params.irq_source =
1517 			dc_interrupt_to_irq_source(dc, i, 0);
1518 
1519 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1520 
1521 		c_irq_params->adev = adev;
1522 		c_irq_params->irq_src = int_params.irq_source;
1523 
1524 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1525 				dm_crtc_high_irq, c_irq_params);
1526 	}
1527 
1528 	/* Use GRPH_PFLIP interrupt */
1529 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1530 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1531 			i++) {
1532 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1533 		if (r) {
1534 			DRM_ERROR("Failed to add page flip irq id!\n");
1535 			return r;
1536 		}
1537 
1538 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1539 		int_params.irq_source =
1540 			dc_interrupt_to_irq_source(dc, i, 0);
1541 
1542 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1543 
1544 		c_irq_params->adev = adev;
1545 		c_irq_params->irq_src = int_params.irq_source;
1546 
1547 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1548 				dm_pflip_high_irq, c_irq_params);
1549 
1550 	}
1551 
1552 	/* HPD */
1553 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
1554 			&adev->hpd_irq);
1555 	if (r) {
1556 		DRM_ERROR("Failed to add hpd irq id!\n");
1557 		return r;
1558 	}
1559 
1560 	register_hpd_handlers(adev);
1561 
1562 	return 0;
1563 }
1564 #endif
1565 
1566 /*
1567  * Acquires the lock for the atomic state object and returns
1568  * the new atomic state.
1569  *
1570  * This should only be called during atomic check.
1571  */
1572 static int dm_atomic_get_state(struct drm_atomic_state *state,
1573 			       struct dm_atomic_state **dm_state)
1574 {
1575 	struct drm_device *dev = state->dev;
1576 	struct amdgpu_device *adev = dev->dev_private;
1577 	struct amdgpu_display_manager *dm = &adev->dm;
1578 	struct drm_private_state *priv_state;
1579 	int ret;
1580 
1581 	if (*dm_state)
1582 		return 0;
1583 
1584 	ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx);
1585 	if (ret)
1586 		return ret;
1587 
1588 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
1589 	if (IS_ERR(priv_state))
1590 		return PTR_ERR(priv_state);
1591 
1592 	*dm_state = to_dm_atomic_state(priv_state);
1593 
1594 	return 0;
1595 }
1596 
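/**
 * dm_atomic_get_new_state() - Get the new DM atomic state in a commit
 * @state: The atomic state to look in.
 *
 * Returns the new state of the DM private object tracked in @state, or NULL
 * if the DM private object is not part of this commit.
 */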
1597 struct dm_atomic_state *
1598 dm_atomic_get_new_state(struct drm_atomic_state *state)
1599 {
1600 	struct drm_device *dev = state->dev;
1601 	struct amdgpu_device *adev = dev->dev_private;
1602 	struct amdgpu_display_manager *dm = &adev->dm;
1603 	struct drm_private_obj *obj;
1604 	struct drm_private_state *new_obj_state;
1605 	int i;
1606 
1607 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
1608 		if (obj->funcs == dm->atomic_obj.funcs)
1609 			return to_dm_atomic_state(new_obj_state);
1610 	}
1611 
1612 	return NULL;
1613 }
1614 
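/**
 * dm_atomic_get_old_state() - Get the old DM atomic state in a commit
 * @state: The atomic state to look in.
 *
 * Returns the old state of the DM private object tracked in @state, or NULL
 * if the DM private object is not part of this commit.
 */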
1615 struct dm_atomic_state *
1616 dm_atomic_get_old_state(struct drm_atomic_state *state)
1617 {
1618 	struct drm_device *dev = state->dev;
1619 	struct amdgpu_device *adev = dev->dev_private;
1620 	struct amdgpu_display_manager *dm = &adev->dm;
1621 	struct drm_private_obj *obj;
1622 	struct drm_private_state *old_obj_state;
1623 	int i;
1624 
1625 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
1626 		if (obj->funcs == dm->atomic_obj.funcs)
1627 			return to_dm_atomic_state(old_obj_state);
1628 	}
1629 
1630 	return NULL;
1631 }
1632 
1633 static struct drm_private_state *
1634 dm_atomic_duplicate_state(struct drm_private_obj *obj)
1635 {
1636 	struct dm_atomic_state *old_state, *new_state;
1637 
1638 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
1639 	if (!new_state)
1640 		return NULL;
1641 
1642 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
1643 
1644 	new_state->context = dc_create_state();
1645 	if (!new_state->context) {
1646 		kfree(new_state);
1647 		return NULL;
1648 	}
1649 
1650 	old_state = to_dm_atomic_state(obj->state);
1651 	if (old_state && old_state->context)
1652 		dc_resource_state_copy_construct(old_state->context,
1653 						 new_state->context);
1654 
1655 	return &new_state->base;
1656 }
1657 
1658 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
1659 				    struct drm_private_state *state)
1660 {
1661 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
1662 
1663 	if (dm_state && dm_state->context)
1664 		dc_release_state(dm_state->context);
1665 
1666 	kfree(dm_state);
1667 }
1668 
1669 static struct drm_private_state_funcs dm_atomic_state_funcs = {
1670 	.atomic_duplicate_state = dm_atomic_duplicate_state,
1671 	.atomic_destroy_state = dm_atomic_destroy_state,
1672 };
1673 
1674 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1675 {
1676 	struct dm_atomic_state *state;
1677 	int r;
1678 
1679 	adev->mode_info.mode_config_initialized = true;
1680 
1681 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1682 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1683 
1684 	adev->ddev->mode_config.max_width = 16384;
1685 	adev->ddev->mode_config.max_height = 16384;
1686 
1687 	adev->ddev->mode_config.preferred_depth = 24;
1688 	adev->ddev->mode_config.prefer_shadow = 1;
1689 	/* indicates support for immediate flip */
1690 	adev->ddev->mode_config.async_page_flip = true;
1691 
1692 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
1693 
1694 	drm_modeset_lock_init(&adev->dm.atomic_obj_lock);
1695 
1696 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1697 	if (!state)
1698 		return -ENOMEM;
1699 
1700 	state->context = dc_create_state();
1701 	if (!state->context) {
1702 		kfree(state);
1703 		return -ENOMEM;
1704 	}
1705 
1706 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
1707 
1708 	drm_atomic_private_obj_init(&adev->dm.atomic_obj,
1709 				    &state->base,
1710 				    &dm_atomic_state_funcs);
1711 
1712 	r = amdgpu_display_modeset_create_props(adev);
1713 	if (r)
1714 		return r;
1715 
1716 	return 0;
1717 }
1718 
1719 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
1720 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
1721 
1722 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1723 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1724 
1725 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
1726 {
1727 #if defined(CONFIG_ACPI)
1728 	struct amdgpu_dm_backlight_caps caps;
1729 
1730 	if (dm->backlight_caps.caps_valid)
1731 		return;
1732 
1733 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
1734 	if (caps.caps_valid) {
1735 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
1736 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
1737 		dm->backlight_caps.caps_valid = true;
1738 	} else {
1739 		dm->backlight_caps.min_input_signal =
1740 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
1741 		dm->backlight_caps.max_input_signal =
1742 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
1743 	}
1744 #else
1745 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
1746 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
1747 #endif
1748 }
1749 
1750 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1751 {
1752 	struct amdgpu_display_manager *dm = bl_get_data(bd);
1753 	struct amdgpu_dm_backlight_caps caps;
1754 	uint32_t brightness = bd->props.brightness;
1755 
1756 	amdgpu_dm_update_backlight_caps(dm);
1757 	caps = dm->backlight_caps;
1758 	/*
1759 	 * The brightness input is in the range 0-255
1760 	 * It needs to be rescaled to be between the
1761 	 * requested min and max input signal
1762 	 *
1763 	 * It also needs to be scaled up by 0x101 to
1764 	 * match the DC interface which has a range of
1765 	 * 0 to 0xffff
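	 *
	 * e.g. with the default caps defined above (min 12, max 255), a
	 * requested brightness of 0 maps to 12 * 0x101 = 0x0c0c and a
	 * requested brightness of 255 maps to 0xffff.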
1766 	 */
1767 	brightness =
1768 		brightness
1769 		* 0x101
1770 		* (caps.max_input_signal - caps.min_input_signal)
1771 		/ AMDGPU_MAX_BL_LEVEL
1772 		+ caps.min_input_signal * 0x101;
1773 
1774 	if (dc_link_set_backlight_level(dm->backlight_link,
1775 			brightness, 0))
1776 		return 0;
1777 	else
1778 		return 1;
1779 }
1780 
1781 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1782 {
1783 	struct amdgpu_display_manager *dm = bl_get_data(bd);
1784 	int ret = dc_link_get_backlight_level(dm->backlight_link);
1785 
1786 	if (ret == DC_ERROR_UNEXPECTED)
1787 		return bd->props.brightness;
1788 	return ret;
1789 }
1790 
1791 static const struct backlight_ops amdgpu_dm_backlight_ops = {
1792 	.get_brightness = amdgpu_dm_backlight_get_brightness,
1793 	.update_status	= amdgpu_dm_backlight_update_status,
1794 };
1795 
1796 static void
1797 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1798 {
1799 	char bl_name[16];
1800 	struct backlight_properties props = { 0 };
1801 
1802 	amdgpu_dm_update_backlight_caps(dm);
1803 
1804 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1805 	props.brightness = AMDGPU_MAX_BL_LEVEL;
1806 	props.type = BACKLIGHT_RAW;
1807 
1808 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1809 			dm->adev->ddev->primary->index);
1810 
1811 	dm->backlight_dev = backlight_device_register(bl_name,
1812 			dm->adev->ddev->dev,
1813 			dm,
1814 			&amdgpu_dm_backlight_ops,
1815 			&props);
1816 
1817 	if (IS_ERR(dm->backlight_dev))
1818 		DRM_ERROR("DM: Backlight registration failed!\n");
1819 	else
1820 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1821 }
1822 
1823 #endif
1824 
1825 static int initialize_plane(struct amdgpu_display_manager *dm,
1826 			     struct amdgpu_mode_info *mode_info,
1827 			     int plane_id)
1828 {
1829 	struct drm_plane *plane;
1830 	unsigned long possible_crtcs;
1831 	int ret = 0;
1832 
1833 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
1834 	mode_info->planes[plane_id] = plane;
1835 
1836 	if (!plane) {
1837 		DRM_ERROR("KMS: Failed to allocate plane\n");
1838 		return -ENOMEM;
1839 	}
1840 	plane->type = mode_info->plane_type[plane_id];
1841 
1842 	/*
1843 	 * HACK: IGT tests expect that each plane can only have
1844 	 * one possible CRTC. For now, set one CRTC for each
1845 	 * plane that is not an underlay, but still allow multiple
1846 	 * CRTCs for underlay planes.
1847 	 */
1848 	possible_crtcs = 1 << plane_id;
1849 	if (plane_id >= dm->dc->caps.max_streams)
1850 		possible_crtcs = 0xff;
1851 
1852 	ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
1853 
1854 	if (ret) {
1855 		DRM_ERROR("KMS: Failed to initialize plane\n");
1856 		return ret;
1857 	}
1858 
1859 	return ret;
1860 }
1861 
1862 
1863 static void register_backlight_device(struct amdgpu_display_manager *dm,
1864 				      struct dc_link *link)
1865 {
1866 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1867 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1868 
1869 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
1870 	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
1876 		amdgpu_dm_register_backlight_device(dm);
1877 
1878 		if (dm->backlight_dev)
1879 			dm->backlight_link = link;
1880 	}
1881 #endif
1882 }
1883 
1884 
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component
 *
 * Returns 0 on success
 */
1893 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1894 {
1895 	struct amdgpu_display_manager *dm = &adev->dm;
1896 	int32_t i;
1897 	struct amdgpu_dm_connector *aconnector = NULL;
1898 	struct amdgpu_encoder *aencoder = NULL;
1899 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
1900 	uint32_t link_cnt;
1901 	int32_t total_overlay_planes, total_primary_planes;
1902 	enum dc_connection_type new_connection_type = dc_connection_none;
1903 
1904 	link_cnt = dm->dc->caps.max_links;
1905 	if (amdgpu_dm_mode_config_init(dm->adev)) {
1906 		DRM_ERROR("DM: Failed to initialize mode config\n");
1907 		return -EINVAL;
1908 	}
1909 
1910 	/* Identify the number of planes to be initialized */
1911 	total_overlay_planes = dm->dc->caps.max_slave_planes;
1912 	total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
1913 
1914 	/* First initialize overlay planes, index starting after primary planes */
1915 	for (i = (total_overlay_planes - 1); i >= 0; i--) {
1916 		if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
1917 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
1918 			goto fail;
1919 		}
1920 	}
1921 
1922 	/* Initialize primary planes */
1923 	for (i = (total_primary_planes - 1); i >= 0; i--) {
1924 		if (initialize_plane(dm, mode_info, i)) {
1925 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
1926 			goto fail;
1927 		}
1928 	}
1929 
1930 	for (i = 0; i < dm->dc->caps.max_streams; i++)
1931 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
1932 			DRM_ERROR("KMS: Failed to initialize crtc\n");
1933 			goto fail;
1934 		}
1935 
1936 	dm->display_indexes_num = dm->dc->caps.max_streams;
1937 
1938 	/* loops over all connectors on the board */
1939 	for (i = 0; i < link_cnt; i++) {
1940 		struct dc_link *link = NULL;
1941 
1942 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1943 			DRM_ERROR(
1944 				"KMS: Cannot support more than %d display indexes\n",
1945 					AMDGPU_DM_MAX_DISPLAY_INDEX);
1946 			continue;
1947 		}
1948 
1949 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1950 		if (!aconnector)
1951 			goto fail;
1952 
1953 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
1954 		if (!aencoder)
1955 			goto fail;
1956 
1957 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
1958 			DRM_ERROR("KMS: Failed to initialize encoder\n");
1959 			goto fail;
1960 		}
1961 
1962 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
1963 			DRM_ERROR("KMS: Failed to initialize connector\n");
1964 			goto fail;
1965 		}
1966 
1967 		link = dc_get_link_at_index(dm->dc, i);
1968 
1969 		if (!dc_link_detect_sink(link, &new_connection_type))
1970 			DRM_ERROR("KMS: Failed to detect connector\n");
1971 
1972 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
1973 			emulated_link_detect(link);
1974 			amdgpu_dm_update_connector_after_detect(aconnector);
1975 
1976 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
1977 			amdgpu_dm_update_connector_after_detect(aconnector);
1978 			register_backlight_device(dm, link);
1979 		}
1980 
1981 
1982 	}
1983 
1984 	/* Software is initialized. Now we can register interrupt handlers. */
1985 	switch (adev->asic_type) {
1986 	case CHIP_BONAIRE:
1987 	case CHIP_HAWAII:
1988 	case CHIP_KAVERI:
1989 	case CHIP_KABINI:
1990 	case CHIP_MULLINS:
1991 	case CHIP_TONGA:
1992 	case CHIP_FIJI:
1993 	case CHIP_CARRIZO:
1994 	case CHIP_STONEY:
1995 	case CHIP_POLARIS11:
1996 	case CHIP_POLARIS10:
1997 	case CHIP_POLARIS12:
1998 	case CHIP_VEGAM:
1999 	case CHIP_VEGA10:
2000 	case CHIP_VEGA12:
2001 	case CHIP_VEGA20:
2002 		if (dce110_register_irq_handlers(dm->adev)) {
2003 			DRM_ERROR("DM: Failed to initialize IRQ\n");
2004 			goto fail;
2005 		}
2006 		break;
2007 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2008 	case CHIP_RAVEN:
2009 		if (dcn10_register_irq_handlers(dm->adev)) {
2010 			DRM_ERROR("DM: Failed to initialize IRQ\n");
2011 			goto fail;
2012 		}
2013 		break;
2014 #endif
2015 	default:
2016 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2017 		goto fail;
2018 	}
2019 
2020 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
2021 		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
2022 
2023 	return 0;
2024 fail:
2025 	kfree(aencoder);
2026 	kfree(aconnector);
2027 	for (i = 0; i < dm->dc->caps.max_planes; i++)
2028 		kfree(mode_info->planes[i]);
2029 	return -EINVAL;
2030 }
2031 
2032 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
2033 {
2034 	drm_mode_config_cleanup(dm->ddev);
2035 	drm_atomic_private_obj_fini(&dm->atomic_obj);
2037 }
2038 
2039 /******************************************************************************
2040  * amdgpu_display_funcs functions
2041  *****************************************************************************/
2042 
2043 /*
2044  * dm_bandwidth_update - program display watermarks
2045  *
2046  * @adev: amdgpu_device pointer
2047  *
2048  * Calculate and program the display watermarks and line buffer allocation.
2049  */
2050 static void dm_bandwidth_update(struct amdgpu_device *adev)
2051 {
2052 	/* TODO: implement later */
2053 }
2054 
2055 static const struct amdgpu_display_funcs dm_display_funcs = {
2056 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
2057 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
2058 	.backlight_set_level = NULL, /* never called for DC */
2059 	.backlight_get_level = NULL, /* never called for DC */
2060 	.hpd_sense = NULL,/* called unconditionally */
2061 	.hpd_set_polarity = NULL, /* called unconditionally */
2062 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
2063 	.page_flip_get_scanoutpos =
2064 		dm_crtc_get_scanoutpos,/* called unconditionally */
2065 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
2066 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
2067 };
2068 
2069 #if defined(CONFIG_DEBUG_KERNEL_DC)
2070 
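/*
 * Debug hook: writing a non-zero value to the s3_debug sysfs attribute
 * fakes a resume (and fires a hotplug event), writing 0 fakes a suspend.
 */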
2071 static ssize_t s3_debug_store(struct device *device,
2072 			      struct device_attribute *attr,
2073 			      const char *buf,
2074 			      size_t count)
2075 {
2076 	int ret;
2077 	int s3_state;
2078 	struct pci_dev *pdev = to_pci_dev(device);
2079 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
2080 	struct amdgpu_device *adev = drm_dev->dev_private;
2081 
2082 	ret = kstrtoint(buf, 0, &s3_state);
2083 
2084 	if (ret == 0) {
2085 		if (s3_state) {
2086 			dm_resume(adev);
2087 			drm_kms_helper_hotplug_event(adev->ddev);
2088 		} else
2089 			dm_suspend(adev);
2090 	}
2091 
2092 	return ret == 0 ? count : 0;
2093 }
2094 
2095 DEVICE_ATTR_WO(s3_debug);
2096 
2097 #endif
2098 
2099 static int dm_early_init(void *handle)
2100 {
2101 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2102 
2103 	switch (adev->asic_type) {
2104 	case CHIP_BONAIRE:
2105 	case CHIP_HAWAII:
2106 		adev->mode_info.num_crtc = 6;
2107 		adev->mode_info.num_hpd = 6;
2108 		adev->mode_info.num_dig = 6;
2109 		adev->mode_info.plane_type = dm_plane_type_default;
2110 		break;
2111 	case CHIP_KAVERI:
2112 		adev->mode_info.num_crtc = 4;
2113 		adev->mode_info.num_hpd = 6;
2114 		adev->mode_info.num_dig = 7;
2115 		adev->mode_info.plane_type = dm_plane_type_default;
2116 		break;
2117 	case CHIP_KABINI:
2118 	case CHIP_MULLINS:
2119 		adev->mode_info.num_crtc = 2;
2120 		adev->mode_info.num_hpd = 6;
2121 		adev->mode_info.num_dig = 6;
2122 		adev->mode_info.plane_type = dm_plane_type_default;
2123 		break;
2124 	case CHIP_FIJI:
2125 	case CHIP_TONGA:
2126 		adev->mode_info.num_crtc = 6;
2127 		adev->mode_info.num_hpd = 6;
2128 		adev->mode_info.num_dig = 7;
2129 		adev->mode_info.plane_type = dm_plane_type_default;
2130 		break;
2131 	case CHIP_CARRIZO:
2132 		adev->mode_info.num_crtc = 3;
2133 		adev->mode_info.num_hpd = 6;
2134 		adev->mode_info.num_dig = 9;
2135 		adev->mode_info.plane_type = dm_plane_type_carizzo;
2136 		break;
2137 	case CHIP_STONEY:
2138 		adev->mode_info.num_crtc = 2;
2139 		adev->mode_info.num_hpd = 6;
2140 		adev->mode_info.num_dig = 9;
2141 		adev->mode_info.plane_type = dm_plane_type_stoney;
2142 		break;
2143 	case CHIP_POLARIS11:
2144 	case CHIP_POLARIS12:
2145 		adev->mode_info.num_crtc = 5;
2146 		adev->mode_info.num_hpd = 5;
2147 		adev->mode_info.num_dig = 5;
2148 		adev->mode_info.plane_type = dm_plane_type_default;
2149 		break;
2150 	case CHIP_POLARIS10:
2151 	case CHIP_VEGAM:
2152 		adev->mode_info.num_crtc = 6;
2153 		adev->mode_info.num_hpd = 6;
2154 		adev->mode_info.num_dig = 6;
2155 		adev->mode_info.plane_type = dm_plane_type_default;
2156 		break;
2157 	case CHIP_VEGA10:
2158 	case CHIP_VEGA12:
2159 	case CHIP_VEGA20:
2160 		adev->mode_info.num_crtc = 6;
2161 		adev->mode_info.num_hpd = 6;
2162 		adev->mode_info.num_dig = 6;
2163 		adev->mode_info.plane_type = dm_plane_type_default;
2164 		break;
2165 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2166 	case CHIP_RAVEN:
2167 		adev->mode_info.num_crtc = 4;
2168 		adev->mode_info.num_hpd = 4;
2169 		adev->mode_info.num_dig = 4;
2170 		adev->mode_info.plane_type = dm_plane_type_default;
2171 		break;
2172 #endif
2173 	default:
2174 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2175 		return -EINVAL;
2176 	}
2177 
2178 	amdgpu_dm_set_irq_funcs(adev);
2179 
2180 	if (adev->mode_info.funcs == NULL)
2181 		adev->mode_info.funcs = &dm_display_funcs;
2182 
2183 	/*
2184 	 * Note: Do NOT change adev->audio_endpt_rreg and
2185 	 * adev->audio_endpt_wreg because they are initialised in
2186 	 * amdgpu_device_init()
2187 	 */
2188 #if defined(CONFIG_DEBUG_KERNEL_DC)
2189 	device_create_file(
2190 		adev->ddev->dev,
2191 		&dev_attr_s3_debug);
2192 #endif
2193 
2194 	return 0;
2195 }
2196 
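/*
 * A modeset is only required when the DRM core flagged the CRTC state as
 * needing one and the CRTC is both enabled and active; otherwise the change
 * can be handled as a regular stream/plane update.
 */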
2197 static bool modeset_required(struct drm_crtc_state *crtc_state,
2198 			     struct dc_stream_state *new_stream,
2199 			     struct dc_stream_state *old_stream)
2200 {
2201 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
2202 		return false;
2203 
2204 	if (!crtc_state->enable)
2205 		return false;
2206 
2207 	return crtc_state->active;
2208 }
2209 
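/*
 * A modereset (stream teardown) is required when a modeset was flagged and
 * the CRTC is being disabled or deactivated.
 */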
2210 static bool modereset_required(struct drm_crtc_state *crtc_state)
2211 {
2212 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
2213 		return false;
2214 
2215 	return !crtc_state->enable || !crtc_state->active;
2216 }
2217 
2218 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
2219 {
2220 	drm_encoder_cleanup(encoder);
2221 	kfree(encoder);
2222 }
2223 
2224 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
2225 	.destroy = amdgpu_dm_encoder_destroy,
2226 };
2227 
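/*
 * Translate the DRM plane rectangles into DC rects. The src_* values are
 * 16.16 fixed point, so the fractional part is dropped. Returns false if
 * any resulting dimension is zero.
 */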
2228 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
2229 					struct dc_plane_state *plane_state)
2230 {
2231 	plane_state->src_rect.x = state->src_x >> 16;
2232 	plane_state->src_rect.y = state->src_y >> 16;
2233 	/* we ignore the mantissa for now and do not deal with floating pixels :( */
2234 	plane_state->src_rect.width = state->src_w >> 16;
2235 
2236 	if (plane_state->src_rect.width == 0)
2237 		return false;
2238 
2239 	plane_state->src_rect.height = state->src_h >> 16;
2240 	if (plane_state->src_rect.height == 0)
2241 		return false;
2242 
2243 	plane_state->dst_rect.x = state->crtc_x;
2244 	plane_state->dst_rect.y = state->crtc_y;
2245 
2246 	if (state->crtc_w == 0)
2247 		return false;
2248 
2249 	plane_state->dst_rect.width = state->crtc_w;
2250 
2251 	if (state->crtc_h == 0)
2252 		return false;
2253 
2254 	plane_state->dst_rect.height = state->crtc_h;
2255 
2256 	plane_state->clip_rect = plane_state->dst_rect;
2257 
2258 	switch (state->rotation & DRM_MODE_ROTATE_MASK) {
2259 	case DRM_MODE_ROTATE_0:
2260 		plane_state->rotation = ROTATION_ANGLE_0;
2261 		break;
2262 	case DRM_MODE_ROTATE_90:
2263 		plane_state->rotation = ROTATION_ANGLE_90;
2264 		break;
2265 	case DRM_MODE_ROTATE_180:
2266 		plane_state->rotation = ROTATION_ANGLE_180;
2267 		break;
2268 	case DRM_MODE_ROTATE_270:
2269 		plane_state->rotation = ROTATION_ANGLE_270;
2270 		break;
2271 	default:
2272 		plane_state->rotation = ROTATION_ANGLE_0;
2273 		break;
2274 	}
2275 
2276 	return true;
2277 }
2278 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
2279 		       uint64_t *tiling_flags)
2280 {
2281 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
2282 	int r = amdgpu_bo_reserve(rbo, false);
2283 
2284 	if (unlikely(r)) {
2285 		/* Don't show error message when returning -ERESTARTSYS */
2286 		if (r != -ERESTARTSYS)
2287 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
2288 		return r;
2289 	}
2290 
2291 	if (tiling_flags)
2292 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
2293 
2294 	amdgpu_bo_unreserve(rbo);
2295 
2296 	return r;
2297 }
2298 
2299 static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
2300 					 struct dc_plane_state *plane_state,
2301 					 const struct amdgpu_framebuffer *amdgpu_fb)
2302 {
2303 	uint64_t tiling_flags;
2304 	unsigned int awidth;
2305 	const struct drm_framebuffer *fb = &amdgpu_fb->base;
2306 	int ret = 0;
2307 	struct drm_format_name_buf format_name;
2308 
2309 	ret = get_fb_info(
2310 		amdgpu_fb,
2311 		&tiling_flags);
2312 
2313 	if (ret)
2314 		return ret;
2315 
2316 	switch (fb->format->format) {
2317 	case DRM_FORMAT_C8:
2318 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
2319 		break;
2320 	case DRM_FORMAT_RGB565:
2321 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
2322 		break;
2323 	case DRM_FORMAT_XRGB8888:
2324 	case DRM_FORMAT_ARGB8888:
2325 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
2326 		break;
2327 	case DRM_FORMAT_XRGB2101010:
2328 	case DRM_FORMAT_ARGB2101010:
2329 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
2330 		break;
2331 	case DRM_FORMAT_XBGR2101010:
2332 	case DRM_FORMAT_ABGR2101010:
2333 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
2334 		break;
2335 	case DRM_FORMAT_XBGR8888:
2336 	case DRM_FORMAT_ABGR8888:
2337 		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
2338 		break;
2339 	case DRM_FORMAT_NV21:
2340 		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
2341 		break;
2342 	case DRM_FORMAT_NV12:
2343 		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
2344 		break;
2345 	default:
2346 		DRM_ERROR("Unsupported screen format %s\n",
2347 			  drm_get_format_name(fb->format->format, &format_name));
2348 		return -EINVAL;
2349 	}
2350 
2351 	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2352 		plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
2353 		plane_state->plane_size.grph.surface_size.x = 0;
2354 		plane_state->plane_size.grph.surface_size.y = 0;
2355 		plane_state->plane_size.grph.surface_size.width = fb->width;
2356 		plane_state->plane_size.grph.surface_size.height = fb->height;
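		/* DRM pitches are in bytes, DC expects the pitch in pixels */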
2357 		plane_state->plane_size.grph.surface_pitch =
2358 				fb->pitches[0] / fb->format->cpp[0];
2359 		/* TODO: unhardcode */
2360 		plane_state->color_space = COLOR_SPACE_SRGB;
2361 
2362 	} else {
2363 		awidth = ALIGN(fb->width, 64);
2364 		plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
2365 		plane_state->plane_size.video.luma_size.x = 0;
2366 		plane_state->plane_size.video.luma_size.y = 0;
2367 		plane_state->plane_size.video.luma_size.width = awidth;
2368 		plane_state->plane_size.video.luma_size.height = fb->height;
2369 		/* TODO: unhardcode */
2370 		plane_state->plane_size.video.luma_pitch = awidth;
2371 
2372 		plane_state->plane_size.video.chroma_size.x = 0;
2373 		plane_state->plane_size.video.chroma_size.y = 0;
2374 		plane_state->plane_size.video.chroma_size.width = awidth;
2375 		plane_state->plane_size.video.chroma_size.height = fb->height;
2376 		plane_state->plane_size.video.chroma_pitch = awidth / 2;
2377 
2378 		/* TODO: unhardcode */
2379 		plane_state->color_space = COLOR_SPACE_YCBCR709;
2380 	}
2381 
2382 	memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
2383 
2384 	/* Fill GFX8 params */
2385 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
2386 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
2387 
2388 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2389 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2390 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2391 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2392 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2393 
2394 		/* XXX fix me for VI */
2395 		plane_state->tiling_info.gfx8.num_banks = num_banks;
2396 		plane_state->tiling_info.gfx8.array_mode =
2397 				DC_ARRAY_2D_TILED_THIN1;
2398 		plane_state->tiling_info.gfx8.tile_split = tile_split;
2399 		plane_state->tiling_info.gfx8.bank_width = bankw;
2400 		plane_state->tiling_info.gfx8.bank_height = bankh;
2401 		plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
2402 		plane_state->tiling_info.gfx8.tile_mode =
2403 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
2404 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
2405 			== DC_ARRAY_1D_TILED_THIN1) {
2406 		plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
2407 	}
2408 
2409 	plane_state->tiling_info.gfx8.pipe_config =
2410 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2411 
2412 	if (adev->asic_type == CHIP_VEGA10 ||
2413 	    adev->asic_type == CHIP_VEGA12 ||
2414 	    adev->asic_type == CHIP_VEGA20 ||
2415 	    adev->asic_type == CHIP_RAVEN) {
2416 		/* Fill GFX9 params */
2417 		plane_state->tiling_info.gfx9.num_pipes =
2418 			adev->gfx.config.gb_addr_config_fields.num_pipes;
2419 		plane_state->tiling_info.gfx9.num_banks =
2420 			adev->gfx.config.gb_addr_config_fields.num_banks;
2421 		plane_state->tiling_info.gfx9.pipe_interleave =
2422 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
2423 		plane_state->tiling_info.gfx9.num_shader_engines =
2424 			adev->gfx.config.gb_addr_config_fields.num_se;
2425 		plane_state->tiling_info.gfx9.max_compressed_frags =
2426 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
2427 		plane_state->tiling_info.gfx9.num_rb_per_se =
2428 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
2429 		plane_state->tiling_info.gfx9.swizzle =
2430 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2431 		plane_state->tiling_info.gfx9.shaderEnable = 1;
2432 	}
2433 
2434 	plane_state->visible = true;
2435 	plane_state->scaling_quality.h_taps_c = 0;
2436 	plane_state->scaling_quality.v_taps_c = 0;
2437 
2438 	/* is this needed? is plane_state zeroed at allocation? */
2439 	plane_state->scaling_quality.h_taps = 0;
2440 	plane_state->scaling_quality.v_taps = 0;
2441 	plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
2442 
2443 	return ret;
2444 
2445 }
2446 
2447 static int fill_plane_attributes(struct amdgpu_device *adev,
2448 				 struct dc_plane_state *dc_plane_state,
2449 				 struct drm_plane_state *plane_state,
2450 				 struct drm_crtc_state *crtc_state)
2451 {
2452 	const struct amdgpu_framebuffer *amdgpu_fb =
2453 		to_amdgpu_framebuffer(plane_state->fb);
2454 	const struct drm_crtc *crtc = plane_state->crtc;
2455 	int ret = 0;
2456 
2457 	if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
2458 		return -EINVAL;
2459 
2460 	ret = fill_plane_attributes_from_fb(
2461 		crtc->dev->dev_private,
2462 		dc_plane_state,
2463 		amdgpu_fb);
2464 
2465 	if (ret)
2466 		return ret;
2467 
2468 	/*
2469 	 * Always set input transfer function, since plane state is refreshed
2470 	 * every time.
2471 	 */
2472 	ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
2473 	if (ret) {
2474 		dc_transfer_func_release(dc_plane_state->in_transfer_func);
2475 		dc_plane_state->in_transfer_func = NULL;
2476 	}
2477 
2478 	return ret;
2479 }
2480 
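/*
 * Derive the stream src (viewport) and dst (addressable area) rectangles
 * from the requested mode and the connector's scaling/underscan state.
 *
 * For example, fitting a 1280x1024 source into a 1920x1080 timing with
 * RMX_ASPECT preserves the aspect ratio and yields a 1350x1080 destination
 * centered at x = (1920 - 1350) / 2 = 285.
 */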
2481 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2482 					   const struct dm_connector_state *dm_state,
2483 					   struct dc_stream_state *stream)
2484 {
2485 	enum amdgpu_rmx_type rmx_type;
2486 
2487 	struct rect src = { 0 }; /* viewport in composition space*/
2488 	struct rect dst = { 0 }; /* stream addressable area */
2489 
2490 	/* no mode. nothing to be done */
2491 	if (!mode)
2492 		return;
2493 
2494 	/* Full screen scaling by default */
2495 	src.width = mode->hdisplay;
2496 	src.height = mode->vdisplay;
2497 	dst.width = stream->timing.h_addressable;
2498 	dst.height = stream->timing.v_addressable;
2499 
2500 	if (dm_state) {
2501 		rmx_type = dm_state->scaling;
2502 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2503 			if (src.width * dst.height <
2504 					src.height * dst.width) {
2505 				/* height needs less upscaling/more downscaling */
2506 				dst.width = src.width *
2507 						dst.height / src.height;
2508 			} else {
2509 				/* width needs less upscaling/more downscaling */
2510 				dst.height = src.height *
2511 						dst.width / src.width;
2512 			}
2513 		} else if (rmx_type == RMX_CENTER) {
2514 			dst = src;
2515 		}
2516 
2517 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
2518 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
2519 
2520 		if (dm_state->underscan_enable) {
2521 			dst.x += dm_state->underscan_hborder / 2;
2522 			dst.y += dm_state->underscan_vborder / 2;
2523 			dst.width -= dm_state->underscan_hborder;
2524 			dst.height -= dm_state->underscan_vborder;
2525 		}
2526 	}
2527 
2528 	stream->src = src;
2529 	stream->dst = dst;
2530 
2531 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
2532 			dst.x, dst.y, dst.width, dst.height);
2533 
2534 }
2535 
2536 static enum dc_color_depth
2537 convert_color_depth_from_display_info(const struct drm_connector *connector)
2538 {
2539 	struct dm_connector_state *dm_conn_state =
2540 		to_dm_connector_state(connector->state);
2541 	uint32_t bpc = connector->display_info.bpc;
2542 
2543 	/* TODO: Remove this when there's support for max_bpc in drm */
2544 	if (dm_conn_state && bpc > dm_conn_state->max_bpc)
2545 		/* Round down to nearest even number. */
2546 		bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
2547 
2548 	switch (bpc) {
2549 	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing
		 */
2555 		return COLOR_DEPTH_888;
2556 	case 6:
2557 		return COLOR_DEPTH_666;
2558 	case 8:
2559 		return COLOR_DEPTH_888;
2560 	case 10:
2561 		return COLOR_DEPTH_101010;
2562 	case 12:
2563 		return COLOR_DEPTH_121212;
2564 	case 14:
2565 		return COLOR_DEPTH_141414;
2566 	case 16:
2567 		return COLOR_DEPTH_161616;
2568 	default:
2569 		return COLOR_DEPTH_UNDEFINED;
2570 	}
2571 }
2572 
2573 static enum dc_aspect_ratio
2574 get_aspect_ratio(const struct drm_display_mode *mode_in)
2575 {
2576 	/* 1-1 mapping, since both enums follow the HDMI spec. */
2577 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
2578 }
2579 
2580 static enum dc_color_space
2581 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2582 {
2583 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
2584 
2585 	switch (dc_crtc_timing->pixel_encoding)	{
2586 	case PIXEL_ENCODING_YCBCR422:
2587 	case PIXEL_ENCODING_YCBCR444:
2588 	case PIXEL_ENCODING_YCBCR420:
2589 	{
		/*
		 * According to the HDMI spec, 27030 kHz is the separation
		 * point between HDTV and SDTV; we use YCbCr709 and YCbCr601
		 * respectively.
		 */
2595 		if (dc_crtc_timing->pix_clk_khz > 27030) {
2596 			if (dc_crtc_timing->flags.Y_ONLY)
2597 				color_space =
2598 					COLOR_SPACE_YCBCR709_LIMITED;
2599 			else
2600 				color_space = COLOR_SPACE_YCBCR709;
2601 		} else {
2602 			if (dc_crtc_timing->flags.Y_ONLY)
2603 				color_space =
2604 					COLOR_SPACE_YCBCR601_LIMITED;
2605 			else
2606 				color_space = COLOR_SPACE_YCBCR601;
2607 		}
2608 
2609 	}
2610 	break;
2611 	case PIXEL_ENCODING_RGB:
2612 		color_space = COLOR_SPACE_SRGB;
2613 		break;
2614 
2615 	default:
2616 		WARN_ON(1);
2617 		break;
2618 	}
2619 
2620 	return color_space;
2621 }
2622 
2623 static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
2624 {
2625 	if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2626 		return;
2627 
2628 	timing_out->display_color_depth--;
2629 }
2630 
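/*
 * Reduce the colour depth until the depth-scaled pixel clock fits within the
 * sink's maximum TMDS clock. For example, a 594000 kHz mode at 10 bpc
 * normalizes to 594000 * 30 / 24 = 742500 kHz; against a (typical)
 * 600000 kHz TMDS limit the depth would be dropped back to 8 bpc.
 */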
2631 static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
2632 						const struct drm_display_info *info)
2633 {
2634 	int normalized_clk;
2635 	if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2636 		return;
2637 	do {
2638 		normalized_clk = timing_out->pix_clk_khz;
2639 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
2640 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
2641 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec, based on the colour depth */
2643 		switch (timing_out->display_color_depth) {
2644 		case COLOR_DEPTH_101010:
2645 			normalized_clk = (normalized_clk * 30) / 24;
2646 			break;
2647 		case COLOR_DEPTH_121212:
2648 			normalized_clk = (normalized_clk * 36) / 24;
2649 			break;
2650 		case COLOR_DEPTH_161616:
2651 			normalized_clk = (normalized_clk * 48) / 24;
2652 			break;
2653 		default:
2654 			return;
2655 		}
2656 		if (normalized_clk <= info->max_tmds_clock)
2657 			return;
2658 		reduce_mode_colour_depth(timing_out);
2659 
2660 	} while (timing_out->display_color_depth > COLOR_DEPTH_888);
2661 
2662 }
2663 
2664 static void
2665 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2666 					     const struct drm_display_mode *mode_in,
2667 					     const struct drm_connector *connector,
2668 					     const struct dc_stream_state *old_stream)
2669 {
2670 	struct dc_crtc_timing *timing_out = &stream->timing;
2671 	const struct drm_display_info *info = &connector->display_info;
2672 
2673 	memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2674 
2675 	timing_out->h_border_left = 0;
2676 	timing_out->h_border_right = 0;
2677 	timing_out->v_border_top = 0;
2678 	timing_out->v_border_bottom = 0;
2679 	/* TODO: un-hardcode */
2680 	if (drm_mode_is_420_only(info, mode_in)
2681 			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2682 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
2683 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2684 			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2685 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2686 	else
2687 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2688 
2689 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2690 	timing_out->display_color_depth = convert_color_depth_from_display_info(
2691 			connector);
2692 	timing_out->scan_type = SCANNING_TYPE_NODATA;
2693 	timing_out->hdmi_vic = 0;
2694 
	if (old_stream) {
2696 		timing_out->vic = old_stream->timing.vic;
2697 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
2698 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
2699 	} else {
2700 		timing_out->vic = drm_match_cea_mode(mode_in);
2701 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2702 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2703 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2704 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2705 	}
2706 
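	/*
	 * For example, the CEA 1920x1080@60 timing (hdisplay 1920, hsync_start
	 * 2008, hsync_end 2052, htotal 2200) yields a horizontal front porch
	 * of 88 and a sync width of 44 pixels in the computations below.
	 */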
2707 	timing_out->h_addressable = mode_in->crtc_hdisplay;
2708 	timing_out->h_total = mode_in->crtc_htotal;
2709 	timing_out->h_sync_width =
2710 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
2711 	timing_out->h_front_porch =
2712 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
2713 	timing_out->v_total = mode_in->crtc_vtotal;
2714 	timing_out->v_addressable = mode_in->crtc_vdisplay;
2715 	timing_out->v_front_porch =
2716 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2717 	timing_out->v_sync_width =
2718 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2719 	timing_out->pix_clk_khz = mode_in->crtc_clock;
2720 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2721 
2722 	stream->output_color_space = get_output_color_space(timing_out);
2723 
2724 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
2725 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
2726 	if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2727 		adjust_colour_depth_from_display_info(timing_out, info);
2728 }
2729 
2730 static void fill_audio_info(struct audio_info *audio_info,
2731 			    const struct drm_connector *drm_connector,
2732 			    const struct dc_sink *dc_sink)
2733 {
2734 	int i = 0;
2735 	int cea_revision = 0;
2736 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
2737 
2738 	audio_info->manufacture_id = edid_caps->manufacturer_id;
2739 	audio_info->product_id = edid_caps->product_id;
2740 
2741 	cea_revision = drm_connector->display_info.cea_rev;
2742 
2743 	strscpy(audio_info->display_name,
2744 		edid_caps->display_name,
2745 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
2746 
2747 	if (cea_revision >= 3) {
2748 		audio_info->mode_count = edid_caps->audio_mode_count;
2749 
2750 		for (i = 0; i < audio_info->mode_count; ++i) {
2751 			audio_info->modes[i].format_code =
2752 					(enum audio_format_code)
2753 					(edid_caps->audio_modes[i].format_code);
2754 			audio_info->modes[i].channel_count =
2755 					edid_caps->audio_modes[i].channel_count;
2756 			audio_info->modes[i].sample_rates.all =
2757 					edid_caps->audio_modes[i].sample_rate;
2758 			audio_info->modes[i].sample_size =
2759 					edid_caps->audio_modes[i].sample_size;
2760 		}
2761 	}
2762 
2763 	audio_info->flags.all = edid_caps->speaker_flags;
2764 
2765 	/* TODO: We only check for the progressive mode, check for interlace mode too */
2766 	if (drm_connector->latency_present[0]) {
2767 		audio_info->video_latency = drm_connector->video_latency[0];
2768 		audio_info->audio_latency = drm_connector->audio_latency[0];
2769 	}
2770 
2771 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2772 
2773 }
2774 
2775 static void
2776 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
2777 				      struct drm_display_mode *dst_mode)
2778 {
2779 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
2780 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
2781 	dst_mode->crtc_clock = src_mode->crtc_clock;
2782 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
2783 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
2784 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
2785 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
2786 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
2787 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
2788 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
2789 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
2790 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
2791 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
2792 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
2793 }
2794 
2795 static void
2796 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2797 					const struct drm_display_mode *native_mode,
2798 					bool scale_enabled)
2799 {
2800 	if (scale_enabled) {
2801 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2802 	} else if (native_mode->clock == drm_mode->clock &&
2803 			native_mode->htotal == drm_mode->htotal &&
2804 			native_mode->vtotal == drm_mode->vtotal) {
2805 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2806 	} else {
		/* no scaling and not an amdgpu-inserted mode, no need to patch */
2808 	}
2809 }
2810 
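/*
 * Create a virtual sink so that a stream can still be constructed (e.g. for
 * headless or forced-on connectors) when no physical sink is attached.
 */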
2811 static struct dc_sink *
2812 create_fake_sink(struct amdgpu_dm_connector *aconnector)
2813 {
2814 	struct dc_sink_init_data sink_init_data = { 0 };
2815 	struct dc_sink *sink = NULL;
2816 	sink_init_data.link = aconnector->dc_link;
2817 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2818 
2819 	sink = dc_sink_create(&sink_init_data);
2820 	if (!sink) {
2821 		DRM_ERROR("Failed to create sink!\n");
2822 		return NULL;
2823 	}
2824 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2825 
2826 	return sink;
2827 }
2828 
2829 static void set_multisync_trigger_params(
2830 		struct dc_stream_state *stream)
2831 {
2832 	if (stream->triggered_crtc_reset.enabled) {
2833 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
2834 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
2835 	}
2836 }
2837 
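/*
 * Pick the stream with the highest refresh rate as the multisync master.
 * The refresh rate is derived from the timing, e.g. a 148500 kHz pixel clock
 * with a 2200x1125 total gives 148500 * 1000 / (2200 * 1125) = 60 Hz.
 */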
2838 static void set_master_stream(struct dc_stream_state *stream_set[],
2839 			      int stream_count)
2840 {
2841 	int j, highest_rfr = 0, master_stream = 0;
2842 
2843 	for (j = 0;  j < stream_count; j++) {
2844 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
2845 			int refresh_rate = 0;
2846 
2847 			refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
2848 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
2849 			if (refresh_rate > highest_rfr) {
2850 				highest_rfr = refresh_rate;
2851 				master_stream = j;
2852 			}
2853 		}
2854 	}
2855 	for (j = 0;  j < stream_count; j++) {
2856 		if (stream_set[j])
2857 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
2858 	}
2859 }
2860 
2861 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
2862 {
2863 	int i = 0;
2864 
2865 	if (context->stream_count < 2)
2866 		return;
2867 	for (i = 0; i < context->stream_count ; i++) {
2868 		if (!context->streams[i])
2869 			continue;
2870 		/*
2871 		 * TODO: add a function to read AMD VSDB bits and set
2872 		 * crtc_sync_master.multi_sync_enabled flag
2873 		 * For now it's set to false
2874 		 */
2875 		set_multisync_trigger_params(context->streams[i]);
2876 	}
2877 	set_master_stream(context->streams, context->stream_count);
2878 }
2879 
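/*
 * Build a dc_stream_state for the given connector and mode. When no sink is
 * attached (and the connector is not an MST port) a virtual sink is created
 * so validation can still proceed; when only scaling changed, the VIC and
 * sync polarities are carried over from the old stream.
 */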
2880 static struct dc_stream_state *
2881 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2882 		       const struct drm_display_mode *drm_mode,
2883 		       const struct dm_connector_state *dm_state,
2884 		       const struct dc_stream_state *old_stream)
2885 {
2886 	struct drm_display_mode *preferred_mode = NULL;
2887 	struct drm_connector *drm_connector;
2888 	struct dc_stream_state *stream = NULL;
2889 	struct drm_display_mode mode = *drm_mode;
2890 	bool native_mode_found = false;
2891 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
2892 	int mode_refresh;
2893 	int preferred_refresh = 0;
2894 
2895 	struct dc_sink *sink = NULL;
2896 	if (aconnector == NULL) {
2897 		DRM_ERROR("aconnector is NULL!\n");
2898 		return stream;
2899 	}
2900 
2901 	drm_connector = &aconnector->base;
2902 
2903 	if (!aconnector->dc_sink) {
2904 		if (!aconnector->mst_port) {
2905 			sink = create_fake_sink(aconnector);
2906 			if (!sink)
2907 				return stream;
2908 		}
2909 	} else {
2910 		sink = aconnector->dc_sink;
2911 	}
2912 
2913 	stream = dc_create_stream_for_sink(sink);
2914 
2915 	if (stream == NULL) {
2916 		DRM_ERROR("Failed to create stream for sink!\n");
2917 		goto finish;
2918 	}
2919 
2920 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
2921 		/* Search for preferred mode */
2922 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
2923 			native_mode_found = true;
2924 			break;
2925 		}
2926 	}
2927 	if (!native_mode_found)
2928 		preferred_mode = list_first_entry_or_null(
2929 				&aconnector->base.modes,
2930 				struct drm_display_mode,
2931 				head);
2932 
2933 	mode_refresh = drm_mode_vrefresh(&mode);
2934 
2935 	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the mode list may not be populated in time.
		 */
2942 		DRM_DEBUG_DRIVER("No preferred mode found\n");
2943 	} else {
2944 		decide_crtc_timing_for_drm_display_mode(
2945 				&mode, preferred_mode,
2946 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
2947 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
2948 	}
2949 
2950 	if (!dm_state)
2951 		drm_mode_set_crtcinfo(&mode, 0);
2952 
	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
2957 	if (!scale || mode_refresh != preferred_refresh)
2958 		fill_stream_properties_from_drm_display_mode(stream,
2959 			&mode, &aconnector->base, NULL);
2960 	else
2961 		fill_stream_properties_from_drm_display_mode(stream,
2962 			&mode, &aconnector->base, old_stream);
2963 
2964 	update_stream_scaling_settings(&mode, dm_state, stream);
2965 
2966 	fill_audio_info(
2967 		&stream->audio_info,
2968 		drm_connector,
2969 		sink);
2970 
2971 	update_stream_signal(stream);
2972 
2973 	if (dm_state && dm_state->freesync_capable)
2974 		stream->ignore_msa_timing_param = true;
2975 
2976 finish:
2977 	if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
2978 		dc_sink_release(sink);
2979 
2980 	return stream;
2981 }
2982 
2983 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
2984 {
2985 	drm_crtc_cleanup(crtc);
2986 	kfree(crtc);
2987 }
2988 
2989 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
2990 				  struct drm_crtc_state *state)
2991 {
2992 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
2993 
	/* TODO: Destroy dc_stream objects when the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
3003 }
3004 
3005 static void dm_crtc_reset_state(struct drm_crtc *crtc)
3006 {
3007 	struct dm_crtc_state *state;
3008 
3009 	if (crtc->state)
3010 		dm_crtc_destroy_state(crtc, crtc->state);
3011 
3012 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3013 	if (WARN_ON(!state))
3014 		return;
3015 
3016 	crtc->state = &state->base;
3017 	crtc->state->crtc = crtc;
3018 
3019 }
3020 
3021 static struct drm_crtc_state *
3022 dm_crtc_duplicate_state(struct drm_crtc *crtc)
3023 {
3024 	struct dm_crtc_state *state, *cur;
3025 
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
3030 
3031 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3032 	if (!state)
3033 		return NULL;
3034 
3035 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
3036 
3037 	if (cur->stream) {
3038 		state->stream = cur->stream;
3039 		dc_stream_retain(state->stream);
3040 	}
3041 
3042 	state->vrr_params = cur->vrr_params;
3043 	state->vrr_infopacket = cur->vrr_infopacket;
3044 	state->abm_level = cur->abm_level;
3045 	state->vrr_supported = cur->vrr_supported;
3046 	state->freesync_config = cur->freesync_config;
3047 	state->crc_enabled = cur->crc_enabled;
3048 
	/* TODO: Duplicate dc_stream after the stream object is flattened */
3050 
3051 	return &state->base;
3052 }
3053 
3054 
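/* Map the CRTC to its OTG vblank interrupt source and ask DC to (un)mask it */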
3055 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
3056 {
3057 	enum dc_irq_source irq_source;
3058 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3059 	struct amdgpu_device *adev = crtc->dev->dev_private;
3060 
3061 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
3062 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3063 }
3064 
3065 static int dm_enable_vblank(struct drm_crtc *crtc)
3066 {
3067 	return dm_set_vblank(crtc, true);
3068 }
3069 
3070 static void dm_disable_vblank(struct drm_crtc *crtc)
3071 {
3072 	dm_set_vblank(crtc, false);
3073 }
3074 
/* Only the options currently available for the driver are implemented */
3076 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
3077 	.reset = dm_crtc_reset_state,
3078 	.destroy = amdgpu_dm_crtc_destroy,
3079 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
3080 	.set_config = drm_atomic_helper_set_config,
3081 	.page_flip = drm_atomic_helper_page_flip,
3082 	.atomic_duplicate_state = dm_crtc_duplicate_state,
3083 	.atomic_destroy_state = dm_crtc_destroy_state,
3084 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
3085 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
3086 	.enable_vblank = dm_enable_vblank,
3087 	.disable_vblank = dm_disable_vblank,
3088 };
3089 
3090 static enum drm_connector_status
3091 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
3092 {
3093 	bool connected;
3094 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3095 
	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl, which
	 *    makes it a bad place for *any* MST-related activity.
	 */
3102 
3103 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
3104 	    !aconnector->fake_enable)
3105 		connected = (aconnector->dc_sink != NULL);
3106 	else
3107 		connected = (aconnector->base.force == DRM_FORCE_ON);
3108 
3109 	return (connected ? connector_status_connected :
3110 			connector_status_disconnected);
3111 }
3112 
3113 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
3114 					    struct drm_connector_state *connector_state,
3115 					    struct drm_property *property,
3116 					    uint64_t val)
3117 {
3118 	struct drm_device *dev = connector->dev;
3119 	struct amdgpu_device *adev = dev->dev_private;
3120 	struct dm_connector_state *dm_old_state =
3121 		to_dm_connector_state(connector->state);
3122 	struct dm_connector_state *dm_new_state =
3123 		to_dm_connector_state(connector_state);
3124 
3125 	int ret = -EINVAL;
3126 
3127 	if (property == dev->mode_config.scaling_mode_property) {
3128 		enum amdgpu_rmx_type rmx_type;
3129 
3130 		switch (val) {
3131 		case DRM_MODE_SCALE_CENTER:
3132 			rmx_type = RMX_CENTER;
3133 			break;
3134 		case DRM_MODE_SCALE_ASPECT:
3135 			rmx_type = RMX_ASPECT;
3136 			break;
3137 		case DRM_MODE_SCALE_FULLSCREEN:
3138 			rmx_type = RMX_FULL;
3139 			break;
3140 		case DRM_MODE_SCALE_NONE:
3141 		default:
3142 			rmx_type = RMX_OFF;
3143 			break;
3144 		}
3145 
3146 		if (dm_old_state->scaling == rmx_type)
3147 			return 0;
3148 
3149 		dm_new_state->scaling = rmx_type;
3150 		ret = 0;
3151 	} else if (property == adev->mode_info.underscan_hborder_property) {
3152 		dm_new_state->underscan_hborder = val;
3153 		ret = 0;
3154 	} else if (property == adev->mode_info.underscan_vborder_property) {
3155 		dm_new_state->underscan_vborder = val;
3156 		ret = 0;
3157 	} else if (property == adev->mode_info.underscan_property) {
3158 		dm_new_state->underscan_enable = val;
3159 		ret = 0;
3160 	} else if (property == adev->mode_info.max_bpc_property) {
3161 		dm_new_state->max_bpc = val;
3162 		ret = 0;
3163 	} else if (property == adev->mode_info.abm_level_property) {
3164 		dm_new_state->abm_level = val;
3165 		ret = 0;
3166 	}
3167 
3168 	return ret;
3169 }
3170 
3171 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
3172 					    const struct drm_connector_state *state,
3173 					    struct drm_property *property,
3174 					    uint64_t *val)
3175 {
3176 	struct drm_device *dev = connector->dev;
3177 	struct amdgpu_device *adev = dev->dev_private;
3178 	struct dm_connector_state *dm_state =
3179 		to_dm_connector_state(state);
3180 	int ret = -EINVAL;
3181 
3182 	if (property == dev->mode_config.scaling_mode_property) {
3183 		switch (dm_state->scaling) {
3184 		case RMX_CENTER:
3185 			*val = DRM_MODE_SCALE_CENTER;
3186 			break;
3187 		case RMX_ASPECT:
3188 			*val = DRM_MODE_SCALE_ASPECT;
3189 			break;
3190 		case RMX_FULL:
3191 			*val = DRM_MODE_SCALE_FULLSCREEN;
3192 			break;
3193 		case RMX_OFF:
3194 		default:
3195 			*val = DRM_MODE_SCALE_NONE;
3196 			break;
3197 		}
3198 		ret = 0;
3199 	} else if (property == adev->mode_info.underscan_hborder_property) {
3200 		*val = dm_state->underscan_hborder;
3201 		ret = 0;
3202 	} else if (property == adev->mode_info.underscan_vborder_property) {
3203 		*val = dm_state->underscan_vborder;
3204 		ret = 0;
3205 	} else if (property == adev->mode_info.underscan_property) {
3206 		*val = dm_state->underscan_enable;
3207 		ret = 0;
3208 	} else if (property == adev->mode_info.max_bpc_property) {
3209 		*val = dm_state->max_bpc;
3210 		ret = 0;
3211 	} else if (property == adev->mode_info.abm_level_property) {
3212 		*val = dm_state->abm_level;
3213 		ret = 0;
3214 	}
3215 
3216 	return ret;
3217 }
3218 
3219 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
3220 {
3221 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3222 	const struct dc_link *link = aconnector->dc_link;
3223 	struct amdgpu_device *adev = connector->dev->dev_private;
3224 	struct amdgpu_display_manager *dm = &adev->dm;
3225 
3226 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3227 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3228 
3229 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3230 	    link->type != dc_connection_none &&
3231 	    dm->backlight_dev) {
3232 		backlight_device_unregister(dm->backlight_dev);
3233 		dm->backlight_dev = NULL;
3234 	}
3235 #endif
3236 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
3237 	drm_connector_unregister(connector);
3238 	drm_connector_cleanup(connector);
3239 	kfree(connector);
3240 }
3241 
3242 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
3243 {
3244 	struct dm_connector_state *state =
3245 		to_dm_connector_state(connector->state);
3246 
3247 	if (connector->state)
3248 		__drm_atomic_helper_connector_destroy_state(connector->state);
3249 
3250 	kfree(state);
3251 
3252 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3253 
3254 	if (state) {
3255 		state->scaling = RMX_OFF;
3256 		state->underscan_enable = false;
3257 		state->underscan_hborder = 0;
3258 		state->underscan_vborder = 0;
3259 		state->max_bpc = 8;
3260 
3261 		__drm_atomic_helper_connector_reset(connector, &state->base);
3262 	}
3263 }
3264 
3265 struct drm_connector_state *
3266 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
3267 {
3268 	struct dm_connector_state *state =
3269 		to_dm_connector_state(connector->state);
3270 
3271 	struct dm_connector_state *new_state =
3272 			kmemdup(state, sizeof(*state), GFP_KERNEL);
3273 
3274 	if (!new_state)
3275 		return NULL;
3276 
3277 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
3278 
3279 	new_state->freesync_capable = state->freesync_capable;
3280 	new_state->abm_level = state->abm_level;
3281 	new_state->scaling = state->scaling;
3282 	new_state->underscan_enable = state->underscan_enable;
3283 	new_state->underscan_hborder = state->underscan_hborder;
3284 	new_state->underscan_vborder = state->underscan_vborder;
3285 	new_state->max_bpc = state->max_bpc;
3286 
3287 	return &new_state->base;
3288 }
3289 
3290 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
3291 	.reset = amdgpu_dm_connector_funcs_reset,
3292 	.detect = amdgpu_dm_connector_detect,
3293 	.fill_modes = drm_helper_probe_single_connector_modes,
3294 	.destroy = amdgpu_dm_connector_destroy,
3295 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
3296 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
3297 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
3298 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
3299 };
3300 
3301 static int get_modes(struct drm_connector *connector)
3302 {
3303 	return amdgpu_dm_connector_get_modes(connector);
3304 }
3305 
3306 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
3307 {
3308 	struct dc_sink_init_data init_params = {
3309 			.link = aconnector->dc_link,
3310 			.sink_signal = SIGNAL_TYPE_VIRTUAL
3311 	};
3312 	struct edid *edid;
3313 
3314 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
3316 				aconnector->base.name);
3317 
3318 		aconnector->base.force = DRM_FORCE_OFF;
3319 		aconnector->base.override_edid = false;
3320 		return;
3321 	}
3322 
3323 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
3324 
3325 	aconnector->edid = edid;
3326 
3327 	aconnector->dc_em_sink = dc_link_add_remote_sink(
3328 		aconnector->dc_link,
3329 		(uint8_t *)edid,
3330 		(edid->extensions + 1) * EDID_LENGTH,
3331 		&init_params);
3332 
3333 	if (aconnector->base.force == DRM_FORCE_ON)
3334 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
3335 		aconnector->dc_link->local_sink :
3336 		aconnector->dc_em_sink;
3337 }
3338 
3339 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
3340 {
3341 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
3342 
	/*
	 * In case of headless boot with force on for DP managed connector,
	 * those settings have to be != 0 to get an initial modeset.
	 */
3347 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
3348 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
3349 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
3350 	}
3351 
3352 
3353 	aconnector->base.override_edid = true;
3354 	create_eml_sink(aconnector);
3355 }
3356 
3357 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3358 				   struct drm_display_mode *mode)
3359 {
3360 	int result = MODE_ERROR;
3361 	struct dc_sink *dc_sink;
3362 	struct amdgpu_device *adev = connector->dev->dev_private;
3363 	/* TODO: Unhardcode stream count */
3364 	struct dc_stream_state *stream;
3365 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3366 	enum dc_status dc_result = DC_OK;
3367 
3368 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
3369 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
3370 		return result;
3371 
	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
3376 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
3377 		!aconnector->dc_em_sink)
3378 		handle_edid_mgmt(aconnector);
3379 
3380 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
3381 
3382 	if (dc_sink == NULL) {
3383 		DRM_ERROR("dc_sink is NULL!\n");
3384 		goto fail;
3385 	}
3386 
3387 	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
3388 	if (stream == NULL) {
3389 		DRM_ERROR("Failed to create stream for sink!\n");
3390 		goto fail;
3391 	}
3392 
3393 	dc_result = dc_validate_stream(adev->dm.dc, stream);
3394 
3395 	if (dc_result == DC_OK)
3396 		result = MODE_OK;
3397 	else
		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
			      mode->hdisplay,
			      mode->vdisplay,
			      mode->clock,
			      dc_result);
3403 
3404 	dc_stream_release(stream);
3405 
3406 fail:
	/* TODO: error handling */
3408 	return result;
3409 }
3410 
3411 static const struct drm_connector_helper_funcs
3412 amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second, bigger display in FB Con mode, the bigger
	 * resolution modes will be filtered by drm_mode_validate_size(), and
	 * those modes are missing after the user starts lightdm. So we need
	 * to renew the mode list in the get_modes callback, not just return
	 * the mode count.
	 */
3419 	.get_modes = get_modes,
3420 	.mode_valid = amdgpu_dm_connector_mode_valid,
3421 };
3422 
3423 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
3424 {
3425 }
3426 
3427 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
3428 				       struct drm_crtc_state *state)
3429 {
3430 	struct amdgpu_device *adev = crtc->dev->dev_private;
3431 	struct dc *dc = adev->dm.dc;
3432 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
3433 	int ret = -EINVAL;
3434 
3435 	if (unlikely(!dm_crtc_state->stream &&
3436 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
3437 		WARN_ON(1);
3438 		return ret;
3439 	}
3440 
3441 	/* In some use cases, like reset, no stream is attached */
3442 	if (!dm_crtc_state->stream)
3443 		return 0;
3444 
3445 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
3446 		return 0;
3447 
3448 	return ret;
3449 }
3450 
3451 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
3452 				      const struct drm_display_mode *mode,
3453 				      struct drm_display_mode *adjusted_mode)
3454 {
3455 	return true;
3456 }
3457 
3458 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
3459 	.disable = dm_crtc_helper_disable,
3460 	.atomic_check = dm_crtc_helper_atomic_check,
3461 	.mode_fixup = dm_crtc_helper_mode_fixup
3462 };
3463 
3464 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
3465 {
3466 
3467 }
3468 
3469 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
3470 					  struct drm_crtc_state *crtc_state,
3471 					  struct drm_connector_state *conn_state)
3472 {
3473 	return 0;
3474 }
3475 
3476 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
3477 	.disable = dm_encoder_helper_disable,
3478 	.atomic_check = dm_encoder_helper_atomic_check
3479 };
3480 
3481 static void dm_drm_plane_reset(struct drm_plane *plane)
3482 {
3483 	struct dm_plane_state *amdgpu_state = NULL;
3484 
3485 	if (plane->state)
3486 		plane->funcs->atomic_destroy_state(plane, plane->state);
3487 
3488 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
3489 	WARN_ON(amdgpu_state == NULL);
3490 
3491 	if (amdgpu_state) {
3492 		plane->state = &amdgpu_state->base;
3493 		plane->state->plane = plane;
3494 		plane->state->rotation = DRM_MODE_ROTATE_0;
3495 	}
3496 }
3497 
3498 static struct drm_plane_state *
3499 dm_drm_plane_duplicate_state(struct drm_plane *plane)
3500 {
3501 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
3502 
3503 	old_dm_plane_state = to_dm_plane_state(plane->state);
3504 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
3505 	if (!dm_plane_state)
3506 		return NULL;
3507 
3508 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
3509 
3510 	if (old_dm_plane_state->dc_state) {
3511 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
3512 		dc_plane_state_retain(dm_plane_state->dc_state);
3513 	}
3514 
3515 	return &dm_plane_state->base;
3516 }
3517 
3518 void dm_drm_plane_destroy_state(struct drm_plane *plane,
3519 				struct drm_plane_state *state)
3520 {
3521 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3522 
3523 	if (dm_plane_state->dc_state)
3524 		dc_plane_state_release(dm_plane_state->dc_state);
3525 
3526 	drm_atomic_helper_plane_destroy_state(plane, state);
3527 }
3528 
3529 static const struct drm_plane_funcs dm_plane_funcs = {
3530 	.update_plane	= drm_atomic_helper_update_plane,
3531 	.disable_plane	= drm_atomic_helper_disable_plane,
3532 	.destroy	= drm_primary_helper_destroy,
3533 	.reset = dm_drm_plane_reset,
3534 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
3535 	.atomic_destroy_state = dm_drm_plane_destroy_state,
3536 };
3537 
3538 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
3539 				      struct drm_plane_state *new_state)
3540 {
3541 	struct amdgpu_framebuffer *afb;
3542 	struct drm_gem_object *obj;
3543 	struct amdgpu_device *adev;
3544 	struct amdgpu_bo *rbo;
3545 	uint64_t chroma_addr = 0;
3546 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
3547 	unsigned int awidth;
3548 	uint32_t domain;
3549 	int r;
3550 
3551 	dm_plane_state_old = to_dm_plane_state(plane->state);
3552 	dm_plane_state_new = to_dm_plane_state(new_state);
3553 
3554 	if (!new_state->fb) {
3555 		DRM_DEBUG_DRIVER("No FB bound\n");
3556 		return 0;
3557 	}
3558 
3559 	afb = to_amdgpu_framebuffer(new_state->fb);
3560 	obj = new_state->fb->obj[0];
3561 	rbo = gem_to_amdgpu_bo(obj);
3562 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
3563 	r = amdgpu_bo_reserve(rbo, false);
3564 	if (unlikely(r != 0))
3565 		return r;
3566 
3567 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
3568 		domain = amdgpu_display_supported_domains(adev);
3569 	else
3570 		domain = AMDGPU_GEM_DOMAIN_VRAM;
3571 
3572 	r = amdgpu_bo_pin(rbo, domain);
3573 	if (unlikely(r != 0)) {
3574 		if (r != -ERESTARTSYS)
3575 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
3576 		amdgpu_bo_unreserve(rbo);
3577 		return r;
3578 	}
3579 
3580 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
3581 	if (unlikely(r != 0)) {
3582 		amdgpu_bo_unpin(rbo);
3583 		amdgpu_bo_unreserve(rbo);
3584 		DRM_ERROR("%p bind failed\n", rbo);
3585 		return r;
3586 	}
3587 	amdgpu_bo_unreserve(rbo);
3588 
3589 	afb->address = amdgpu_bo_gpu_offset(rbo);
3590 
3591 	amdgpu_bo_ref(rbo);
3592 
3593 	if (dm_plane_state_new->dc_state &&
3594 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
3595 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
3596 
3597 		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3598 			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
3599 			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
3600 		} else {
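			/*
			 * Semi-planar YUV (NV12/NV21): the chroma plane
			 * immediately follows the luma plane, whose pitch is
			 * aligned to 64 pixels.
			 */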
3601 			awidth = ALIGN(new_state->fb->width, 64);
3602 			plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3603 			plane_state->address.video_progressive.luma_addr.low_part
3604 							= lower_32_bits(afb->address);
3605 			plane_state->address.video_progressive.luma_addr.high_part
3606 							= upper_32_bits(afb->address);
3607 			chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
3608 			plane_state->address.video_progressive.chroma_addr.low_part
3609 							= lower_32_bits(chroma_addr);
3610 			plane_state->address.video_progressive.chroma_addr.high_part
3611 							= upper_32_bits(chroma_addr);
3612 		}
3613 	}
3614 
3615 	return 0;
3616 }
3617 
3618 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
3619 				       struct drm_plane_state *old_state)
3620 {
3621 	struct amdgpu_bo *rbo;
3622 	int r;
3623 
3624 	if (!old_state->fb)
3625 		return;
3626 
3627 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
3628 	r = amdgpu_bo_reserve(rbo, false);
3629 	if (unlikely(r)) {
3630 		DRM_ERROR("failed to reserve rbo before unpin\n");
3631 		return;
3632 	}
3633 
3634 	amdgpu_bo_unpin(rbo);
3635 	amdgpu_bo_unreserve(rbo);
3636 	amdgpu_bo_unref(&rbo);
3637 }
3638 
3639 static int dm_plane_atomic_check(struct drm_plane *plane,
3640 				 struct drm_plane_state *state)
3641 {
3642 	struct amdgpu_device *adev = plane->dev->dev_private;
3643 	struct dc *dc = adev->dm.dc;
3644 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3645 
3646 	if (!dm_plane_state->dc_state)
3647 		return 0;
3648 
3649 	if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
3650 		return -EINVAL;
3651 
3652 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3653 		return 0;
3654 
3655 	return -EINVAL;
3656 }
3657 
3658 static int dm_plane_atomic_async_check(struct drm_plane *plane,
3659 				       struct drm_plane_state *new_plane_state)
3660 {
3661 	struct drm_plane_state *old_plane_state =
3662 		drm_atomic_get_old_plane_state(new_plane_state->state, plane);
3663 
3664 	/* Only support async updates on cursor planes. */
3665 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
3666 		return -EINVAL;
3667 
3668 	/*
3669 	 * DRM calls prepare_fb and cleanup_fb on new_plane_state for
3670 	 * async commits so don't allow fb changes.
3671 	 */
3672 	if (old_plane_state->fb != new_plane_state->fb)
3673 		return -EINVAL;
3674 
3675 	return 0;
3676 }
3677 
3678 static void dm_plane_atomic_async_update(struct drm_plane *plane,
3679 					 struct drm_plane_state *new_state)
3680 {
3681 	struct drm_plane_state *old_state =
3682 		drm_atomic_get_old_plane_state(new_state->state, plane);
3683 
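	/*
	 * Async updates bypass the normal atomic state swap, so copy the new
	 * fb and coordinates directly into the current plane state.
	 */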
3684 	if (plane->state->fb != new_state->fb)
3685 		drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
3686 
3687 	plane->state->src_x = new_state->src_x;
3688 	plane->state->src_y = new_state->src_y;
3689 	plane->state->src_w = new_state->src_w;
3690 	plane->state->src_h = new_state->src_h;
3691 	plane->state->crtc_x = new_state->crtc_x;
3692 	plane->state->crtc_y = new_state->crtc_y;
3693 	plane->state->crtc_w = new_state->crtc_w;
3694 	plane->state->crtc_h = new_state->crtc_h;
3695 
3696 	handle_cursor_update(plane, old_state);
3697 }
3698 
3699 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
3700 	.prepare_fb = dm_plane_helper_prepare_fb,
3701 	.cleanup_fb = dm_plane_helper_cleanup_fb,
3702 	.atomic_check = dm_plane_atomic_check,
3703 	.atomic_async_check = dm_plane_atomic_async_check,
3704 	.atomic_async_update = dm_plane_atomic_async_update
3705 };
3706 
/*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * DRM check will succeed, and let DC implement the proper check.
 */
3713 static const uint32_t rgb_formats[] = {
3714 	DRM_FORMAT_RGB888,
3715 	DRM_FORMAT_XRGB8888,
3716 	DRM_FORMAT_ARGB8888,
3717 	DRM_FORMAT_RGBA8888,
3718 	DRM_FORMAT_XRGB2101010,
3719 	DRM_FORMAT_XBGR2101010,
3720 	DRM_FORMAT_ARGB2101010,
3721 	DRM_FORMAT_ABGR2101010,
3722 	DRM_FORMAT_XBGR8888,
3723 	DRM_FORMAT_ABGR8888,
3724 };
3725 
3726 static const uint32_t yuv_formats[] = {
3727 	DRM_FORMAT_NV12,
3728 	DRM_FORMAT_NV21,
3729 };
3730 
3731 static const u32 cursor_formats[] = {
3732 	DRM_FORMAT_ARGB8888
3733 };
3734 
3735 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3736 				struct drm_plane *plane,
3737 				unsigned long possible_crtcs)
3738 {
3739 	int res = -EPERM;
3740 
3741 	switch (plane->type) {
3742 	case DRM_PLANE_TYPE_PRIMARY:
3743 		res = drm_universal_plane_init(
3744 				dm->adev->ddev,
3745 				plane,
3746 				possible_crtcs,
3747 				&dm_plane_funcs,
3748 				rgb_formats,
3749 				ARRAY_SIZE(rgb_formats),
3750 				NULL, plane->type, NULL);
3751 		break;
3752 	case DRM_PLANE_TYPE_OVERLAY:
3753 		res = drm_universal_plane_init(
3754 				dm->adev->ddev,
3755 				plane,
3756 				possible_crtcs,
3757 				&dm_plane_funcs,
3758 				yuv_formats,
3759 				ARRAY_SIZE(yuv_formats),
3760 				NULL, plane->type, NULL);
3761 		break;
3762 	case DRM_PLANE_TYPE_CURSOR:
3763 		res = drm_universal_plane_init(
3764 				dm->adev->ddev,
3765 				plane,
3766 				possible_crtcs,
3767 				&dm_plane_funcs,
3768 				cursor_formats,
3769 				ARRAY_SIZE(cursor_formats),
3770 				NULL, plane->type, NULL);
3771 		break;
3772 	}
3773 
3774 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
3775 
3776 	/* Create (reset) the plane state */
3777 	if (plane->funcs->reset)
3778 		plane->funcs->reset(plane);
3779 
3780 
3781 	return res;
3782 }
3783 
3784 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3785 			       struct drm_plane *plane,
3786 			       uint32_t crtc_index)
3787 {
3788 	struct amdgpu_crtc *acrtc = NULL;
3789 	struct drm_plane *cursor_plane;
3790 
3791 	int res = -ENOMEM;
3792 
3793 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3794 	if (!cursor_plane)
3795 		goto fail;
3796 
3797 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
3798 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3799 
3800 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3801 	if (!acrtc)
3802 		goto fail;
3803 
3804 	res = drm_crtc_init_with_planes(
3805 			dm->ddev,
3806 			&acrtc->base,
3807 			plane,
3808 			cursor_plane,
3809 			&amdgpu_dm_crtc_funcs, NULL);
3810 
3811 	if (res)
3812 		goto fail;
3813 
3814 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3815 
	/* Create (reset) the CRTC state */
3817 	if (acrtc->base.funcs->reset)
3818 		acrtc->base.funcs->reset(&acrtc->base);
3819 
3820 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3821 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3822 
3823 	acrtc->crtc_id = crtc_index;
3824 	acrtc->base.enabled = false;
3825 	acrtc->otg_inst = -1;
3826 
3827 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3828 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
3829 				   true, MAX_COLOR_LUT_ENTRIES);
3830 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
3831 
3832 	return 0;
3833 
3834 fail:
3835 	kfree(acrtc);
3836 	kfree(cursor_plane);
3837 	return res;
3838 }
3839 
3840 
3841 static int to_drm_connector_type(enum signal_type st)
3842 {
3843 	switch (st) {
3844 	case SIGNAL_TYPE_HDMI_TYPE_A:
3845 		return DRM_MODE_CONNECTOR_HDMIA;
3846 	case SIGNAL_TYPE_EDP:
3847 		return DRM_MODE_CONNECTOR_eDP;
3848 	case SIGNAL_TYPE_LVDS:
3849 		return DRM_MODE_CONNECTOR_LVDS;
3850 	case SIGNAL_TYPE_RGB:
3851 		return DRM_MODE_CONNECTOR_VGA;
3852 	case SIGNAL_TYPE_DISPLAY_PORT:
3853 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
3854 		return DRM_MODE_CONNECTOR_DisplayPort;
3855 	case SIGNAL_TYPE_DVI_DUAL_LINK:
3856 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
3857 		return DRM_MODE_CONNECTOR_DVID;
3858 	case SIGNAL_TYPE_VIRTUAL:
3859 		return DRM_MODE_CONNECTOR_VIRTUAL;
3860 
3861 	default:
3862 		return DRM_MODE_CONNECTOR_Unknown;
3863 	}
3864 }
3865 
3866 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
3867 {
3868 	return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
3869 }
3870 
3871 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
3872 {
3873 	struct drm_encoder *encoder;
3874 	struct amdgpu_encoder *amdgpu_encoder;
3875 
3876 	encoder = amdgpu_dm_connector_to_encoder(connector);
3877 
3878 	if (encoder == NULL)
3879 		return;
3880 
3881 	amdgpu_encoder = to_amdgpu_encoder(encoder);
3882 
3883 	amdgpu_encoder->native_mode.clock = 0;
3884 
3885 	if (!list_empty(&connector->probed_modes)) {
3886 		struct drm_display_mode *preferred_mode = NULL;
3887 
3888 		list_for_each_entry(preferred_mode,
3889 				    &connector->probed_modes,
3890 				    head) {
3891 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
3892 				amdgpu_encoder->native_mode = *preferred_mode;
3893 
3894 			break;
3895 		}
3896 
3897 	}
3898 }
3899 
3900 static struct drm_display_mode *
3901 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
3902 			     char *name,
3903 			     int hdisplay, int vdisplay)
3904 {
3905 	struct drm_device *dev = encoder->dev;
3906 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3907 	struct drm_display_mode *mode = NULL;
3908 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3909 
3910 	mode = drm_mode_duplicate(dev, native_mode);
3911 
3912 	if (mode == NULL)
3913 		return NULL;
3914 
3915 	mode->hdisplay = hdisplay;
3916 	mode->vdisplay = vdisplay;
3917 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3918 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
3919 
3920 	return mode;
3921 
3922 }
3923 
3924 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3925 						 struct drm_connector *connector)
3926 {
3927 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3928 	struct drm_display_mode *mode = NULL;
3929 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3930 	struct amdgpu_dm_connector *amdgpu_dm_connector =
3931 				to_amdgpu_dm_connector(connector);
3932 	int i;
3933 	int n;
3934 	struct mode_size {
3935 		char name[DRM_DISPLAY_MODE_LEN];
3936 		int w;
3937 		int h;
3938 	} common_modes[] = {
3939 		{  "640x480",  640,  480},
3940 		{  "800x600",  800,  600},
3941 		{ "1024x768", 1024,  768},
3942 		{ "1280x720", 1280,  720},
3943 		{ "1280x800", 1280,  800},
3944 		{"1280x1024", 1280, 1024},
3945 		{ "1440x900", 1440,  900},
3946 		{"1680x1050", 1680, 1050},
3947 		{"1600x1200", 1600, 1200},
3948 		{"1920x1080", 1920, 1080},
3949 		{"1920x1200", 1920, 1200}
3950 	};
3951 
3952 	n = ARRAY_SIZE(common_modes);
3953 
3954 	for (i = 0; i < n; i++) {
3955 		struct drm_display_mode *curmode = NULL;
3956 		bool mode_existed = false;
3957 
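		/*
		 * Skip common modes larger than the native mode, and skip the
		 * native resolution itself since it is already in the probed
		 * mode list.
		 */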
3958 		if (common_modes[i].w > native_mode->hdisplay ||
3959 		    common_modes[i].h > native_mode->vdisplay ||
3960 		   (common_modes[i].w == native_mode->hdisplay &&
3961 		    common_modes[i].h == native_mode->vdisplay))
3962 			continue;
3963 
3964 		list_for_each_entry(curmode, &connector->probed_modes, head) {
3965 			if (common_modes[i].w == curmode->hdisplay &&
3966 			    common_modes[i].h == curmode->vdisplay) {
3967 				mode_existed = true;
3968 				break;
3969 			}
3970 		}
3971 
3972 		if (mode_existed)
3973 			continue;
3974 
3975 		mode = amdgpu_dm_create_common_mode(encoder,
3976 				common_modes[i].name, common_modes[i].w,
3977 				common_modes[i].h);
3978 		drm_mode_probed_add(connector, mode);
3979 		amdgpu_dm_connector->num_modes++;
3980 	}
3981 }
3982 
3983 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
3984 					      struct edid *edid)
3985 {
3986 	struct amdgpu_dm_connector *amdgpu_dm_connector =
3987 			to_amdgpu_dm_connector(connector);
3988 
3989 	if (edid) {
3990 		/* empty probed_modes */
3991 		INIT_LIST_HEAD(&connector->probed_modes);
3992 		amdgpu_dm_connector->num_modes =
3993 				drm_add_edid_modes(connector, edid);
3994 
3995 		amdgpu_dm_get_native_mode(connector);
3996 	} else {
3997 		amdgpu_dm_connector->num_modes = 0;
3998 	}
3999 }
4000 
4001 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
4002 {
4003 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4004 			to_amdgpu_dm_connector(connector);
4005 	struct drm_encoder *encoder;
4006 	struct edid *edid = amdgpu_dm_connector->edid;
4007 
4008 	encoder = amdgpu_dm_connector_to_encoder(connector);
4009 
4010 	if (!edid || !drm_edid_is_valid(edid)) {
4011 		amdgpu_dm_connector->num_modes =
4012 				drm_add_modes_noedid(connector, 640, 480);
4013 	} else {
4014 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
4015 		amdgpu_dm_connector_add_common_modes(encoder, connector);
4016 	}
4017 	amdgpu_dm_fbc_init(connector);
4018 
4019 	return amdgpu_dm_connector->num_modes;
4020 }
4021 
4022 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
4023 				     struct amdgpu_dm_connector *aconnector,
4024 				     int connector_type,
4025 				     struct dc_link *link,
4026 				     int link_index)
4027 {
4028 	struct amdgpu_device *adev = dm->ddev->dev_private;
4029 
4030 	aconnector->connector_id = link_index;
4031 	aconnector->dc_link = link;
4032 	aconnector->base.interlace_allowed = false;
4033 	aconnector->base.doublescan_allowed = false;
4034 	aconnector->base.stereo_allowed = false;
4035 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
4036 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
4037 	mutex_init(&aconnector->hpd_lock);
4038 
	/*
	 * Configure HPD (hot plug detect) support. connector->polled defaults
	 * to 0, which means HPD hot plug is not supported.
	 */
4043 	switch (connector_type) {
4044 	case DRM_MODE_CONNECTOR_HDMIA:
4045 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4046 		aconnector->base.ycbcr_420_allowed =
4047 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
4048 		break;
4049 	case DRM_MODE_CONNECTOR_DisplayPort:
4050 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4051 		aconnector->base.ycbcr_420_allowed =
4052 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
4053 		break;
4054 	case DRM_MODE_CONNECTOR_DVID:
4055 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4056 		break;
4057 	default:
4058 		break;
4059 	}
4060 
4061 	drm_object_attach_property(&aconnector->base.base,
4062 				dm->ddev->mode_config.scaling_mode_property,
4063 				DRM_MODE_SCALE_NONE);
4064 
4065 	drm_object_attach_property(&aconnector->base.base,
4066 				adev->mode_info.underscan_property,
4067 				UNDERSCAN_OFF);
4068 	drm_object_attach_property(&aconnector->base.base,
4069 				adev->mode_info.underscan_hborder_property,
4070 				0);
4071 	drm_object_attach_property(&aconnector->base.base,
4072 				adev->mode_info.underscan_vborder_property,
4073 				0);
4074 	drm_object_attach_property(&aconnector->base.base,
4075 				adev->mode_info.max_bpc_property,
4076 				0);
4077 
4078 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
4079 	    dc_is_dmcu_initialized(adev->dm.dc)) {
4080 		drm_object_attach_property(&aconnector->base.base,
4081 				adev->mode_info.abm_level_property, 0);
4082 	}
4083 
4084 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4085 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4086 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
4087 		drm_connector_attach_vrr_capable_property(
4088 			&aconnector->base);
4089 	}
4090 }
4091 
4092 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
4093 			      struct i2c_msg *msgs, int num)
4094 {
4095 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
4096 	struct ddc_service *ddc_service = i2c->ddc_service;
4097 	struct i2c_command cmd;
4098 	int i;
4099 	int result = -EIO;
4100 
4101 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
4102 
4103 	if (!cmd.payloads)
4104 		return result;
4105 
4106 	cmd.number_of_payloads = num;
4107 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
4108 	cmd.speed = 100;
4109 
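	/* Translate each i2c_msg into a DC i2c_payload, preserving direction. */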
4110 	for (i = 0; i < num; i++) {
4111 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
4112 		cmd.payloads[i].address = msgs[i].addr;
4113 		cmd.payloads[i].length = msgs[i].len;
4114 		cmd.payloads[i].data = msgs[i].buf;
4115 	}
4116 
4117 	if (dc_submit_i2c(
4118 			ddc_service->ctx->dc,
4119 			ddc_service->ddc_pin->hw_info.ddc_channel,
4120 			&cmd))
4121 		result = num;
4122 
4123 	kfree(cmd.payloads);
4124 	return result;
4125 }
4126 
4127 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
4128 {
4129 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
4130 }
4131 
4132 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
4133 	.master_xfer = amdgpu_dm_i2c_xfer,
4134 	.functionality = amdgpu_dm_i2c_func,
4135 };
4136 
4137 static struct amdgpu_i2c_adapter *
4138 create_i2c(struct ddc_service *ddc_service,
4139 	   int link_index,
4140 	   int *res)
4141 {
4142 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
4143 	struct amdgpu_i2c_adapter *i2c;
4144 
4145 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
4146 	if (!i2c)
4147 		return NULL;
4148 	i2c->base.owner = THIS_MODULE;
4149 	i2c->base.class = I2C_CLASS_DDC;
4150 	i2c->base.dev.parent = &adev->pdev->dev;
4151 	i2c->base.algo = &amdgpu_dm_i2c_algo;
4152 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
4153 	i2c_set_adapdata(&i2c->base, i2c);
4154 	i2c->ddc_service = ddc_service;
4155 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
4156 
4157 	return i2c;
4158 }
4159 
4160 
4161 /*
4162  * Note: this function assumes that dc_link_detect() was called for the
4163  * dc_link which will be represented by this aconnector.
4164  */
4165 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
4166 				    struct amdgpu_dm_connector *aconnector,
4167 				    uint32_t link_index,
4168 				    struct amdgpu_encoder *aencoder)
4169 {
4170 	int res = 0;
4171 	int connector_type;
4172 	struct dc *dc = dm->dc;
4173 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
4174 	struct amdgpu_i2c_adapter *i2c;
4175 
4176 	link->priv = aconnector;
4177 
4178 	DRM_DEBUG_DRIVER("%s()\n", __func__);
4179 
4180 	i2c = create_i2c(link->ddc, link->link_index, &res);
4181 	if (!i2c) {
4182 		DRM_ERROR("Failed to create i2c adapter data\n");
4183 		return -ENOMEM;
4184 	}
4185 
4186 	aconnector->i2c = i2c;
4187 	res = i2c_add_adapter(&i2c->base);
4188 
4189 	if (res) {
4190 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
4191 		goto out_free;
4192 	}
4193 
4194 	connector_type = to_drm_connector_type(link->connector_signal);
4195 
4196 	res = drm_connector_init(
4197 			dm->ddev,
4198 			&aconnector->base,
4199 			&amdgpu_dm_connector_funcs,
4200 			connector_type);
4201 
4202 	if (res) {
4203 		DRM_ERROR("connector_init failed\n");
4204 		aconnector->connector_id = -1;
4205 		goto out_free;
4206 	}
4207 
4208 	drm_connector_helper_add(
4209 			&aconnector->base,
4210 			&amdgpu_dm_connector_helper_funcs);
4211 
4212 	if (aconnector->base.funcs->reset)
4213 		aconnector->base.funcs->reset(&aconnector->base);
4214 
4215 	amdgpu_dm_connector_init_helper(
4216 		dm,
4217 		aconnector,
4218 		connector_type,
4219 		link,
4220 		link_index);
4221 
4222 	drm_connector_attach_encoder(
4223 		&aconnector->base, &aencoder->base);
4224 
4225 	drm_connector_register(&aconnector->base);
4226 #if defined(CONFIG_DEBUG_FS)
4227 	res = connector_debugfs_init(aconnector);
4228 	if (res) {
4229 		DRM_ERROR("Failed to create debugfs for connector");
4230 		goto out_free;
4231 	}
4232 #endif
4233 
4234 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
4235 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
4236 		amdgpu_dm_initialize_dp_connector(dm, aconnector);
4237 
4238 out_free:
4239 	if (res) {
4240 		kfree(i2c);
4241 		aconnector->i2c = NULL;
4242 	}
4243 	return res;
4244 }
4245 
4246 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
4247 {
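	/* Return a bitmask with one bit set per available CRTC: (1 << num_crtc) - 1. */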
4248 	switch (adev->mode_info.num_crtc) {
4249 	case 1:
4250 		return 0x1;
4251 	case 2:
4252 		return 0x3;
4253 	case 3:
4254 		return 0x7;
4255 	case 4:
4256 		return 0xf;
4257 	case 5:
4258 		return 0x1f;
4259 	case 6:
4260 	default:
4261 		return 0x3f;
4262 	}
4263 }
4264 
4265 static int amdgpu_dm_encoder_init(struct drm_device *dev,
4266 				  struct amdgpu_encoder *aencoder,
4267 				  uint32_t link_index)
4268 {
4269 	struct amdgpu_device *adev = dev->dev_private;
4270 
4271 	int res = drm_encoder_init(dev,
4272 				   &aencoder->base,
4273 				   &amdgpu_dm_encoder_funcs,
4274 				   DRM_MODE_ENCODER_TMDS,
4275 				   NULL);
4276 
4277 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
4278 
4279 	if (!res)
4280 		aencoder->encoder_id = link_index;
4281 	else
4282 		aencoder->encoder_id = -1;
4283 
4284 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
4285 
4286 	return res;
4287 }
4288 
4289 static void manage_dm_interrupts(struct amdgpu_device *adev,
4290 				 struct amdgpu_crtc *acrtc,
4291 				 bool enable)
4292 {
	/*
	 * This is not the correct translation, but it will work as long as
	 * the VBLANK constant is the same as PFLIP.
	 */
4297 	int irq_type =
4298 		amdgpu_display_crtc_idx_to_irq_type(
4299 			adev,
4300 			acrtc->crtc_id);
4301 
4302 	if (enable) {
4303 		drm_crtc_vblank_on(&acrtc->base);
4304 		amdgpu_irq_get(
4305 			adev,
4306 			&adev->pageflip_irq,
4307 			irq_type);
4308 	} else {
4309 
4310 		amdgpu_irq_put(
4311 			adev,
4312 			&adev->pageflip_irq,
4313 			irq_type);
4314 		drm_crtc_vblank_off(&acrtc->base);
4315 	}
4316 }
4317 
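/*
 * Returns true if the scaling mode or the effective underscan borders changed
 * between the old and the new connector state.
 */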
4318 static bool
4319 is_scaling_state_different(const struct dm_connector_state *dm_state,
4320 			   const struct dm_connector_state *old_dm_state)
4321 {
4322 	if (dm_state->scaling != old_dm_state->scaling)
4323 		return true;
4324 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
4325 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
4326 			return true;
4327 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
4328 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
4329 			return true;
4330 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
4331 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
4332 		return true;
4333 	return false;
4334 }
4335 
4336 static void remove_stream(struct amdgpu_device *adev,
4337 			  struct amdgpu_crtc *acrtc,
4338 			  struct dc_stream_state *stream)
4339 {
4340 	/* this is the update mode case */
4341 
4342 	acrtc->otg_inst = -1;
4343 	acrtc->enabled = false;
4344 }
4345 
4346 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
4347 			       struct dc_cursor_position *position)
4348 {
4349 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
4350 	int x, y;
4351 	int xorigin = 0, yorigin = 0;
4352 
4353 	if (!crtc || !plane->state->fb) {
4354 		position->enable = false;
4355 		position->x = 0;
4356 		position->y = 0;
4357 		return 0;
4358 	}
4359 
4360 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
4361 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
4362 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
4363 			  __func__,
4364 			  plane->state->crtc_w,
4365 			  plane->state->crtc_h);
4366 		return -EINVAL;
4367 	}
4368 
4369 	x = plane->state->crtc_x;
4370 	y = plane->state->crtc_y;
	/* Avivo cursors are offset into the total surface */
4372 	x += crtc->primary->state->src_x >> 16;
4373 	y += crtc->primary->state->src_y >> 16;
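	/*
	 * A negative position means the cursor hangs off the top/left edge:
	 * clamp it to 0 and carry the overshoot in the hotspot instead.
	 */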
4374 	if (x < 0) {
4375 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
4376 		x = 0;
4377 	}
4378 	if (y < 0) {
4379 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
4380 		y = 0;
4381 	}
4382 	position->enable = true;
4383 	position->x = x;
4384 	position->y = y;
4385 	position->x_hotspot = xorigin;
4386 	position->y_hotspot = yorigin;
4387 
4388 	return 0;
4389 }
4390 
4391 static void handle_cursor_update(struct drm_plane *plane,
4392 				 struct drm_plane_state *old_plane_state)
4393 {
4394 	struct amdgpu_device *adev = plane->dev->dev_private;
4395 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
4396 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
4397 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
4398 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
4399 	uint64_t address = afb ? afb->address : 0;
4400 	struct dc_cursor_position position;
4401 	struct dc_cursor_attributes attributes;
4402 	int ret;
4403 
4404 	if (!plane->state->fb && !old_plane_state->fb)
4405 		return;
4406 
4407 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
4408 			 __func__,
4409 			 amdgpu_crtc->crtc_id,
4410 			 plane->state->crtc_w,
4411 			 plane->state->crtc_h);
4412 
4413 	ret = get_cursor_position(plane, crtc, &position);
4414 	if (ret)
4415 		return;
4416 
4417 	if (!position.enable) {
4418 		/* turn off cursor */
4419 		if (crtc_state && crtc_state->stream) {
4420 			mutex_lock(&adev->dm.dc_lock);
4421 			dc_stream_set_cursor_position(crtc_state->stream,
4422 						      &position);
4423 			mutex_unlock(&adev->dm.dc_lock);
4424 		}
4425 		return;
4426 	}
4427 
4428 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
4429 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
4430 
4431 	attributes.address.high_part = upper_32_bits(address);
4432 	attributes.address.low_part  = lower_32_bits(address);
4433 	attributes.width             = plane->state->crtc_w;
4434 	attributes.height            = plane->state->crtc_h;
4435 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
4436 	attributes.rotation_angle    = 0;
4437 	attributes.attribute_flags.value = 0;
4438 
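	/* Cursor surfaces are packed ARGB8888, so the pitch in pixels equals the width. */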
4439 	attributes.pitch = attributes.width;
4440 
4441 	if (crtc_state->stream) {
4442 		mutex_lock(&adev->dm.dc_lock);
4443 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
4444 							 &attributes))
4445 			DRM_ERROR("DC failed to set cursor attributes\n");
4446 
4447 		if (!dc_stream_set_cursor_position(crtc_state->stream,
4448 						   &position))
4449 			DRM_ERROR("DC failed to set cursor position\n");
4450 		mutex_unlock(&adev->dm.dc_lock);
4451 	}
4452 }
4453 
4454 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
4455 {
4456 
4457 	assert_spin_locked(&acrtc->base.dev->event_lock);
4458 	WARN_ON(acrtc->event);
4459 
4460 	acrtc->event = acrtc->base.state->event;
4461 
4462 	/* Set the flip status */
4463 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
4464 
4465 	/* Mark this event as consumed */
4466 	acrtc->base.state->event = NULL;
4467 
4468 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
4469 						 acrtc->crtc_id);
4470 }
4471 
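/*
 * Look up the dc_stream_status tracked in @state for @stream, or return NULL
 * if the stream is not part of that state.
 */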
4472 struct dc_stream_status *dc_state_get_stream_status(
4473 	struct dc_state *state,
4474 	struct dc_stream_state *stream)
4475 {
4476 	uint8_t i;
4477 
4478 	for (i = 0; i < state->stream_count; i++) {
4479 		if (stream == state->streams[i])
4480 			return &state->stream_status[i];
4481 	}
4482 
4483 	return NULL;
4484 }
4485 
4486 static void update_freesync_state_on_stream(
4487 	struct amdgpu_display_manager *dm,
4488 	struct dm_crtc_state *new_crtc_state,
4489 	struct dc_stream_state *new_stream,
4490 	struct dc_plane_state *surface,
4491 	u32 flip_timestamp_in_us)
4492 {
4493 	struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
4494 	struct dc_info_packet vrr_infopacket = {0};
4495 	struct mod_freesync_config config = new_crtc_state->freesync_config;
4496 
4497 	if (!new_stream)
4498 		return;
4499 
4500 	/*
4501 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
4502 	 * For now it's sufficient to just guard against these conditions.
4503 	 */
4504 
4505 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
4506 		return;
4507 
4508 	if (new_crtc_state->vrr_supported &&
4509 	    config.min_refresh_in_uhz &&
4510 	    config.max_refresh_in_uhz) {
4511 		config.state = new_crtc_state->base.vrr_enabled ?
4512 			VRR_STATE_ACTIVE_VARIABLE :
4513 			VRR_STATE_INACTIVE;
4514 	} else {
4515 		config.state = VRR_STATE_UNSUPPORTED;
4516 	}
4517 
4518 	mod_freesync_build_vrr_params(dm->freesync_module,
4519 				      new_stream,
4520 				      &config, &vrr_params);
4521 
4522 	if (surface) {
4523 		mod_freesync_handle_preflip(
4524 			dm->freesync_module,
4525 			surface,
4526 			new_stream,
4527 			flip_timestamp_in_us,
4528 			&vrr_params);
4529 	}
4530 
4531 	mod_freesync_build_vrr_infopacket(
4532 		dm->freesync_module,
4533 		new_stream,
4534 		&vrr_params,
4535 		PACKET_TYPE_VRR,
4536 		TRANSFER_FUNC_UNKNOWN,
4537 		&vrr_infopacket);
4538 
4539 	new_crtc_state->freesync_timing_changed =
4540 		(memcmp(&new_crtc_state->vrr_params.adjust,
4541 			&vrr_params.adjust,
4542 			sizeof(vrr_params.adjust)) != 0);
4543 
4544 	new_crtc_state->freesync_vrr_info_changed =
4545 		(memcmp(&new_crtc_state->vrr_infopacket,
4546 			&vrr_infopacket,
4547 			sizeof(vrr_infopacket)) != 0);
4548 
4549 	new_crtc_state->vrr_params = vrr_params;
4550 	new_crtc_state->vrr_infopacket = vrr_infopacket;
4551 
4552 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
4553 	new_stream->vrr_infopacket = vrr_infopacket;
4554 
4555 	if (new_crtc_state->freesync_vrr_info_changed)
4556 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
4557 			      new_crtc_state->base.crtc->base.id,
4558 			      (int)new_crtc_state->base.vrr_enabled,
4559 			      (int)vrr_params.state);
4560 
4561 	if (new_crtc_state->freesync_timing_changed)
4562 		DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
4563 			      new_crtc_state->base.crtc->base.id,
4564 				  vrr_params.adjust.v_total_min,
4565 				  vrr_params.adjust.v_total_max);
4566 }
4567 
4568 /*
4569  * Executes flip
4570  *
4571  * Waits on all BO's fences and for proper vblank count
4572  */
4573 static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
4574 			      struct drm_framebuffer *fb,
4575 			      uint32_t target,
4576 			      struct dc_state *state)
4577 {
4578 	unsigned long flags;
4579 	uint64_t timestamp_ns;
4580 	uint32_t target_vblank;
4581 	int r, vpos, hpos;
4582 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4583 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
4584 	struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
4585 	struct amdgpu_device *adev = crtc->dev->dev_private;
4586 	bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
4587 	struct dc_flip_addrs addr = { {0} };
4588 	/* TODO eliminate or rename surface_update */
4589 	struct dc_surface_update surface_updates[1] = { {0} };
4590 	struct dc_stream_update stream_update = {0};
4591 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4592 	struct dc_stream_status *stream_status;
4593 	struct dc_plane_state *surface;
4594 
4595 
4596 	/* Prepare wait for target vblank early - before the fence-waits */
4597 	target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
4598 			amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
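	/*
	 * 'target' is expressed in the DRM software vblank-count domain;
	 * translate it into the hardware counter domain used by the wait
	 * loop below.
	 */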
4599 
	/*
	 * TODO: This might fail and hence is better not used; wait
	 * explicitly on fences instead, and in general this should only be
	 * called for blocking commits, as per the framework helpers.
	 */
4606 	r = amdgpu_bo_reserve(abo, true);
4607 	if (unlikely(r != 0)) {
4608 		DRM_ERROR("failed to reserve buffer before flip\n");
4609 		WARN_ON(1);
4610 	}
4611 
4612 	/* Wait for all fences on this FB */
4613 	WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
4614 								    MAX_SCHEDULE_TIMEOUT) < 0);
4615 
4616 	amdgpu_bo_unreserve(abo);
4617 
4618 	/*
4619 	 * Wait until we're out of the vertical blank period before the one
4620 	 * targeted by the flip
4621 	 */
4622 	while ((acrtc->enabled &&
4623 		(amdgpu_display_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id,
4624 						    0, &vpos, &hpos, NULL,
4625 						    NULL, &crtc->hwmode)
4626 		 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
4627 		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
4628 		(int)(target_vblank -
4629 		  amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
4630 		usleep_range(1000, 1100);
4631 	}
4632 
4633 	/* Flip */
4634 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
4635 
4636 	WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
4637 	WARN_ON(!acrtc_state->stream);
4638 
4639 	addr.address.grph.addr.low_part = lower_32_bits(afb->address);
4640 	addr.address.grph.addr.high_part = upper_32_bits(afb->address);
4641 	addr.flip_immediate = async_flip;
4642 
4643 	timestamp_ns = ktime_get_ns();
4644 	addr.flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
4645 
4646 
4647 	if (acrtc->base.state->event)
4648 		prepare_flip_isr(acrtc);
4649 
4650 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4651 
4652 	stream_status = dc_stream_get_status(acrtc_state->stream);
4653 	if (!stream_status) {
4654 		DRM_ERROR("No stream status for CRTC: id=%d\n",
4655 			acrtc->crtc_id);
4656 		return;
4657 	}
4658 
4659 	surface = stream_status->plane_states[0];
4660 	surface_updates->surface = surface;
4661 
4662 	if (!surface) {
4663 		DRM_ERROR("No surface for CRTC: id=%d\n",
4664 			acrtc->crtc_id);
4665 		return;
4666 	}
4667 	surface_updates->flip_addr = &addr;
4668 
4669 	if (acrtc_state->stream) {
4670 		update_freesync_state_on_stream(
4671 			&adev->dm,
4672 			acrtc_state,
4673 			acrtc_state->stream,
4674 			surface,
4675 			addr.flip_timestamp_in_us);
4676 
4677 		if (acrtc_state->freesync_timing_changed)
4678 			stream_update.adjust =
4679 				&acrtc_state->stream->adjust;
4680 
4681 		if (acrtc_state->freesync_vrr_info_changed)
4682 			stream_update.vrr_infopacket =
4683 				&acrtc_state->stream->vrr_infopacket;
4684 	}
4685 
4686 	/* Update surface timing information. */
4687 	surface->time.time_elapsed_in_us[surface->time.index] =
4688 		addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us;
4689 	surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us;
4690 	surface->time.index++;
4691 	if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
4692 		surface->time.index = 0;
4693 
4694 	mutex_lock(&adev->dm.dc_lock);
4695 
4696 	dc_commit_updates_for_stream(adev->dm.dc,
4697 					     surface_updates,
4698 					     1,
4699 					     acrtc_state->stream,
4700 					     &stream_update,
4701 					     &surface_updates->surface,
4702 					     state);
4703 	mutex_unlock(&adev->dm.dc_lock);
4704 
	DRM_DEBUG_DRIVER("%s: Flipping to hi: 0x%x, low: 0x%x\n",
4706 			 __func__,
4707 			 addr.address.grph.addr.high_part,
4708 			 addr.address.grph.addr.low_part);
4709 }
4710 
4711 /*
4712  * TODO this whole function needs to go
4713  *
4714  * dc_surface_update is needlessly complex. See if we can just replace this
4715  * with a dc_plane_state and follow the atomic model a bit more closely here.
4716  */
4717 static bool commit_planes_to_stream(
4718 		struct amdgpu_display_manager *dm,
4719 		struct dc *dc,
4720 		struct dc_plane_state **plane_states,
4721 		uint8_t new_plane_count,
4722 		struct dm_crtc_state *dm_new_crtc_state,
4723 		struct dm_crtc_state *dm_old_crtc_state,
4724 		struct dc_state *state)
4725 {
4726 	/* no need to dynamically allocate this. it's pretty small */
4727 	struct dc_surface_update updates[MAX_SURFACES];
4728 	struct dc_flip_addrs *flip_addr;
4729 	struct dc_plane_info *plane_info;
4730 	struct dc_scaling_info *scaling_info;
4731 	int i;
4732 	struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
4733 	struct dc_stream_update *stream_update =
4734 			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
4735 	unsigned int abm_level;
4736 
4737 	if (!stream_update) {
4738 		BREAK_TO_DEBUGGER();
4739 		return false;
4740 	}
4741 
4742 	flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
4743 			    GFP_KERNEL);
4744 	plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
4745 			     GFP_KERNEL);
4746 	scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
4747 			       GFP_KERNEL);
4748 
4749 	if (!flip_addr || !plane_info || !scaling_info) {
4750 		kfree(flip_addr);
4751 		kfree(plane_info);
4752 		kfree(scaling_info);
4753 		kfree(stream_update);
4754 		return false;
4755 	}
4756 
4757 	memset(updates, 0, sizeof(updates));
4758 
4759 	stream_update->src = dc_stream->src;
4760 	stream_update->dst = dc_stream->dst;
4761 	stream_update->out_transfer_func = dc_stream->out_transfer_func;
4762 
4763 	if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
4764 		abm_level = dm_new_crtc_state->abm_level;
4765 		stream_update->abm_level = &abm_level;
4766 	}
4767 
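	/*
	 * Build one dc_surface_update per plane, pointing at locally owned
	 * copies of the flip address, plane info and scaling info.
	 */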
4768 	for (i = 0; i < new_plane_count; i++) {
4769 		updates[i].surface = plane_states[i];
4770 		updates[i].gamma =
4771 			(struct dc_gamma *)plane_states[i]->gamma_correction;
4772 		updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
4773 		flip_addr[i].address = plane_states[i]->address;
4774 		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
4775 		plane_info[i].color_space = plane_states[i]->color_space;
4776 		plane_info[i].format = plane_states[i]->format;
4777 		plane_info[i].plane_size = plane_states[i]->plane_size;
4778 		plane_info[i].rotation = plane_states[i]->rotation;
4779 		plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
4780 		plane_info[i].stereo_format = plane_states[i]->stereo_format;
4781 		plane_info[i].tiling_info = plane_states[i]->tiling_info;
4782 		plane_info[i].visible = plane_states[i]->visible;
4783 		plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
4784 		plane_info[i].dcc = plane_states[i]->dcc;
4785 		scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
4786 		scaling_info[i].src_rect = plane_states[i]->src_rect;
4787 		scaling_info[i].dst_rect = plane_states[i]->dst_rect;
4788 		scaling_info[i].clip_rect = plane_states[i]->clip_rect;
4789 
4790 		updates[i].flip_addr = &flip_addr[i];
4791 		updates[i].plane_info = &plane_info[i];
4792 		updates[i].scaling_info = &scaling_info[i];
4793 	}
4794 
4795 	mutex_lock(&dm->dc_lock);
4796 	dc_commit_updates_for_stream(
4797 			dc,
4798 			updates,
4799 			new_plane_count,
4800 			dc_stream, stream_update, plane_states, state);
4801 	mutex_unlock(&dm->dc_lock);
4802 
4803 	kfree(flip_addr);
4804 	kfree(plane_info);
4805 	kfree(scaling_info);
4806 	kfree(stream_update);
4807 	return true;
4808 }
4809 
4810 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
4811 				    struct dc_state *dc_state,
4812 				    struct drm_device *dev,
4813 				    struct amdgpu_display_manager *dm,
4814 				    struct drm_crtc *pcrtc,
4815 				    bool *wait_for_vblank)
4816 {
4817 	uint32_t i;
4818 	struct drm_plane *plane;
4819 	struct drm_plane_state *old_plane_state, *new_plane_state;
4820 	struct dc_stream_state *dc_stream_attach;
4821 	struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
4822 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
4823 	struct drm_crtc_state *new_pcrtc_state =
4824 			drm_atomic_get_new_crtc_state(state, pcrtc);
4825 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
4826 	struct dm_crtc_state *dm_old_crtc_state =
4827 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
4828 	int planes_count = 0;
4829 	unsigned long flags;
4830 	u64 last_flip_vblank;
4831 	bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
4832 
4833 	/* update planes when needed */
4834 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4835 		struct drm_crtc *crtc = new_plane_state->crtc;
4836 		struct drm_crtc_state *new_crtc_state;
4837 		struct drm_framebuffer *fb = new_plane_state->fb;
4838 		bool pflip_needed;
4839 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
4840 
4841 		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
4842 			handle_cursor_update(plane, old_plane_state);
4843 			continue;
4844 		}
4845 
4846 		if (!fb || !crtc || pcrtc != crtc)
4847 			continue;
4848 
4849 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
4850 		if (!new_crtc_state->active)
4851 			continue;
4852 
4853 		pflip_needed = !state->allow_modeset;
4854 
4855 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
4856 		if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
4857 			DRM_ERROR("%s: acrtc %d, already busy\n",
4858 				  __func__,
4859 				  acrtc_attach->crtc_id);
4860 			/* In commit tail framework this cannot happen */
4861 			WARN_ON(1);
4862 		}
4863 
		/*
		 * For variable refresh rate mode only:
		 * Get the vblank of the last completed flip to avoid > 1 vrr
		 * flip per video frame by use of throttling, but allow flip
		 * programming anywhere in the possibly large variable vrr
		 * vblank interval for fine-grained flip timing control and
		 * more opportunity to avoid stutter on late submission of
		 * amdgpu_dm_do_flip() calls.
		 */
4871 		last_flip_vblank = acrtc_attach->last_flip_vblank;
4872 
4873 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4874 
4875 		if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
4876 			WARN_ON(!dm_new_plane_state->dc_state);
4877 
4878 			plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
4879 
4880 			dc_stream_attach = acrtc_state->stream;
4881 			planes_count++;
4882 
4883 		} else if (new_crtc_state->planes_changed) {
			/*
			 * Assume that even ONE crtc with an immediate flip
			 * means the entire commit can't wait for VBLANK.
			 * TODO: Check if this is correct.
			 */
4888 			*wait_for_vblank =
4889 					new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
4890 				false : true;
4891 
4892 			/* TODO: Needs rework for multiplane flip */
4893 			if (plane->type == DRM_PLANE_TYPE_PRIMARY)
4894 				drm_crtc_vblank_get(crtc);
4895 
			/*
			 * Use old throttling in non-vrr fixed refresh rate
			 * mode to keep flip scheduling based on target vblank
			 * counts working in a backwards compatible way, e.g.
			 * for clients using the GLX_OML_sync_control extension.
			 */
4901 			if (!vrr_active)
4902 				last_flip_vblank = drm_crtc_vblank_count(crtc);
4903 
4904 			amdgpu_dm_do_flip(
4905 				crtc,
4906 				fb,
4907 				(uint32_t) last_flip_vblank + *wait_for_vblank,
4908 				dc_state);
4909 		}
4910 
4911 	}
4912 
4913 	if (planes_count) {
4914 		unsigned long flags;
4915 
4916 		if (new_pcrtc_state->event) {
4917 
4918 			drm_crtc_vblank_get(pcrtc);
4919 
4920 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
4921 			prepare_flip_isr(acrtc_attach);
4922 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
4923 		}
4924 
4925 		dc_stream_attach->abm_level = acrtc_state->abm_level;
4926 
4927 		if (false == commit_planes_to_stream(dm,
4928 							dm->dc,
4929 							plane_states_constructed,
4930 							planes_count,
4931 							acrtc_state,
4932 							dm_old_crtc_state,
4933 							dc_state))
4934 			dm_error("%s: Failed to attach plane!\n", __func__);
4935 	} else {
		/* TODO: BUG: disabling of planes on the CRTC should go here. */
4937 	}
4938 }
4939 
4940 /*
4941  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4942  * @crtc_state: the DRM CRTC state
4943  * @stream_state: the DC stream state.
4944  *
4945  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4946  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
4947  */
4948 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
4949 						struct dc_stream_state *stream_state)
4950 {
4951 	stream_state->mode_changed = crtc_state->mode_changed;
4952 }
4953 
4954 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
4955 				   struct drm_atomic_state *state,
4956 				   bool nonblock)
4957 {
4958 	struct drm_crtc *crtc;
4959 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4960 	struct amdgpu_device *adev = dev->dev_private;
4961 	int i;
4962 
	/*
	 * We evade vblank and pflip interrupts on CRTCs that are about to
	 * change. We do it here to flush & disable the interrupts before the
	 * state swap in drm_atomic_helper_commit(), because that swap updates
	 * the crtc->dm_crtc_state->stream pointer, which is used in the ISRs.
	 */
4970 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4971 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4972 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4973 
4974 		if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
4975 			manage_dm_interrupts(adev, acrtc, false);
4976 	}
	/*
	 * Add a check here for SoCs that support a hardware cursor plane, to
	 * unset legacy_cursor_update.
	 */
4981 
4982 	return drm_atomic_helper_commit(dev, state, nonblock);
4983 
	/* TODO: Handle EINTR, reenable IRQ */
4985 }
4986 
4987 /**
4988  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
4989  * @state: The atomic state to commit
4990  *
4991  * This will tell DC to commit the constructed DC state from atomic_check,
4992  * programming the hardware. Any failures here implies a hardware failure, since
4993  * atomic check should have filtered anything non-kosher.
4994  */
4995 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
4996 {
4997 	struct drm_device *dev = state->dev;
4998 	struct amdgpu_device *adev = dev->dev_private;
4999 	struct amdgpu_display_manager *dm = &adev->dm;
5000 	struct dm_atomic_state *dm_state;
5001 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
5002 	uint32_t i, j;
5003 	struct drm_crtc *crtc;
5004 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5005 	unsigned long flags;
5006 	bool wait_for_vblank = true;
5007 	struct drm_connector *connector;
5008 	struct drm_connector_state *old_con_state, *new_con_state;
5009 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
5010 	int crtc_disable_count = 0;
5011 
5012 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
5013 
5014 	dm_state = dm_atomic_get_new_state(state);
5015 	if (dm_state && dm_state->context) {
5016 		dc_state = dm_state->context;
5017 	} else {
5018 		/* No state changes, retain current state. */
5019 		dc_state_temp = dc_create_state();
5020 		ASSERT(dc_state_temp);
5021 		dc_state = dc_state_temp;
5022 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
5023 	}
5024 
5025 	/* update changed items */
5026 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5027 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5028 
5029 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5030 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5031 
5032 		DRM_DEBUG_DRIVER(
5033 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
5035 			"connectors_changed:%d\n",
5036 			acrtc->crtc_id,
5037 			new_crtc_state->enable,
5038 			new_crtc_state->active,
5039 			new_crtc_state->planes_changed,
5040 			new_crtc_state->mode_changed,
5041 			new_crtc_state->active_changed,
5042 			new_crtc_state->connectors_changed);
5043 
5044 		/* Copy all transient state flags into dc state */
5045 		if (dm_new_crtc_state->stream) {
5046 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
5047 							    dm_new_crtc_state->stream);
5048 		}
5049 
		/*
		 * Handle the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
5053 
5054 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
5055 
5056 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
5057 
5058 			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery. In this
				 * case userspace tries to set a mode on a
				 * display which is in fact disconnected, so
				 * dc_sink is NULL on the aconnector. We
				 * expect a reset mode to come soon.
				 *
				 * This can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
5074 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5075 						__func__, acrtc->base.base.id);
5076 				continue;
5077 			}
5078 
5079 			if (dm_old_crtc_state->stream)
5080 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
5081 
5082 			pm_runtime_get_noresume(dev->dev);
5083 
5084 			acrtc->enabled = true;
5085 			acrtc->hw_mode = new_crtc_state->mode;
5086 			crtc->hwmode = new_crtc_state->mode;
5087 		} else if (modereset_required(new_crtc_state)) {
5088 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
5089 
5090 			/* i.e. reset mode */
5091 			if (dm_old_crtc_state->stream)
5092 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
5093 		}
5094 	} /* for_each_crtc_in_state() */
5095 
5096 	if (dc_state) {
5097 		dm_enable_per_frame_crtc_master_sync(dc_state);
5098 		mutex_lock(&dm->dc_lock);
5099 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
5100 		mutex_unlock(&dm->dc_lock);
5101 	}
5102 
5103 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
5104 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5105 
5106 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5107 
5108 		if (dm_new_crtc_state->stream != NULL) {
5109 			const struct dc_stream_status *status =
5110 					dc_stream_get_status(dm_new_crtc_state->stream);
5111 
5112 			if (!status)
5113 				status = dc_state_get_stream_status(dc_state,
5114 								    dm_new_crtc_state->stream);
5115 
5116 			if (!status)
5117 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
5118 			else
5119 				acrtc->otg_inst = status->primary_otg_inst;
5120 		}
5121 	}
5122 
5123 	/* Handle scaling, underscan, and abm changes*/
5124 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5125 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
5126 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
5127 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
5128 		struct dc_stream_status *status = NULL;
5129 
5130 		if (acrtc) {
5131 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
5132 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
5133 		}
5134 
5135 		/* Skip any modesets/resets */
5136 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
5137 			continue;
5138 
5139 
5140 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5141 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5142 
		/* Skip anything that is not a scaling, underscan or abm change */
5144 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
5145 				(dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
5146 			continue;
5147 
5148 		update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
5149 				dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
5150 
5151 		if (!dm_new_crtc_state->stream)
5152 			continue;
5153 
5154 		status = dc_stream_get_status(dm_new_crtc_state->stream);
5155 		WARN_ON(!status);
5156 		WARN_ON(!status->plane_count);
5157 
5158 		dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
5159 
		/* TODO: How does this work with MPO? */
5161 		if (!commit_planes_to_stream(
5162 				dm,
5163 				dm->dc,
5164 				status->plane_states,
5165 				status->plane_count,
5166 				dm_new_crtc_state,
5167 				to_dm_crtc_state(old_crtc_state),
5168 				dc_state))
5169 			dm_error("%s: Failed to update stream scaling!\n", __func__);
5170 	}
5171 
5172 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
5173 			new_crtc_state, i) {
5174 		/*
5175 		 * loop to enable interrupts on newly arrived crtc
5176 		 */
5177 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5178 		bool modeset_needed;
5179 
5180 		if (old_crtc_state->active && !new_crtc_state->active)
5181 			crtc_disable_count++;
5182 
5183 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5184 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5185 		modeset_needed = modeset_required(
5186 				new_crtc_state,
5187 				dm_new_crtc_state->stream,
5188 				dm_old_crtc_state->stream);
5189 
5190 		if (dm_new_crtc_state->stream == NULL || !modeset_needed)
5191 			continue;
5192 
5193 		manage_dm_interrupts(adev, acrtc, true);
5194 	}
5195 
5196 	/* update planes when needed per crtc*/
5197 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
5198 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5199 
5200 		if (dm_new_crtc_state->stream)
5201 			amdgpu_dm_commit_planes(state, dc_state, dev,
5202 						dm, crtc, &wait_for_vblank);
5203 	}
5204 
5205 
5206 	/*
5207 	 * send vblank event on all events not handled in flip and
5208 	 * mark consumed event for drm_atomic_helper_commit_hw_done
5209 	 */
5210 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
5211 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
5212 
5213 		if (new_crtc_state->event)
5214 			drm_send_event_locked(dev, &new_crtc_state->event->base);
5215 
5216 		new_crtc_state->event = NULL;
5217 	}
5218 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
5219 
5220 
5221 	if (wait_for_vblank)
5222 		drm_atomic_helper_wait_for_flip_done(dev, state);
5223 
5224 	/*
5225 	 * FIXME:
5226 	 * Delay hw_done() until flip_done() is signaled. This is to block
5227 	 * another commit from freeing the CRTC state while we're still
5228 	 * waiting on flip_done.
5229 	 */
5230 	drm_atomic_helper_commit_hw_done(state);
5231 
5232 	drm_atomic_helper_cleanup_planes(dev, state);
5233 
5234 	/*
5235 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
5236 	 * so we can put the GPU into runtime suspend if we're not driving any
5237 	 * displays anymore
5238 	 */
5239 	for (i = 0; i < crtc_disable_count; i++)
5240 		pm_runtime_put_autosuspend(dev->dev);
5241 	pm_runtime_mark_last_busy(dev->dev);
5242 
5243 	if (dc_state_temp)
5244 		dc_release_state(dc_state_temp);
5245 }
5246 
5247 
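/*
 * Build and commit a minimal atomic state that re-programs the display
 * currently driven through @connector: the connector itself, its CRTC and the
 * CRTC's primary plane are pulled into a fresh drm_atomic_state, mode_changed
 * is forced on the CRTC so the commit is treated as a full restore, and the
 * state is committed through the regular atomic path.
 */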
5248 static int dm_force_atomic_commit(struct drm_connector *connector)
5249 {
5250 	int ret = 0;
5251 	struct drm_device *ddev = connector->dev;
5252 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
5253 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
5254 	struct drm_plane *plane = disconnected_acrtc->base.primary;
5255 	struct drm_connector_state *conn_state;
5256 	struct drm_crtc_state *crtc_state;
5257 	struct drm_plane_state *plane_state;
5258 
5259 	if (!state)
5260 		return -ENOMEM;
5261 
5262 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
5263 
5264 	/* Construct an atomic state to restore previous display setting */
5265 
5266 	/*
5267 	 * Attach connectors to drm_atomic_state
5268 	 */
5269 	conn_state = drm_atomic_get_connector_state(state, connector);
5270 
5271 	ret = PTR_ERR_OR_ZERO(conn_state);
5272 	if (ret)
5273 		goto err;
5274 
5275 	/* Attach crtc to drm_atomic_state*/
5276 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
5277 
5278 	ret = PTR_ERR_OR_ZERO(crtc_state);
5279 	if (ret)
5280 		goto err;
5281 
5282 	/* force a restore */
5283 	crtc_state->mode_changed = true;
5284 
5285 	/* Attach plane to drm_atomic_state */
5286 	plane_state = drm_atomic_get_plane_state(state, plane);
5287 
5288 	ret = PTR_ERR_OR_ZERO(plane_state);
5289 	if (ret)
5290 		goto err;
5291 
5292 
5293 	/* Call commit internally with the state we just constructed */
5294 	ret = drm_atomic_commit(state);
5295 	if (!ret)
5296 		return 0;
5297 
5298 err:
5299 	DRM_ERROR("Restoring old state failed with %i\n", ret);
5300 	drm_atomic_state_put(state);
5301 
5302 	return ret;
5303 }
5304 
/*
 * This function handles all cases when a set mode request does not come upon
 * hotplug. This includes when a display is unplugged and then plugged back
 * into the same port, and when running without usermode desktop manager
 * support.
 */
5310 void dm_restore_drm_connector_state(struct drm_device *dev,
5311 				    struct drm_connector *connector)
5312 {
5313 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5314 	struct amdgpu_crtc *disconnected_acrtc;
5315 	struct dm_crtc_state *acrtc_state;
5316 
5317 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
5318 		return;
5319 
5320 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
5321 	if (!disconnected_acrtc)
5322 		return;
5323 
5324 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
5325 	if (!acrtc_state->stream)
5326 		return;
5327 
	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce we are in a state where we cannot rely on a
	 * usermode call to turn on the display, so we do it here.
	 */
5333 	if (acrtc_state->stream->sink != aconnector->dc_sink)
5334 		dm_force_atomic_commit(&aconnector->base);
5335 }
5336 
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
5341 static int do_aquire_global_lock(struct drm_device *dev,
5342 				 struct drm_atomic_state *state)
5343 {
5344 	struct drm_crtc *crtc;
5345 	struct drm_crtc_commit *commit;
5346 	long ret;
5347 
	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases it, the extra locks we are taking here will also
	 * get released.
	 */
5353 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
5354 	if (ret)
5355 		return ret;
5356 
5357 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5358 		spin_lock(&crtc->commit_lock);
5359 		commit = list_first_entry_or_null(&crtc->commit_list,
5360 				struct drm_crtc_commit, commit_entry);
5361 		if (commit)
5362 			drm_crtc_commit_get(commit);
5363 		spin_unlock(&crtc->commit_lock);
5364 
5365 		if (!commit)
5366 			continue;
5367 
5368 		/*
5369 		 * Make sure all pending HW programming completed and
5370 		 * page flips done
5371 		 */
5372 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
5373 
5374 		if (ret > 0)
5375 			ret = wait_for_completion_interruptible_timeout(
5376 					&commit->flip_done, 10*HZ);
5377 
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
5381 
5382 		drm_crtc_commit_put(commit);
5383 	}
5384 
5385 	return ret < 0 ? ret : 0;
5386 }
5387 
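/*
 * Build the mod_freesync_config for a CRTC from its connector state: if the
 * connector reports FreeSync capability, enable variable refresh when the DRM
 * VRR property is set and program the min/max refresh range (in uHz) derived
 * from the EDID, then stash the result in the new CRTC state.
 */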
5388 static void get_freesync_config_for_crtc(
5389 	struct dm_crtc_state *new_crtc_state,
5390 	struct dm_connector_state *new_con_state)
5391 {
5392 	struct mod_freesync_config config = {0};
5393 	struct amdgpu_dm_connector *aconnector =
5394 			to_amdgpu_dm_connector(new_con_state->base.connector);
5395 
5396 	new_crtc_state->vrr_supported = new_con_state->freesync_capable;
5397 
5398 	if (new_con_state->freesync_capable) {
5399 		config.state = new_crtc_state->base.vrr_enabled ?
5400 				VRR_STATE_ACTIVE_VARIABLE :
5401 				VRR_STATE_INACTIVE;
5402 		config.min_refresh_in_uhz =
5403 				aconnector->min_vfreq * 1000000;
5404 		config.max_refresh_in_uhz =
5405 				aconnector->max_vfreq * 1000000;
5406 		config.vsif_supported = true;
5407 		config.btr = true;
5408 	}
5409 
5410 	new_crtc_state->freesync_config = config;
5411 }
5412 
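/* Clear any previously computed VRR parameters and infopacket for the CRTC. */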
5413 static void reset_freesync_config_for_crtc(
5414 	struct dm_crtc_state *new_crtc_state)
5415 {
5416 	new_crtc_state->vrr_supported = false;
5417 
5418 	memset(&new_crtc_state->vrr_params, 0,
5419 	       sizeof(new_crtc_state->vrr_params));
5420 	memset(&new_crtc_state->vrr_infopacket, 0,
5421 	       sizeof(new_crtc_state->vrr_infopacket));
5422 }
5423 
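/*
 * Walk every CRTC in the atomic state and keep the DC context in sync with
 * it. With enable == false, streams belonging to disabled or modeset CRTCs
 * are removed from the DC context; with enable == true, new streams are
 * created from the connector state and added to it. Scaling, color
 * management and FreeSync settings are refreshed on the remaining enabled
 * streams, and *lock_and_validation_needed is set whenever the DC context
 * changed.
 */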
5424 static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
5425 				 struct drm_atomic_state *state,
5426 				 bool enable,
5427 				 bool *lock_and_validation_needed)
5428 {
5429 	struct dm_atomic_state *dm_state = NULL;
5430 	struct drm_crtc *crtc;
5431 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5432 	int i;
5433 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
5434 	struct dc_stream_state *new_stream;
5435 	int ret = 0;
5436 
5437 	/*
5438 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
5439 	 * update changed items
5440 	 */
5441 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5442 		struct amdgpu_crtc *acrtc = NULL;
5443 		struct amdgpu_dm_connector *aconnector = NULL;
5444 		struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
5445 		struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
5446 		struct drm_plane_state *new_plane_state = NULL;
5447 
5448 		new_stream = NULL;
5449 
5450 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5451 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5452 		acrtc = to_amdgpu_crtc(crtc);
5453 
5454 		new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
5455 
5456 		if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
5457 			ret = -EINVAL;
5458 			goto fail;
5459 		}
5460 
5461 		aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
5462 
5463 		/* TODO This hack should go away */
5464 		if (aconnector && enable) {
5465 			/* Make sure fake sink is created in plug-in scenario */
5466 			drm_new_conn_state = drm_atomic_get_new_connector_state(state,
5467  								    &aconnector->base);
5468 			drm_old_conn_state = drm_atomic_get_old_connector_state(state,
5469 								    &aconnector->base);
5470 
5471 			if (IS_ERR(drm_new_conn_state)) {
5472 				ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
5473 				break;
5474 			}
5475 
5476 			dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
5477 			dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
5478 
5479 			new_stream = create_stream_for_sink(aconnector,
5480 							     &new_crtc_state->mode,
5481 							    dm_new_conn_state,
5482 							    dm_old_crtc_state->stream);
5483 
			/*
			 * We can have no stream on ACTION_SET if a display
			 * was disconnected during S3; in this case it is not
			 * an error, the OS will be updated after detection
			 * and will do the right thing on the next atomic
			 * commit.
			 */
5490 
5491 			if (!new_stream) {
5492 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5493 						__func__, acrtc->base.base.id);
5494 				break;
5495 			}
5496 
5497 			dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
5498 
5499 			if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
5500 			    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
5501 				new_crtc_state->mode_changed = false;
5502 				DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
5503 						 new_crtc_state->mode_changed);
5504 			}
5505 		}
5506 
5507 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
5508 			goto next_crtc;
5509 
		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
5514 			acrtc->crtc_id,
5515 			new_crtc_state->enable,
5516 			new_crtc_state->active,
5517 			new_crtc_state->planes_changed,
5518 			new_crtc_state->mode_changed,
5519 			new_crtc_state->active_changed,
5520 			new_crtc_state->connectors_changed);
5521 
5522 		/* Remove stream for any changed/disabled CRTC */
5523 		if (!enable) {
5524 
5525 			if (!dm_old_crtc_state->stream)
5526 				goto next_crtc;
5527 
5528 			ret = dm_atomic_get_state(state, &dm_state);
5529 			if (ret)
5530 				goto fail;
5531 
5532 			DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
5533 					crtc->base.id);
5534 
5535 			/* i.e. reset mode */
5536 			if (dc_remove_stream_from_ctx(
5537 					dm->dc,
5538 					dm_state->context,
5539 					dm_old_crtc_state->stream) != DC_OK) {
5540 				ret = -EINVAL;
5541 				goto fail;
5542 			}
5543 
5544 			dc_stream_release(dm_old_crtc_state->stream);
5545 			dm_new_crtc_state->stream = NULL;
5546 
5547 			reset_freesync_config_for_crtc(dm_new_crtc_state);
5548 
5549 			*lock_and_validation_needed = true;
5550 
5551 		} else {/* Add stream for any updated/enabled CRTC */
			/*
			 * Quick fix to prevent a NULL pointer dereference on
			 * new_stream when newly added MST connectors are not
			 * found in the existing crtc_state in chained mode.
			 * TODO: dig out the root cause of this.
			 */
5557 			if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
5558 				goto next_crtc;
5559 
5560 			if (modereset_required(new_crtc_state))
5561 				goto next_crtc;
5562 
5563 			if (modeset_required(new_crtc_state, new_stream,
5564 					     dm_old_crtc_state->stream)) {
5565 
5566 				WARN_ON(dm_new_crtc_state->stream);
5567 
5568 				ret = dm_atomic_get_state(state, &dm_state);
5569 				if (ret)
5570 					goto fail;
5571 
5572 				dm_new_crtc_state->stream = new_stream;
5573 
5574 				dc_stream_retain(new_stream);
5575 
5576 				DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
5577 							crtc->base.id);
5578 
5579 				if (dc_add_stream_to_ctx(
5580 						dm->dc,
5581 						dm_state->context,
5582 						dm_new_crtc_state->stream) != DC_OK) {
5583 					ret = -EINVAL;
5584 					goto fail;
5585 				}
5586 
5587 				*lock_and_validation_needed = true;
5588 			}
5589 		}
5590 
5591 next_crtc:
5592 		/* Release extra reference */
5593 		if (new_stream)
5594 			 dc_stream_release(new_stream);
5595 
5596 		/*
5597 		 * We want to do dc stream updates that do not require a
5598 		 * full modeset below.
5599 		 */
5600 		if (!(enable && aconnector && new_crtc_state->enable &&
5601 		      new_crtc_state->active))
5602 			continue;
		/*
		 * Given the above conditions, the dc stream state cannot be
		 * NULL because the CRTC:
		 * 1. Is in the process of being enabled (its stream has just
		 *    been added to the dc context, or is already on it),
		 * 2. Has a valid connector attached, and
		 * 3. Is currently active and enabled.
		 * => The dc stream state currently exists.
		 */
5611 		BUG_ON(dm_new_crtc_state->stream == NULL);
5612 
5613 		/* Scaling or underscan settings */
5614 		if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
5615 			update_stream_scaling_settings(
5616 				&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
5617 
5618 		/*
5619 		 * Color management settings. We also update color properties
5620 		 * when a modeset is needed, to ensure it gets reprogrammed.
5621 		 */
5622 		if (dm_new_crtc_state->base.color_mgmt_changed ||
5623 		    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
5624 			ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
5625 			if (ret)
5626 				goto fail;
5627 			amdgpu_dm_set_ctm(dm_new_crtc_state);
5628 		}
5629 
5630 		/* Update Freesync settings. */
5631 		get_freesync_config_for_crtc(dm_new_crtc_state,
5632 					     dm_new_conn_state);
5633 	}
5634 
5635 	return ret;
5636 
5637 fail:
5638 	if (new_stream)
5639 		dc_stream_release(new_stream);
5640 	return ret;
5641 }
5642 
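/*
 * Mirror the DRM plane changes into the DC context. With enable == false,
 * dc_plane_states for removed or changed planes are taken out of the DC
 * context; with enable == true, new dc_plane_states are created, filled from
 * the DRM plane state and added to the owning stream's context. Planes are
 * walked in reverse order, as DC expects. *lock_and_validation_needed is set
 * whenever the DC context changed.
 */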
5643 static int dm_update_planes_state(struct dc *dc,
5644 				  struct drm_atomic_state *state,
5645 				  bool enable,
5646 				  bool *lock_and_validation_needed)
5647 {
5648 
5649 	struct dm_atomic_state *dm_state = NULL;
5650 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
5651 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5652 	struct drm_plane *plane;
5653 	struct drm_plane_state *old_plane_state, *new_plane_state;
5654 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
5655 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	int i;
	/* TODO return page_flip_needed() function */
	bool pflip_needed = !state->allow_modeset;
5659 	int ret = 0;
5660 
5661 
	/* Add new planes in reverse order, as DC expects */
5663 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
5664 		new_plane_crtc = new_plane_state->crtc;
5665 		old_plane_crtc = old_plane_state->crtc;
5666 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
5667 		dm_old_plane_state = to_dm_plane_state(old_plane_state);
5668 
		/* TODO: Implement atomic check for cursor plane */
5670 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5671 			continue;
5672 
5673 		/* Remove any changed/removed planes */
5674 		if (!enable) {
5675 			if (pflip_needed &&
5676 			    plane->type != DRM_PLANE_TYPE_OVERLAY)
5677 				continue;
5678 
5679 			if (!old_plane_crtc)
5680 				continue;
5681 
5682 			old_crtc_state = drm_atomic_get_old_crtc_state(
5683 					state, old_plane_crtc);
5684 			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5685 
5686 			if (!dm_old_crtc_state->stream)
5687 				continue;
5688 
5689 			DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
5690 					plane->base.id, old_plane_crtc->base.id);
5691 
5692 			ret = dm_atomic_get_state(state, &dm_state);
5693 			if (ret)
5694 				return ret;
5695 
5696 			if (!dc_remove_plane_from_context(
5697 					dc,
5698 					dm_old_crtc_state->stream,
5699 					dm_old_plane_state->dc_state,
5700 					dm_state->context)) {
5701 
				ret = -EINVAL;
5703 				return ret;
5704 			}
5705 
5706 
5707 			dc_plane_state_release(dm_old_plane_state->dc_state);
5708 			dm_new_plane_state->dc_state = NULL;
5709 
5710 			*lock_and_validation_needed = true;
5711 
5712 		} else { /* Add new planes */
5713 			struct dc_plane_state *dc_new_plane_state;
5714 
5715 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
5716 				continue;
5717 
5718 			if (!new_plane_crtc)
5719 				continue;
5720 
5721 			new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
5722 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5723 
5724 			if (!dm_new_crtc_state->stream)
5725 				continue;
5726 
5727 			if (pflip_needed &&
5728 			    plane->type != DRM_PLANE_TYPE_OVERLAY)
5729 				continue;
5730 
5731 			WARN_ON(dm_new_plane_state->dc_state);
5732 
5733 			dc_new_plane_state = dc_create_plane_state(dc);
5734 			if (!dc_new_plane_state)
5735 				return -ENOMEM;
5736 
5737 			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
5738 					plane->base.id, new_plane_crtc->base.id);
5739 
5740 			ret = fill_plane_attributes(
5741 				new_plane_crtc->dev->dev_private,
5742 				dc_new_plane_state,
5743 				new_plane_state,
5744 				new_crtc_state);
5745 			if (ret) {
5746 				dc_plane_state_release(dc_new_plane_state);
5747 				return ret;
5748 			}
5749 
5750 			ret = dm_atomic_get_state(state, &dm_state);
5751 			if (ret) {
5752 				dc_plane_state_release(dc_new_plane_state);
5753 				return ret;
5754 			}
5755 
5756 			/*
5757 			 * Any atomic check errors that occur after this will
5758 			 * not need a release. The plane state will be attached
5759 			 * to the stream, and therefore part of the atomic
5760 			 * state. It'll be released when the atomic state is
5761 			 * cleaned.
5762 			 */
5763 			if (!dc_add_plane_to_context(
5764 					dc,
5765 					dm_new_crtc_state->stream,
5766 					dc_new_plane_state,
5767 					dm_state->context)) {
5768 
5769 				dc_plane_state_release(dc_new_plane_state);
5770 				return -EINVAL;
5771 			}
5772 
5773 			dm_new_plane_state->dc_state = dc_new_plane_state;
5774 
			/*
			 * Tell DC to do a full surface update every time
			 * there is a plane change. Inefficient, but works
			 * for now.
			 */
5778 			dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
5779 
5780 			*lock_and_validation_needed = true;
5781 		}
5782 	}
5783 
5784 
5785 	return ret;
5786 }
5787 
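/*
 * Ask DC how heavy the commit is going to be. For every stream in the atomic
 * state a scratch list of dc_surface_updates is built from the planes that
 * changed (geometry on a mode change, gamma/transfer functions on a color
 * management change) and handed to dc_check_update_surfaces_for_stream(),
 * which classifies the commit as a FAST, MED or FULL update. Disabling a
 * stream is always treated as a FULL update.
 */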
5788 static int
5789 dm_determine_update_type_for_commit(struct dc *dc,
5790 				    struct drm_atomic_state *state,
5791 				    enum surface_update_type *out_type)
5792 {
5793 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
5794 	int i, j, num_plane, ret = 0;
5795 	struct drm_plane_state *old_plane_state, *new_plane_state;
5796 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
5797 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
5798 	struct drm_plane *plane;
5799 
5800 	struct drm_crtc *crtc;
5801 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
5802 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
5803 	struct dc_stream_status *status = NULL;
5804 
	struct dc_surface_update *updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
	struct dc_plane_state *surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL);
	struct dc_stream_update stream_update = { 0 };
5808 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
5809 
5810 	if (!updates || !surface) {
		DRM_ERROR("Failed to allocate plane/surface update structures\n");
		/* Set type to FULL to avoid crashing in DC */
5813 		update_type = UPDATE_TYPE_FULL;
5814 		goto cleanup;
5815 	}
5816 
5817 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5818 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
5819 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
5820 		num_plane = 0;
5821 
5822 		if (new_dm_crtc_state->stream) {
5823 
5824 			for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
5825 				new_plane_crtc = new_plane_state->crtc;
5826 				old_plane_crtc = old_plane_state->crtc;
5827 				new_dm_plane_state = to_dm_plane_state(new_plane_state);
5828 				old_dm_plane_state = to_dm_plane_state(old_plane_state);
5829 
5830 				if (plane->type == DRM_PLANE_TYPE_CURSOR)
5831 					continue;
5832 
5833 				if (!state->allow_modeset)
5834 					continue;
5835 
5836 				if (crtc == new_plane_crtc) {
5837 					updates[num_plane].surface = &surface[num_plane];
5838 
5839 					if (new_crtc_state->mode_changed) {
5840 						updates[num_plane].surface->src_rect =
5841 									new_dm_plane_state->dc_state->src_rect;
5842 						updates[num_plane].surface->dst_rect =
5843 									new_dm_plane_state->dc_state->dst_rect;
5844 						updates[num_plane].surface->rotation =
5845 									new_dm_plane_state->dc_state->rotation;
5846 						updates[num_plane].surface->in_transfer_func =
5847 									new_dm_plane_state->dc_state->in_transfer_func;
5848 						stream_update.dst = new_dm_crtc_state->stream->dst;
5849 						stream_update.src = new_dm_crtc_state->stream->src;
5850 					}
5851 
5852 					if (new_crtc_state->color_mgmt_changed) {
5853 						updates[num_plane].gamma =
5854 								new_dm_plane_state->dc_state->gamma_correction;
5855 						updates[num_plane].in_transfer_func =
5856 								new_dm_plane_state->dc_state->in_transfer_func;
5857 						stream_update.gamut_remap =
5858 								&new_dm_crtc_state->stream->gamut_remap_matrix;
5859 						stream_update.out_transfer_func =
5860 								new_dm_crtc_state->stream->out_transfer_func;
5861 					}
5862 
5863 					num_plane++;
5864 				}
5865 			}
5866 
5867 			if (num_plane > 0) {
5868 				ret = dm_atomic_get_state(state, &dm_state);
5869 				if (ret)
5870 					goto cleanup;
5871 
5872 				old_dm_state = dm_atomic_get_old_state(state);
5873 				if (!old_dm_state) {
5874 					ret = -EINVAL;
5875 					goto cleanup;
5876 				}
5877 
5878 				status = dc_state_get_stream_status(old_dm_state->context,
5879 								    new_dm_crtc_state->stream);
5880 
5881 				update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
5882 										  &stream_update, status);
5883 
5884 				if (update_type > UPDATE_TYPE_MED) {
5885 					update_type = UPDATE_TYPE_FULL;
5886 					goto cleanup;
5887 				}
5888 			}
5889 
5890 		} else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
5891 			update_type = UPDATE_TYPE_FULL;
5892 			goto cleanup;
5893 		}
5894 	}
5895 
5896 cleanup:
5897 	kfree(updates);
5898 	kfree(surface);
5899 
5900 	*out_type = update_type;
5901 	return ret;
5902 }
5903 
5904 /**
5905  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
5906  * @dev: The DRM device
5907  * @state: The atomic state to commit
5908  *
5909  * Validate that the given atomic state is programmable by DC into hardware.
5910  * This involves constructing a &struct dc_state reflecting the new hardware
5911  * state we wish to commit, then querying DC to see if it is programmable. It's
5912  * important not to modify the existing DC state. Otherwise, atomic_check
5913  * may unexpectedly commit hardware changes.
5914  *
5915  * When validating the DC state, it's important that the right locks are
5916  * acquired. For full updates case which removes/adds/updates streams on one
5917  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
5918  * that any such full update commit will wait for completion of any outstanding
5919  * flip using DRMs synchronization events. See
5920  * dm_determine_update_type_for_commit()
5921  *
5922  * Note that DM adds the affected connectors for all CRTCs in state, when that
5923  * might not seem necessary. This is because DC stream creation requires the
5924  * DC sink, which is tied to the DRM connector state. Cleaning this up should
5925  * be possible but non-trivial - a possible TODO item.
5926  *
5927  * Return: -Error code if validation failed.
5928  */
5929 static int amdgpu_dm_atomic_check(struct drm_device *dev,
5930 				  struct drm_atomic_state *state)
5931 {
5932 	struct amdgpu_device *adev = dev->dev_private;
5933 	struct dm_atomic_state *dm_state = NULL;
5934 	struct dc *dc = adev->dm.dc;
5935 	struct drm_connector *connector;
5936 	struct drm_connector_state *old_con_state, *new_con_state;
5937 	struct drm_crtc *crtc;
5938 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5939 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
5940 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
5941 
5942 	int ret, i;
5943 
	/*
	 * This bool will be set to true for any modeset/reset or plane update
	 * which implies a non-fast surface update.
	 */
5948 	bool lock_and_validation_needed = false;
5949 
5950 	ret = drm_atomic_helper_check_modeset(dev, state);
5951 	if (ret)
5952 		goto fail;
5953 
5954 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5955 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
5956 		    !new_crtc_state->color_mgmt_changed &&
5957 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
5958 			continue;
5959 
5960 		if (!new_crtc_state->enable)
5961 			continue;
5962 
5963 		ret = drm_atomic_add_affected_connectors(state, crtc);
5964 		if (ret)
5965 			return ret;
5966 
5967 		ret = drm_atomic_add_affected_planes(state, crtc);
5968 		if (ret)
5969 			goto fail;
5970 	}
5971 
	/* Remove existing planes if they are modified */
	ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
	if (ret)
		goto fail;

	/* Disable all CRTCs which require disable */
	ret = dm_update_crtcs_state(&adev->dm, state, false, &lock_and_validation_needed);
	if (ret)
		goto fail;

	/* Enable all CRTCs which require enable */
	ret = dm_update_crtcs_state(&adev->dm, state, true, &lock_and_validation_needed);
	if (ret)
		goto fail;

	/* Add new/modified planes */
	ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
	if (ret)
		goto fail;
5995 
5996 	/* Run this here since we want to validate the streams we created */
5997 	ret = drm_atomic_helper_check_planes(dev, state);
5998 	if (ret)
5999 		goto fail;
6000 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-change validation was removed due to the inability to
	 * commit a new stream into the context without causing a full reset.
	 * Need to decide how to handle this.
	 */
6006 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6007 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
6008 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
6009 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
6010 
6011 		/* Skip any modesets/resets */
6012 		if (!acrtc || drm_atomic_crtc_needs_modeset(
6013 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
6014 			continue;
6015 
		/* Skip anything that is not a scaling or underscan change */
6017 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
6018 			continue;
6019 
6020 		overall_update_type = UPDATE_TYPE_FULL;
6021 		lock_and_validation_needed = true;
6022 	}
6023 
6024 	ret = dm_determine_update_type_for_commit(dc, state, &update_type);
6025 	if (ret)
6026 		goto fail;
6027 
6028 	if (overall_update_type < update_type)
6029 		overall_update_type = update_type;
6030 
	/*
	 * lock_and_validation_needed was an old way to determine whether we
	 * need to take the global lock. Leaving it in to check if we broke
	 * any corner cases:
	 * lock_and_validation_needed true  = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL\n");
	else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST)
		WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST\n");
6041 
6042 
6043 	if (overall_update_type > UPDATE_TYPE_FAST) {
6044 		ret = dm_atomic_get_state(state, &dm_state);
6045 		if (ret)
6046 			goto fail;
6047 
6048 		ret = do_aquire_global_lock(dev, state);
6049 		if (ret)
6050 			goto fail;
6051 
6052 		if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
6053 			ret = -EINVAL;
6054 			goto fail;
6055 		}
6056 	} else if (state->legacy_cursor_update) {
6057 		/*
6058 		 * This is a fast cursor update coming from the plane update
6059 		 * helper, check if it can be done asynchronously for better
6060 		 * performance.
6061 		 */
6062 		state->async_update = !drm_atomic_helper_async_check(dev, state);
6063 	}
6064 
	/* ret must be 0 (success) at this point */
6066 	WARN_ON(ret);
6067 	return ret;
6068 
6069 fail:
6070 	if (ret == -EDEADLK)
6071 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
6072 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
6073 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
6074 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
6076 
6077 	return ret;
6078 }
6079 
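/*
 * Check the DP_MSA_TIMING_PAR_IGNORED bit in the DP_DOWN_STREAM_PORT_COUNT
 * DPCD register, i.e. whether the sink can ignore the MSA timing parameters.
 * This is used below as a precondition for parsing FreeSync ranges out of
 * the EDID of DP/eDP sinks.
 */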
6080 static bool is_dp_capable_without_timing_msa(struct dc *dc,
6081 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
6082 {
6083 	uint8_t dpcd_data;
6084 	bool capable = false;
6085 
6086 	if (amdgpu_dm_connector->dc_link &&
6087 		dm_helpers_dp_read_dpcd(
6088 				NULL,
6089 				amdgpu_dm_connector->dc_link,
6090 				DP_DOWN_STREAM_PORT_COUNT,
6091 				&dpcd_data,
6092 				sizeof(dpcd_data))) {
6093 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
6094 	}
6095 
6096 	return capable;
6097 }
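
/**
 * amdgpu_dm_update_freesync_caps - Update the connector's FreeSync capability
 * @connector: The DRM connector to update
 * @edid: The monitor's EDID, or NULL if the display was disconnected
 *
 * Parse the EDID detailed timing descriptors for a monitor range block and,
 * if one with suitable flags is found on a capable DP/eDP sink, record the
 * supported min/max vertical refresh range on the amdgpu connector. The
 * result is reflected in the connector state's freesync_capable field and in
 * the DRM "vrr_capable" property.
 */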
6098 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
6099 					struct edid *edid)
6100 {
6101 	int i;
6102 	bool edid_check_required;
6103 	struct detailed_timing *timing;
6104 	struct detailed_non_pixel *data;
6105 	struct detailed_data_monitor_range *range;
6106 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6107 			to_amdgpu_dm_connector(connector);
6108 	struct dm_connector_state *dm_con_state = NULL;
6109 
6110 	struct drm_device *dev = connector->dev;
6111 	struct amdgpu_device *adev = dev->dev_private;
6112 	bool freesync_capable = false;
6113 
6114 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
6116 		goto update;
6117 	}
6118 
6119 	if (!edid) {
6120 		dm_con_state = to_dm_connector_state(connector->state);
6121 
6122 		amdgpu_dm_connector->min_vfreq = 0;
6123 		amdgpu_dm_connector->max_vfreq = 0;
6124 		amdgpu_dm_connector->pixel_clock_mhz = 0;
6125 
6126 		goto update;
6127 	}
6128 
6129 	dm_con_state = to_dm_connector_state(connector->state);
6130 
6131 	edid_check_required = false;
6132 	if (!amdgpu_dm_connector->dc_sink) {
6133 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
6134 		goto update;
6135 	}
6136 	if (!adev->dm.freesync_module)
6137 		goto update;
	/*
	 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP
	 * sinks only.
	 */
6141 	if (edid) {
6142 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
6143 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
6144 			edid_check_required = is_dp_capable_without_timing_msa(
6145 						adev->dm.dc,
6146 						amdgpu_dm_connector);
6147 		}
6148 	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
6151 		for (i = 0; i < 4; i++) {
6152 
6153 			timing	= &edid->detailed_timings[i];
6154 			data	= &timing->data.other_data;
6155 			range	= &data->data.range;
6156 			/*
6157 			 * Check if monitor has continuous frequency mode
6158 			 */
6159 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
6160 				continue;
6161 			/*
6162 			 * Check for flag range limits only. If flag == 1 then
6163 			 * no additional timing information provided.
6164 			 * Default GTF, GTF Secondary curve and CVT are not
6165 			 * supported
6166 			 */
6167 			if (range->flags != 1)
6168 				continue;
6169 
6170 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
6171 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
6172 			amdgpu_dm_connector->pixel_clock_mhz =
6173 				range->pixel_clock_mhz * 10;
6174 			break;
6175 		}
6176 
6177 		if (amdgpu_dm_connector->max_vfreq -
6178 		    amdgpu_dm_connector->min_vfreq > 10) {
6179 
6180 			freesync_capable = true;
6181 		}
6182 	}
6183 
6184 update:
6185 	if (dm_con_state)
6186 		dm_con_state->freesync_capable = freesync_capable;
6187 
6188 	if (connector->vrr_capable_property)
6189 		drm_connector_set_vrr_capable_property(connector,
6190 						       freesync_capable);
6191 }
6192 
6193