/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
98 
99 /* basic init/fini API */
100 static int amdgpu_dm_init(struct amdgpu_device *adev);
101 static void amdgpu_dm_fini(struct amdgpu_device *adev);
102 
103 /*
104  * initializes drm_device display related structures, based on the information
105  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
106  * drm_encoder, drm_mode_config
107  *
108  * Returns 0 on success
109  */
110 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
111 /* removes and deallocates the drm structures, created by the above function */
112 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
113 
static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

/*
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 *
 * @adev: [in] desired amdgpu device
 * @crtc: [in] index of the CRTC to get the counter from
 *
 * Return: the vertical blank counter, or 0 if @crtc is out of range or
 * has no stream attached.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

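		/*
		 * Pack the values as the hw registers expect them: vertical
		 * position / blank-start in the low 16 bits, horizontal
		 * position / blank-end in the high 16 bits.
		 */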
		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

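/*
 * Map an OTG (output timing generator) instance back to the amdgpu_crtc
 * driving it. An instance of -1 is unexpected; warn and fall back to the
 * first CRTC.
 */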
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

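/*
 * VRR counts as active in both the "active variable" and "active fixed"
 * freesync states; in the fixed case the variable-refresh machinery is
 * still engaged, just at a fixed refresh rate.
 */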
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/*
	 * IRQ could occur when in initial stage.
	 * TODO: work and BO cleanup.
	 */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
							amdgpu_crtc->crtc_id);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

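	/*
	 * Size the compressor buffer for the largest mode the connector
	 * reports; the allocation below assumes a worst case of 4 bytes
	 * per pixel.
	 */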
	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

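/*
 * DRM audio component glue: the audio driver calls get_eld() to fetch the
 * ELD of the connector whose audio pin instance matches the requested port.
 */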
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/*
	 * TODO debug why this doesn't work on Raven
	 */
	if (adev->flags & AMD_IS_APU &&
	    adev->asic_type >= CHIP_CARRIZO &&
	    adev->asic_type <= CHIP_RAVEN)
		init_data.flags.gpu_vm_support = true;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	init_data.flags.power_down_display_on_boot = true;

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
#endif

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

#if defined(CONFIG_DEBUG_FS)
	if (dtn_debugfs_init(adev))
		DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
#endif

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_RENOIR:
		return 0;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
				break;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;

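	/* Build a 16-point linear (identity) backlight ramp, 0x0000..0xFFFF. */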
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}

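/*
 * Suspend or resume the DP MST topology managers across S3. If a manager
 * fails to resume, drop the link back to non-MST mode and send a hotplug
 * event so userspace re-probes the (now SST) connector.
 */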
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

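/*
 * Emulate link detection for forced connectors: mark the link disconnected,
 * synthesize a sink of the matching signal type and try to read the EDID
 * locally, without going through real detection hardware.
 */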
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * The EDID-managed connector gets its first update in the mode_valid
	 * hook; after that the connector sink is set to either the fake or
	 * the physical sink, depending on the link status. Skip if this was
	 * already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For headless S3 resume, use the emulated sink to fake a
		 * stream, because connector->sink is set to NULL on resume.
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump the sink
				 * refcount, because the link no longer points
				 * to it after a disconnect; otherwise the next
				 * crtc-to-connector reshuffle by the UMD would
				 * run into an unwanted dc_sink release.
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
					aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;

	/*
	 * On failure, or for MST, there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST handles this
	 * in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

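	/*
	 * Keep servicing ESI vectors until the sink reports nothing new,
	 * bounded by max_process_count so a misbehaving sink cannot keep
	 * us in this loop forever.
	 */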
	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;

	/*
	 * TODO: Temporary mutex to protect the HPD interrupt from GPIO
	 * conflicts; once an i2c helper is implemented, this mutex should
	 * be retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}

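/*
 * Hook each connector's HPD interrupt source (and, for DP, the HPD RX
 * short-pulse source) up to the handlers above.
 */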
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

1672 
1673 /* Register IRQ sources and initialize IRQ callbacks */
1674 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1675 {
1676 	struct dc *dc = adev->dm.dc;
1677 	struct common_irq_params *c_irq_params;
1678 	struct dc_interrupt_params int_params = {0};
1679 	int r;
1680 	int i;
1681 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
1682 
1683 	if (adev->asic_type >= CHIP_VEGA10)
1684 		client_id = SOC15_IH_CLIENTID_DCE;
1685 
1686 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1687 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1688 
1689 	/*
1690 	 * Actions of amdgpu_irq_add_id():
1691 	 * 1. Register a set() function with base driver.
1692 	 *    Base driver will call set() function to enable/disable an
1693 	 *    interrupt in DC hardware.
1694 	 * 2. Register amdgpu_dm_irq_handler().
1695 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1696 	 *    coming from DC hardware.
1697 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1698 	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
1777 
1778 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1779 /* Register IRQ sources and initialize IRQ callbacks */
1780 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1781 {
1782 	struct dc *dc = adev->dm.dc;
1783 	struct common_irq_params *c_irq_params;
1784 	struct dc_interrupt_params int_params = {0};
1785 	int r;
1786 	int i;
1787 
1788 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1789 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1790 
1791 	/*
1792 	 * Actions of amdgpu_irq_add_id():
1793 	 * 1. Register a set() function with base driver.
1794 	 *    Base driver will call set() function to enable/disable an
1795 	 *    interrupt in DC hardware.
1796 	 * 2. Register amdgpu_dm_irq_handler().
1797 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1798 	 *    coming from DC hardware.
1799 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1800 	 *    for acknowledging and handling.
1801 	 */
1802 
1803 	/* Use VSTARTUP interrupt */
1804 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
1805 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
1806 			i++) {
1807 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
1808 
1809 		if (r) {
1810 			DRM_ERROR("Failed to add crtc irq id!\n");
1811 			return r;
1812 		}
1813 
1814 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1815 		int_params.irq_source =
1816 			dc_interrupt_to_irq_source(dc, i, 0);
1817 
1818 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1819 
1820 		c_irq_params->adev = adev;
1821 		c_irq_params->irq_src = int_params.irq_source;
1822 
1823 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1824 				dm_crtc_high_irq, c_irq_params);
1825 	}
1826 
	/*
	 * Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at the end of each vblank, regardless of the state of
	 * the lock, matching DCE behaviour.
	 */
1832 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
1833 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
1834 	     i++) {
1835 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
1836 
1837 		if (r) {
1838 			DRM_ERROR("Failed to add vupdate irq id!\n");
1839 			return r;
1840 		}
1841 
1842 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1843 		int_params.irq_source =
1844 			dc_interrupt_to_irq_source(dc, i, 0);
1845 
1846 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
1847 
1848 		c_irq_params->adev = adev;
1849 		c_irq_params->irq_src = int_params.irq_source;
1850 
1851 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
1852 				dm_vupdate_high_irq, c_irq_params);
1853 	}
1854 
1855 	/* Use GRPH_PFLIP interrupt */
1856 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1857 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1858 			i++) {
1859 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1860 		if (r) {
1861 			DRM_ERROR("Failed to add page flip irq id!\n");
1862 			return r;
1863 		}
1864 
1865 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1866 		int_params.irq_source =
1867 			dc_interrupt_to_irq_source(dc, i, 0);
1868 
1869 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1870 
1871 		c_irq_params->adev = adev;
1872 		c_irq_params->irq_src = int_params.irq_source;
1873 
1874 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
1878 
1879 	/* HPD */
1880 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
1881 			&adev->hpd_irq);
1882 	if (r) {
1883 		DRM_ERROR("Failed to add hpd irq id!\n");
1884 		return r;
1885 	}
1886 
1887 	register_hpd_handlers(adev);
1888 
1889 	return 0;
1890 }
1891 #endif
1892 
1893 /*
1894  * Acquires the lock for the atomic state object and returns
1895  * the new atomic state.
1896  *
1897  * This should only be called during atomic check.
1898  */
1899 static int dm_atomic_get_state(struct drm_atomic_state *state,
1900 			       struct dm_atomic_state **dm_state)
1901 {
1902 	struct drm_device *dev = state->dev;
1903 	struct amdgpu_device *adev = dev->dev_private;
1904 	struct amdgpu_display_manager *dm = &adev->dm;
1905 	struct drm_private_state *priv_state;
1906 
1907 	if (*dm_state)
1908 		return 0;
1909 
1910 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
1911 	if (IS_ERR(priv_state))
1912 		return PTR_ERR(priv_state);
1913 
1914 	*dm_state = to_dm_atomic_state(priv_state);
1915 
1916 	return 0;
1917 }
1918 
1919 struct dm_atomic_state *
1920 dm_atomic_get_new_state(struct drm_atomic_state *state)
1921 {
1922 	struct drm_device *dev = state->dev;
1923 	struct amdgpu_device *adev = dev->dev_private;
1924 	struct amdgpu_display_manager *dm = &adev->dm;
1925 	struct drm_private_obj *obj;
1926 	struct drm_private_state *new_obj_state;
1927 	int i;
1928 
1929 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
1930 		if (obj->funcs == dm->atomic_obj.funcs)
1931 			return to_dm_atomic_state(new_obj_state);
1932 	}
1933 
1934 	return NULL;
1935 }
1936 
1937 struct dm_atomic_state *
1938 dm_atomic_get_old_state(struct drm_atomic_state *state)
1939 {
1940 	struct drm_device *dev = state->dev;
1941 	struct amdgpu_device *adev = dev->dev_private;
1942 	struct amdgpu_display_manager *dm = &adev->dm;
1943 	struct drm_private_obj *obj;
1944 	struct drm_private_state *old_obj_state;
1945 	int i;
1946 
1947 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
1948 		if (obj->funcs == dm->atomic_obj.funcs)
1949 			return to_dm_atomic_state(old_obj_state);
1950 	}
1951 
1952 	return NULL;
1953 }
1954 
1955 static struct drm_private_state *
1956 dm_atomic_duplicate_state(struct drm_private_obj *obj)
1957 {
1958 	struct dm_atomic_state *old_state, *new_state;
1959 
1960 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
1961 	if (!new_state)
1962 		return NULL;
1963 
1964 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
1965 
1966 	old_state = to_dm_atomic_state(obj->state);
1967 
1968 	if (old_state && old_state->context)
1969 		new_state->context = dc_copy_state(old_state->context);
1970 
1971 	if (!new_state->context) {
1972 		kfree(new_state);
1973 		return NULL;
1974 	}
1975 
1976 	return &new_state->base;
1977 }
1978 
1979 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
1980 				    struct drm_private_state *state)
1981 {
1982 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
1983 
1984 	if (dm_state && dm_state->context)
1985 		dc_release_state(dm_state->context);
1986 
1987 	kfree(dm_state);
1988 }
1989 
1990 static struct drm_private_state_funcs dm_atomic_state_funcs = {
1991 	.atomic_duplicate_state = dm_atomic_duplicate_state,
1992 	.atomic_destroy_state = dm_atomic_destroy_state,
1993 };
1994 
1995 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1996 {
1997 	struct dm_atomic_state *state;
1998 	int r;
1999 
2000 	adev->mode_info.mode_config_initialized = true;
2001 
2002 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2003 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2004 
2005 	adev->ddev->mode_config.max_width = 16384;
2006 	adev->ddev->mode_config.max_height = 16384;
2007 
2008 	adev->ddev->mode_config.preferred_depth = 24;
2009 	adev->ddev->mode_config.prefer_shadow = 1;
2010 	/* indicates support for immediate flip */
2011 	adev->ddev->mode_config.async_page_flip = true;
2012 
2013 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2014 
2015 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2016 	if (!state)
2017 		return -ENOMEM;
2018 
2019 	state->context = dc_create_state(adev->dm.dc);
2020 	if (!state->context) {
2021 		kfree(state);
2022 		return -ENOMEM;
2023 	}
2024 
2025 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2026 
2027 	drm_atomic_private_obj_init(adev->ddev,
2028 				    &adev->dm.atomic_obj,
2029 				    &state->base,
2030 				    &dm_atomic_state_funcs);
2031 
2032 	r = amdgpu_display_modeset_create_props(adev);
2033 	if (r)
2034 		return r;
2035 
2036 	r = amdgpu_dm_audio_init(adev);
2037 	if (r)
2038 		return r;
2039 
2040 	return 0;
2041 }
2042 
2043 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2044 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2045 
2046 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2047 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2048 
2049 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2050 {
2051 #if defined(CONFIG_ACPI)
2052 	struct amdgpu_dm_backlight_caps caps;
2053 
2054 	if (dm->backlight_caps.caps_valid)
2055 		return;
2056 
2057 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2058 	if (caps.caps_valid) {
2059 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2060 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2061 		dm->backlight_caps.caps_valid = true;
2062 	} else {
2063 		dm->backlight_caps.min_input_signal =
2064 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2065 		dm->backlight_caps.max_input_signal =
2066 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2067 	}
2068 #else
2069 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2070 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2071 #endif
2072 }
2073 
2074 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2075 {
2076 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2077 	struct amdgpu_dm_backlight_caps caps;
2078 	uint32_t brightness = bd->props.brightness;
2079 
2080 	amdgpu_dm_update_backlight_caps(dm);
2081 	caps = dm->backlight_caps;
2082 	/*
2083 	 * The brightness input is in the range 0-255
2084 	 * It needs to be rescaled to be between the
2085 	 * requested min and max input signal
2086 	 *
2087 	 * It also needs to be scaled up by 0x101 to
2088 	 * match the DC interface which has a range of
2089 	 * 0 to 0xffff
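	 *
	 * For example, assuming the default caps of min = 12 and max = 255,
	 * an input brightness of 128 maps to
	 * 128 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 34431 (0x867f),
	 * i.e. roughly the middle of the DC range.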
2090 	 */
2091 	brightness =
2092 		brightness
2093 		* 0x101
2094 		* (caps.max_input_signal - caps.min_input_signal)
2095 		/ AMDGPU_MAX_BL_LEVEL
2096 		+ caps.min_input_signal * 0x101;
2097 
2098 	if (dc_link_set_backlight_level(dm->backlight_link,
2099 			brightness, 0))
2100 		return 0;
2101 	else
2102 		return 1;
2103 }
2104 
2105 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2106 {
2107 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2108 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2109 
2110 	if (ret == DC_ERROR_UNEXPECTED)
2111 		return bd->props.brightness;
2112 	return ret;
2113 }
2114 
2115 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2116 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2117 	.update_status	= amdgpu_dm_backlight_update_status,
2118 };
2119 
2120 static void
2121 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2122 {
2123 	char bl_name[16];
2124 	struct backlight_properties props = { 0 };
2125 
2126 	amdgpu_dm_update_backlight_caps(dm);
2127 
2128 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2129 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2130 	props.type = BACKLIGHT_RAW;
2131 
2132 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2133 			dm->adev->ddev->primary->index);
2134 
2135 	dm->backlight_dev = backlight_device_register(bl_name,
2136 			dm->adev->ddev->dev,
2137 			dm,
2138 			&amdgpu_dm_backlight_ops,
2139 			&props);
2140 
2141 	if (IS_ERR(dm->backlight_dev))
2142 		DRM_ERROR("DM: Backlight registration failed!\n");
2143 	else
2144 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2145 }
2146 
2147 #endif
2148 
2149 static int initialize_plane(struct amdgpu_display_manager *dm,
2150 			    struct amdgpu_mode_info *mode_info, int plane_id,
2151 			    enum drm_plane_type plane_type,
2152 			    const struct dc_plane_cap *plane_cap)
2153 {
2154 	struct drm_plane *plane;
2155 	unsigned long possible_crtcs;
2156 	int ret = 0;
2157 
2158 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2159 	if (!plane) {
2160 		DRM_ERROR("KMS: Failed to allocate plane\n");
2161 		return -ENOMEM;
2162 	}
2163 	plane->type = plane_type;
2164 
2165 	/*
2166 	 * HACK: IGT tests expect that the primary plane for a CRTC
2167 	 * can only have one possible CRTC. Only expose support for
2168 	 * any CRTC if they're not going to be used as a primary plane
2169 	 * for a CRTC - like overlay or underlay planes.
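	 *
	 * For example, on an ASIC with four streams, primary planes 0-3
	 * get possible_crtcs of 0x1, 0x2, 0x4 and 0x8 respectively, while
	 * any plane with plane_id >= max_streams can go to any CRTC (0xff).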
2170 	 */
2171 	possible_crtcs = 1 << plane_id;
2172 	if (plane_id >= dm->dc->caps.max_streams)
2173 		possible_crtcs = 0xff;
2174 
2175 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2176 
2177 	if (ret) {
2178 		DRM_ERROR("KMS: Failed to initialize plane\n");
2179 		kfree(plane);
2180 		return ret;
2181 	}
2182 
2183 	if (mode_info)
2184 		mode_info->planes[plane_id] = plane;
2185 
2186 	return ret;
2187 }
2188 
2189 
2190 static void register_backlight_device(struct amdgpu_display_manager *dm,
2191 				      struct dc_link *link)
2192 {
2193 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2194 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2195 
2196 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2197 	    link->type != dc_connection_none) {
2198 		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
2202 		 */
2203 		amdgpu_dm_register_backlight_device(dm);
2204 
2205 		if (dm->backlight_dev)
2206 			dm->backlight_link = link;
2207 	}
2208 #endif
2209 }
2210 
2211 
2212 /*
2213  * In this architecture, the association
2214  * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
2217  *
2218  * Returns 0 on success
2219  */
2220 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2221 {
2222 	struct amdgpu_display_manager *dm = &adev->dm;
2223 	int32_t i;
2224 	struct amdgpu_dm_connector *aconnector = NULL;
2225 	struct amdgpu_encoder *aencoder = NULL;
2226 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
2227 	uint32_t link_cnt;
2228 	int32_t primary_planes;
2229 	enum dc_connection_type new_connection_type = dc_connection_none;
2230 	const struct dc_plane_cap *plane;
2231 
2232 	link_cnt = dm->dc->caps.max_links;
2233 	if (amdgpu_dm_mode_config_init(dm->adev)) {
2234 		DRM_ERROR("DM: Failed to initialize mode config\n");
2235 		return -EINVAL;
2236 	}
2237 
2238 	/* There is one primary plane per CRTC */
2239 	primary_planes = dm->dc->caps.max_streams;
2240 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2241 
2242 	/*
2243 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
2244 	 * Order is reversed to match iteration order in atomic check.
2245 	 */
2246 	for (i = (primary_planes - 1); i >= 0; i--) {
2247 		plane = &dm->dc->caps.planes[i];
2248 
2249 		if (initialize_plane(dm, mode_info, i,
2250 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
2251 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
2252 			goto fail;
2253 		}
2254 	}
2255 
2256 	/*
2257 	 * Initialize overlay planes, index starting after primary planes.
2258 	 * These planes have a higher DRM index than the primary planes since
2259 	 * they should be considered as having a higher z-order.
2260 	 * Order is reversed to match iteration order in atomic check.
2261 	 *
2262 	 * Only support DCN for now, and only expose one so we don't encourage
2263 	 * userspace to use up all the pipes.
2264 	 */
2265 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2266 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2267 
2268 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2269 			continue;
2270 
2271 		if (!plane->blends_with_above || !plane->blends_with_below)
2272 			continue;
2273 
2274 		if (!plane->pixel_format_support.argb8888)
2275 			continue;
2276 
2277 		if (initialize_plane(dm, NULL, primary_planes + i,
2278 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
2279 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2280 			goto fail;
2281 		}
2282 
2283 		/* Only create one overlay plane. */
2284 		break;
2285 	}
2286 
2287 	for (i = 0; i < dm->dc->caps.max_streams; i++)
2288 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2289 			DRM_ERROR("KMS: Failed to initialize crtc\n");
2290 			goto fail;
2291 		}
2292 
2293 	dm->display_indexes_num = dm->dc->caps.max_streams;
2294 
2295 	/* loops over all connectors on the board */
2296 	for (i = 0; i < link_cnt; i++) {
2297 		struct dc_link *link = NULL;
2298 
2299 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2300 			DRM_ERROR(
2301 				"KMS: Cannot support more than %d display indexes\n",
2302 					AMDGPU_DM_MAX_DISPLAY_INDEX);
2303 			continue;
2304 		}
2305 
2306 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2307 		if (!aconnector)
2308 			goto fail;
2309 
2310 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2311 		if (!aencoder)
2312 			goto fail;
2313 
2314 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2315 			DRM_ERROR("KMS: Failed to initialize encoder\n");
2316 			goto fail;
2317 		}
2318 
2319 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2320 			DRM_ERROR("KMS: Failed to initialize connector\n");
2321 			goto fail;
2322 		}
2323 
2324 		link = dc_get_link_at_index(dm->dc, i);
2325 
2326 		if (!dc_link_detect_sink(link, &new_connection_type))
2327 			DRM_ERROR("KMS: Failed to detect connector\n");
2328 
2329 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2330 			emulated_link_detect(link);
2331 			amdgpu_dm_update_connector_after_detect(aconnector);
2332 
2333 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2334 			amdgpu_dm_update_connector_after_detect(aconnector);
2335 			register_backlight_device(dm, link);
2336 		}
2337 
2338 
2339 	}
2340 
2341 	/* Software is initialized. Now we can register interrupt handlers. */
2342 	switch (adev->asic_type) {
2343 	case CHIP_BONAIRE:
2344 	case CHIP_HAWAII:
2345 	case CHIP_KAVERI:
2346 	case CHIP_KABINI:
2347 	case CHIP_MULLINS:
2348 	case CHIP_TONGA:
2349 	case CHIP_FIJI:
2350 	case CHIP_CARRIZO:
2351 	case CHIP_STONEY:
2352 	case CHIP_POLARIS11:
2353 	case CHIP_POLARIS10:
2354 	case CHIP_POLARIS12:
2355 	case CHIP_VEGAM:
2356 	case CHIP_VEGA10:
2357 	case CHIP_VEGA12:
2358 	case CHIP_VEGA20:
2359 		if (dce110_register_irq_handlers(dm->adev)) {
2360 			DRM_ERROR("DM: Failed to initialize IRQ\n");
2361 			goto fail;
2362 		}
2363 		break;
2364 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2365 	case CHIP_RAVEN:
2366 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2367 	case CHIP_NAVI12:
2368 	case CHIP_NAVI10:
2369 	case CHIP_NAVI14:
2370 #endif
2371 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2372 	case CHIP_RENOIR:
2373 #endif
2374 		if (dcn10_register_irq_handlers(dm->adev)) {
2375 			DRM_ERROR("DM: Failed to initialize IRQ\n");
2376 			goto fail;
2377 		}
2378 		break;
2379 #endif
2380 	default:
2381 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2382 		goto fail;
2383 	}
2384 
	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		dm->dc->debug.disable_stutter =
			!(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
2387 
2388 	return 0;
2389 fail:
2390 	kfree(aencoder);
2391 	kfree(aconnector);
2392 
2393 	return -EINVAL;
2394 }
2395 
2396 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
2397 {
2398 	drm_mode_config_cleanup(dm->ddev);
2399 	drm_atomic_private_obj_fini(&dm->atomic_obj);
}
2402 
2403 /******************************************************************************
2404  * amdgpu_display_funcs functions
2405  *****************************************************************************/
2406 
2407 /*
2408  * dm_bandwidth_update - program display watermarks
2409  *
2410  * @adev: amdgpu_device pointer
2411  *
2412  * Calculate and program the display watermarks and line buffer allocation.
2413  */
2414 static void dm_bandwidth_update(struct amdgpu_device *adev)
2415 {
2416 	/* TODO: implement later */
2417 }
2418 
2419 static const struct amdgpu_display_funcs dm_display_funcs = {
2420 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
2421 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
2422 	.backlight_set_level = NULL, /* never called for DC */
2423 	.backlight_get_level = NULL, /* never called for DC */
2424 	.hpd_sense = NULL,/* called unconditionally */
2425 	.hpd_set_polarity = NULL, /* called unconditionally */
2426 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
2427 	.page_flip_get_scanoutpos =
2428 		dm_crtc_get_scanoutpos,/* called unconditionally */
2429 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
2430 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
2431 };
2432 
2433 #if defined(CONFIG_DEBUG_KERNEL_DC)
2434 
2435 static ssize_t s3_debug_store(struct device *device,
2436 			      struct device_attribute *attr,
2437 			      const char *buf,
2438 			      size_t count)
2439 {
2440 	int ret;
2441 	int s3_state;
2442 	struct drm_device *drm_dev = dev_get_drvdata(device);
2443 	struct amdgpu_device *adev = drm_dev->dev_private;
2444 
2445 	ret = kstrtoint(buf, 0, &s3_state);
2446 
2447 	if (ret == 0) {
2448 		if (s3_state) {
2449 			dm_resume(adev);
2450 			drm_kms_helper_hotplug_event(adev->ddev);
2451 		} else
2452 			dm_suspend(adev);
2453 	}
2454 
2455 	return ret == 0 ? count : 0;
2456 }
2457 
2458 DEVICE_ATTR_WO(s3_debug);
2459 
2460 #endif
2461 
2462 static int dm_early_init(void *handle)
2463 {
2464 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2465 
2466 	switch (adev->asic_type) {
2467 	case CHIP_BONAIRE:
2468 	case CHIP_HAWAII:
2469 		adev->mode_info.num_crtc = 6;
2470 		adev->mode_info.num_hpd = 6;
2471 		adev->mode_info.num_dig = 6;
2472 		break;
2473 	case CHIP_KAVERI:
2474 		adev->mode_info.num_crtc = 4;
2475 		adev->mode_info.num_hpd = 6;
2476 		adev->mode_info.num_dig = 7;
2477 		break;
2478 	case CHIP_KABINI:
2479 	case CHIP_MULLINS:
2480 		adev->mode_info.num_crtc = 2;
2481 		adev->mode_info.num_hpd = 6;
2482 		adev->mode_info.num_dig = 6;
2483 		break;
2484 	case CHIP_FIJI:
2485 	case CHIP_TONGA:
2486 		adev->mode_info.num_crtc = 6;
2487 		adev->mode_info.num_hpd = 6;
2488 		adev->mode_info.num_dig = 7;
2489 		break;
2490 	case CHIP_CARRIZO:
2491 		adev->mode_info.num_crtc = 3;
2492 		adev->mode_info.num_hpd = 6;
2493 		adev->mode_info.num_dig = 9;
2494 		break;
2495 	case CHIP_STONEY:
2496 		adev->mode_info.num_crtc = 2;
2497 		adev->mode_info.num_hpd = 6;
2498 		adev->mode_info.num_dig = 9;
2499 		break;
2500 	case CHIP_POLARIS11:
2501 	case CHIP_POLARIS12:
2502 		adev->mode_info.num_crtc = 5;
2503 		adev->mode_info.num_hpd = 5;
2504 		adev->mode_info.num_dig = 5;
2505 		break;
2506 	case CHIP_POLARIS10:
2507 	case CHIP_VEGAM:
2508 		adev->mode_info.num_crtc = 6;
2509 		adev->mode_info.num_hpd = 6;
2510 		adev->mode_info.num_dig = 6;
2511 		break;
2512 	case CHIP_VEGA10:
2513 	case CHIP_VEGA12:
2514 	case CHIP_VEGA20:
2515 		adev->mode_info.num_crtc = 6;
2516 		adev->mode_info.num_hpd = 6;
2517 		adev->mode_info.num_dig = 6;
2518 		break;
2519 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2520 	case CHIP_RAVEN:
2521 		adev->mode_info.num_crtc = 4;
2522 		adev->mode_info.num_hpd = 4;
2523 		adev->mode_info.num_dig = 4;
2524 		break;
2525 #endif
2526 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2527 	case CHIP_NAVI10:
2528 	case CHIP_NAVI12:
2529 		adev->mode_info.num_crtc = 6;
2530 		adev->mode_info.num_hpd = 6;
2531 		adev->mode_info.num_dig = 6;
2532 		break;
2533 	case CHIP_NAVI14:
2534 		adev->mode_info.num_crtc = 5;
2535 		adev->mode_info.num_hpd = 5;
2536 		adev->mode_info.num_dig = 5;
2537 		break;
2538 #endif
2539 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2540 	case CHIP_RENOIR:
2541 		adev->mode_info.num_crtc = 4;
2542 		adev->mode_info.num_hpd = 4;
2543 		adev->mode_info.num_dig = 4;
2544 		break;
2545 #endif
2546 	default:
2547 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2548 		return -EINVAL;
2549 	}
2550 
2551 	amdgpu_dm_set_irq_funcs(adev);
2552 
2553 	if (adev->mode_info.funcs == NULL)
2554 		adev->mode_info.funcs = &dm_display_funcs;
2555 
2556 	/*
2557 	 * Note: Do NOT change adev->audio_endpt_rreg and
2558 	 * adev->audio_endpt_wreg because they are initialised in
2559 	 * amdgpu_device_init()
2560 	 */
2561 #if defined(CONFIG_DEBUG_KERNEL_DC)
2562 	device_create_file(
2563 		adev->ddev->dev,
2564 		&dev_attr_s3_debug);
2565 #endif
2566 
2567 	return 0;
2568 }
2569 
2570 static bool modeset_required(struct drm_crtc_state *crtc_state,
2571 			     struct dc_stream_state *new_stream,
2572 			     struct dc_stream_state *old_stream)
2573 {
2574 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
2575 		return false;
2576 
2577 	if (!crtc_state->enable)
2578 		return false;
2579 
2580 	return crtc_state->active;
2581 }
2582 
2583 static bool modereset_required(struct drm_crtc_state *crtc_state)
2584 {
2585 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
2586 		return false;
2587 
2588 	return !crtc_state->enable || !crtc_state->active;
2589 }
2590 
2591 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
2592 {
2593 	drm_encoder_cleanup(encoder);
2594 	kfree(encoder);
2595 }
2596 
2597 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
2598 	.destroy = amdgpu_dm_encoder_destroy,
2599 };
2600 
2601 
2602 static int fill_dc_scaling_info(const struct drm_plane_state *state,
2603 				struct dc_scaling_info *scaling_info)
2604 {
2605 	int scale_w, scale_h;
2606 
2607 	memset(scaling_info, 0, sizeof(*scaling_info));
2608 
2609 	/* Source is fixed 16.16 but we ignore mantissa for now... */
2610 	scaling_info->src_rect.x = state->src_x >> 16;
2611 	scaling_info->src_rect.y = state->src_y >> 16;
2612 
2613 	scaling_info->src_rect.width = state->src_w >> 16;
2614 	if (scaling_info->src_rect.width == 0)
2615 		return -EINVAL;
2616 
2617 	scaling_info->src_rect.height = state->src_h >> 16;
2618 	if (scaling_info->src_rect.height == 0)
2619 		return -EINVAL;
2620 
2621 	scaling_info->dst_rect.x = state->crtc_x;
2622 	scaling_info->dst_rect.y = state->crtc_y;
2623 
2624 	if (state->crtc_w == 0)
2625 		return -EINVAL;
2626 
2627 	scaling_info->dst_rect.width = state->crtc_w;
2628 
2629 	if (state->crtc_h == 0)
2630 		return -EINVAL;
2631 
2632 	scaling_info->dst_rect.height = state->crtc_h;
2633 
2634 	/* DRM doesn't specify clipping on destination output. */
2635 	scaling_info->clip_rect = scaling_info->dst_rect;
2636 
2637 	/* TODO: Validate scaling per-format with DC plane caps */
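	/*
	 * The ratios below are in units of 1/1000: the checks reject
	 * anything below a 1:4 downscale (250) or above a 16:1 upscale
	 * (16000). For example, a 1920-wide source scanned out at 480
	 * pixels gives scale_w = 480 * 1000 / 1920 = 250, exactly at the
	 * downscale limit.
	 */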
2638 	scale_w = scaling_info->dst_rect.width * 1000 /
2639 		  scaling_info->src_rect.width;
2640 
2641 	if (scale_w < 250 || scale_w > 16000)
2642 		return -EINVAL;
2643 
2644 	scale_h = scaling_info->dst_rect.height * 1000 /
2645 		  scaling_info->src_rect.height;
2646 
2647 	if (scale_h < 250 || scale_h > 16000)
2648 		return -EINVAL;
2649 
2650 	/*
2651 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
2652 	 * assume reasonable defaults based on the format.
2653 	 */
2654 
2655 	return 0;
2656 }
2657 
2658 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
2659 		       uint64_t *tiling_flags)
2660 {
2661 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
2662 	int r = amdgpu_bo_reserve(rbo, false);
2663 
2664 	if (unlikely(r)) {
2665 		/* Don't show error message when returning -ERESTARTSYS */
2666 		if (r != -ERESTARTSYS)
2667 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
2668 		return r;
2669 	}
2670 
2671 	if (tiling_flags)
2672 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
2673 
2674 	amdgpu_bo_unreserve(rbo);
2675 
2676 	return r;
2677 }
2678 
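/*
 * The DCC metadata offset is stored in the tiling flags in units of
 * 256 bytes. For example, a DCC_OFFSET_256B value of 4 places the
 * metadata 1 KiB past the surface base address; a value of 0 means
 * the surface carries no DCC metadata.
 */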
2679 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
2680 {
2681 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
2682 
2683 	return offset ? (address + offset * 256) : 0;
2684 }
2685 
2686 static int
2687 fill_plane_dcc_attributes(struct amdgpu_device *adev,
2688 			  const struct amdgpu_framebuffer *afb,
2689 			  const enum surface_pixel_format format,
2690 			  const enum dc_rotation_angle rotation,
2691 			  const struct plane_size *plane_size,
2692 			  const union dc_tiling_info *tiling_info,
2693 			  const uint64_t info,
2694 			  struct dc_plane_dcc_param *dcc,
2695 			  struct dc_plane_address *address)
2696 {
2697 	struct dc *dc = adev->dm.dc;
2698 	struct dc_dcc_surface_param input;
2699 	struct dc_surface_dcc_cap output;
2700 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
2701 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
2702 	uint64_t dcc_address;
2703 
2704 	memset(&input, 0, sizeof(input));
2705 	memset(&output, 0, sizeof(output));
2706 
2707 	if (!offset)
2708 		return 0;
2709 
2710 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
2711 		return 0;
2712 
2713 	if (!dc->cap_funcs.get_dcc_compression_cap)
2714 		return -EINVAL;
2715 
2716 	input.format = format;
2717 	input.surface_size.width = plane_size->surface_size.width;
2718 	input.surface_size.height = plane_size->surface_size.height;
2719 	input.swizzle_mode = tiling_info->gfx9.swizzle;
2720 
2721 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
2722 		input.scan = SCAN_DIRECTION_HORIZONTAL;
2723 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
2724 		input.scan = SCAN_DIRECTION_VERTICAL;
2725 
2726 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
2727 		return -EINVAL;
2728 
2729 	if (!output.capable)
2730 		return -EINVAL;
2731 
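	/*
	 * If DC can only compress this surface with independent 64B blocks
	 * but the buffer was not allocated that way, DCC cannot be enabled.
	 */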
2732 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
2733 		return -EINVAL;
2734 
2735 	dcc->enable = 1;
2736 	dcc->meta_pitch =
2737 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
2738 	dcc->independent_64b_blks = i64b;
2739 
2740 	dcc_address = get_dcc_address(afb->address, info);
2741 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
2742 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
2743 
2744 	return 0;
2745 }
2746 
2747 static int
2748 fill_plane_buffer_attributes(struct amdgpu_device *adev,
2749 			     const struct amdgpu_framebuffer *afb,
2750 			     const enum surface_pixel_format format,
2751 			     const enum dc_rotation_angle rotation,
2752 			     const uint64_t tiling_flags,
2753 			     union dc_tiling_info *tiling_info,
2754 			     struct plane_size *plane_size,
2755 			     struct dc_plane_dcc_param *dcc,
2756 			     struct dc_plane_address *address)
2757 {
2758 	const struct drm_framebuffer *fb = &afb->base;
2759 	int ret;
2760 
2761 	memset(tiling_info, 0, sizeof(*tiling_info));
2762 	memset(plane_size, 0, sizeof(*plane_size));
2763 	memset(dcc, 0, sizeof(*dcc));
2764 	memset(address, 0, sizeof(*address));
2765 
2766 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2767 		plane_size->surface_size.x = 0;
2768 		plane_size->surface_size.y = 0;
2769 		plane_size->surface_size.width = fb->width;
2770 		plane_size->surface_size.height = fb->height;
2771 		plane_size->surface_pitch =
2772 			fb->pitches[0] / fb->format->cpp[0];
2773 
2774 		address->type = PLN_ADDR_TYPE_GRAPHICS;
2775 		address->grph.addr.low_part = lower_32_bits(afb->address);
2776 		address->grph.addr.high_part = upper_32_bits(afb->address);
2777 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
2778 		uint64_t chroma_addr = afb->address + fb->offsets[1];
2779 
2780 		plane_size->surface_size.x = 0;
2781 		plane_size->surface_size.y = 0;
2782 		plane_size->surface_size.width = fb->width;
2783 		plane_size->surface_size.height = fb->height;
2784 		plane_size->surface_pitch =
2785 			fb->pitches[0] / fb->format->cpp[0];
2786 
2787 		plane_size->chroma_size.x = 0;
2788 		plane_size->chroma_size.y = 0;
2789 		/* TODO: set these based on surface format */
2790 		plane_size->chroma_size.width = fb->width / 2;
2791 		plane_size->chroma_size.height = fb->height / 2;
2792 
2793 		plane_size->chroma_pitch =
2794 			fb->pitches[1] / fb->format->cpp[1];
2795 
2796 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
2797 		address->video_progressive.luma_addr.low_part =
2798 			lower_32_bits(afb->address);
2799 		address->video_progressive.luma_addr.high_part =
2800 			upper_32_bits(afb->address);
2801 		address->video_progressive.chroma_addr.low_part =
2802 			lower_32_bits(chroma_addr);
2803 		address->video_progressive.chroma_addr.high_part =
2804 			upper_32_bits(chroma_addr);
2805 	}
2806 
2807 	/* Fill GFX8 params */
2808 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
2809 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
2810 
2811 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2812 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2813 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2814 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2815 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2816 
2817 		/* XXX fix me for VI */
2818 		tiling_info->gfx8.num_banks = num_banks;
2819 		tiling_info->gfx8.array_mode =
2820 				DC_ARRAY_2D_TILED_THIN1;
2821 		tiling_info->gfx8.tile_split = tile_split;
2822 		tiling_info->gfx8.bank_width = bankw;
2823 		tiling_info->gfx8.bank_height = bankh;
2824 		tiling_info->gfx8.tile_aspect = mtaspect;
2825 		tiling_info->gfx8.tile_mode =
2826 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
2827 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
2828 			== DC_ARRAY_1D_TILED_THIN1) {
2829 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
2830 	}
2831 
2832 	tiling_info->gfx8.pipe_config =
2833 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2834 
2835 	if (adev->asic_type == CHIP_VEGA10 ||
2836 	    adev->asic_type == CHIP_VEGA12 ||
2837 	    adev->asic_type == CHIP_VEGA20 ||
2838 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2839 	    adev->asic_type == CHIP_NAVI10 ||
2840 	    adev->asic_type == CHIP_NAVI14 ||
2841 	    adev->asic_type == CHIP_NAVI12 ||
2842 #endif
2843 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2844 	    adev->asic_type == CHIP_RENOIR ||
2845 #endif
2846 	    adev->asic_type == CHIP_RAVEN) {
2847 		/* Fill GFX9 params */
2848 		tiling_info->gfx9.num_pipes =
2849 			adev->gfx.config.gb_addr_config_fields.num_pipes;
2850 		tiling_info->gfx9.num_banks =
2851 			adev->gfx.config.gb_addr_config_fields.num_banks;
2852 		tiling_info->gfx9.pipe_interleave =
2853 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
2854 		tiling_info->gfx9.num_shader_engines =
2855 			adev->gfx.config.gb_addr_config_fields.num_se;
2856 		tiling_info->gfx9.max_compressed_frags =
2857 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
2858 		tiling_info->gfx9.num_rb_per_se =
2859 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
2860 		tiling_info->gfx9.swizzle =
2861 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2862 		tiling_info->gfx9.shaderEnable = 1;
2863 
2864 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
2865 						plane_size, tiling_info,
2866 						tiling_flags, dcc, address);
2867 		if (ret)
2868 			return ret;
2869 	}
2870 
2871 	return 0;
2872 }
2873 
2874 static void
2875 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
2876 			       bool *per_pixel_alpha, bool *global_alpha,
2877 			       int *global_alpha_value)
2878 {
2879 	*per_pixel_alpha = false;
2880 	*global_alpha = false;
2881 	*global_alpha_value = 0xff;
2882 
2883 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
2884 		return;
2885 
2886 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
2887 		static const uint32_t alpha_formats[] = {
2888 			DRM_FORMAT_ARGB8888,
2889 			DRM_FORMAT_RGBA8888,
2890 			DRM_FORMAT_ABGR8888,
2891 		};
2892 		uint32_t format = plane_state->fb->format->format;
2893 		unsigned int i;
2894 
2895 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
2896 			if (format == alpha_formats[i]) {
2897 				*per_pixel_alpha = true;
2898 				break;
2899 			}
2900 		}
2901 	}
2902 
2903 	if (plane_state->alpha < 0xffff) {
2904 		*global_alpha = true;
2905 		*global_alpha_value = plane_state->alpha >> 8;
2906 	}
2907 }
2908 
2909 static int
2910 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
2911 			    const enum surface_pixel_format format,
2912 			    enum dc_color_space *color_space)
2913 {
2914 	bool full_range;
2915 
2916 	*color_space = COLOR_SPACE_SRGB;
2917 
2918 	/* DRM color properties only affect non-RGB formats. */
2919 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
2920 		return 0;
2921 
2922 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
2923 
2924 	switch (plane_state->color_encoding) {
2925 	case DRM_COLOR_YCBCR_BT601:
2926 		if (full_range)
2927 			*color_space = COLOR_SPACE_YCBCR601;
2928 		else
2929 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
2930 		break;
2931 
2932 	case DRM_COLOR_YCBCR_BT709:
2933 		if (full_range)
2934 			*color_space = COLOR_SPACE_YCBCR709;
2935 		else
2936 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
2937 		break;
2938 
2939 	case DRM_COLOR_YCBCR_BT2020:
2940 		if (full_range)
2941 			*color_space = COLOR_SPACE_2020_YCBCR;
2942 		else
2943 			return -EINVAL;
2944 		break;
2945 
2946 	default:
2947 		return -EINVAL;
2948 	}
2949 
2950 	return 0;
2951 }
2952 
2953 static int
2954 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
2955 			    const struct drm_plane_state *plane_state,
2956 			    const uint64_t tiling_flags,
2957 			    struct dc_plane_info *plane_info,
2958 			    struct dc_plane_address *address)
2959 {
2960 	const struct drm_framebuffer *fb = plane_state->fb;
2961 	const struct amdgpu_framebuffer *afb =
2962 		to_amdgpu_framebuffer(plane_state->fb);
2963 	struct drm_format_name_buf format_name;
2964 	int ret;
2965 
2966 	memset(plane_info, 0, sizeof(*plane_info));
2967 
2968 	switch (fb->format->format) {
2969 	case DRM_FORMAT_C8:
2970 		plane_info->format =
2971 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
2972 		break;
2973 	case DRM_FORMAT_RGB565:
2974 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
2975 		break;
2976 	case DRM_FORMAT_XRGB8888:
2977 	case DRM_FORMAT_ARGB8888:
2978 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
2979 		break;
2980 	case DRM_FORMAT_XRGB2101010:
2981 	case DRM_FORMAT_ARGB2101010:
2982 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
2983 		break;
2984 	case DRM_FORMAT_XBGR2101010:
2985 	case DRM_FORMAT_ABGR2101010:
2986 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
2987 		break;
2988 	case DRM_FORMAT_XBGR8888:
2989 	case DRM_FORMAT_ABGR8888:
2990 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
2991 		break;
2992 	case DRM_FORMAT_NV21:
2993 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
2994 		break;
2995 	case DRM_FORMAT_NV12:
2996 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
2997 		break;
2998 	default:
2999 		DRM_ERROR(
3000 			"Unsupported screen format %s\n",
3001 			drm_get_format_name(fb->format->format, &format_name));
3002 		return -EINVAL;
3003 	}
3004 
3005 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3006 	case DRM_MODE_ROTATE_0:
3007 		plane_info->rotation = ROTATION_ANGLE_0;
3008 		break;
3009 	case DRM_MODE_ROTATE_90:
3010 		plane_info->rotation = ROTATION_ANGLE_90;
3011 		break;
3012 	case DRM_MODE_ROTATE_180:
3013 		plane_info->rotation = ROTATION_ANGLE_180;
3014 		break;
3015 	case DRM_MODE_ROTATE_270:
3016 		plane_info->rotation = ROTATION_ANGLE_270;
3017 		break;
3018 	default:
3019 		plane_info->rotation = ROTATION_ANGLE_0;
3020 		break;
3021 	}
3022 
3023 	plane_info->visible = true;
3024 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3025 
3026 	plane_info->layer_index = 0;
3027 
3028 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3029 					  &plane_info->color_space);
3030 	if (ret)
3031 		return ret;
3032 
3033 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3034 					   plane_info->rotation, tiling_flags,
3035 					   &plane_info->tiling_info,
3036 					   &plane_info->plane_size,
3037 					   &plane_info->dcc, address);
3038 	if (ret)
3039 		return ret;
3040 
3041 	fill_blending_from_plane_state(
3042 		plane_state, &plane_info->per_pixel_alpha,
3043 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3044 
3045 	return 0;
3046 }
3047 
3048 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3049 				    struct dc_plane_state *dc_plane_state,
3050 				    struct drm_plane_state *plane_state,
3051 				    struct drm_crtc_state *crtc_state)
3052 {
3053 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3054 	const struct amdgpu_framebuffer *amdgpu_fb =
3055 		to_amdgpu_framebuffer(plane_state->fb);
3056 	struct dc_scaling_info scaling_info;
3057 	struct dc_plane_info plane_info;
3058 	uint64_t tiling_flags;
3059 	int ret;
3060 
3061 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3062 	if (ret)
3063 		return ret;
3064 
3065 	dc_plane_state->src_rect = scaling_info.src_rect;
3066 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3067 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3068 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3069 
3070 	ret = get_fb_info(amdgpu_fb, &tiling_flags);
3071 	if (ret)
3072 		return ret;
3073 
3074 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3075 					  &plane_info,
3076 					  &dc_plane_state->address);
3077 	if (ret)
3078 		return ret;
3079 
3080 	dc_plane_state->format = plane_info.format;
3081 	dc_plane_state->color_space = plane_info.color_space;
3083 	dc_plane_state->plane_size = plane_info.plane_size;
3084 	dc_plane_state->rotation = plane_info.rotation;
3085 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3086 	dc_plane_state->stereo_format = plane_info.stereo_format;
3087 	dc_plane_state->tiling_info = plane_info.tiling_info;
3088 	dc_plane_state->visible = plane_info.visible;
3089 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3090 	dc_plane_state->global_alpha = plane_info.global_alpha;
3091 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3092 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
3094 
3095 	/*
3096 	 * Always set input transfer function, since plane state is refreshed
3097 	 * every time.
3098 	 */
3099 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3100 	if (ret)
3101 		return ret;
3102 
3103 	return 0;
3104 }
3105 
3106 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3107 					   const struct dm_connector_state *dm_state,
3108 					   struct dc_stream_state *stream)
3109 {
3110 	enum amdgpu_rmx_type rmx_type;
3111 
	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */
3114 
3115 	/* no mode. nothing to be done */
3116 	if (!mode)
3117 		return;
3118 
3119 	/* Full screen scaling by default */
3120 	src.width = mode->hdisplay;
3121 	src.height = mode->vdisplay;
3122 	dst.width = stream->timing.h_addressable;
3123 	dst.height = stream->timing.v_addressable;
3124 
3125 	if (dm_state) {
3126 		rmx_type = dm_state->scaling;
3127 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
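			/*
			 * Scale the dimension that needs less upscaling (or
			 * more downscaling) and center the result. For
			 * example, a 1280x1024 source on a 1920x1080 display
			 * gives 1280 * 1080 < 1024 * 1920, so the width is
			 * reduced to 1280 * 1080 / 1024 = 1350 and the image
			 * is pillarboxed.
			 */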
3128 			if (src.width * dst.height <
3129 					src.height * dst.width) {
3130 				/* height needs less upscaling/more downscaling */
3131 				dst.width = src.width *
3132 						dst.height / src.height;
3133 			} else {
3134 				/* width needs less upscaling/more downscaling */
3135 				dst.height = src.height *
3136 						dst.width / src.width;
3137 			}
3138 		} else if (rmx_type == RMX_CENTER) {
3139 			dst = src;
3140 		}
3141 
3142 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
3143 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
3144 
3145 		if (dm_state->underscan_enable) {
3146 			dst.x += dm_state->underscan_hborder / 2;
3147 			dst.y += dm_state->underscan_vborder / 2;
3148 			dst.width -= dm_state->underscan_hborder;
3149 			dst.height -= dm_state->underscan_vborder;
3150 		}
3151 	}
3152 
3153 	stream->src = src;
3154 	stream->dst = dst;
3155 
3156 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
			dst.x, dst.y, dst.width, dst.height);
}
3160 
3161 static enum dc_color_depth
3162 convert_color_depth_from_display_info(const struct drm_connector *connector,
3163 				      const struct drm_connector_state *state)
3164 {
3165 	uint8_t bpc = (uint8_t)connector->display_info.bpc;
3166 
3167 	/* Assume 8 bpc by default if no bpc is specified. */
3168 	bpc = bpc ? bpc : 8;
3169 
3170 	if (!state)
3171 		state = connector->state;
3172 
3173 	if (state) {
3174 		/*
3175 		 * Cap display bpc based on the user requested value.
3176 		 *
		 * The value for state->max_bpc may not be correctly updated
3178 		 * depending on when the connector gets added to the state
3179 		 * or if this was called outside of atomic check, so it
3180 		 * can't be used directly.
3181 		 */
3182 		bpc = min(bpc, state->max_requested_bpc);
3183 
3184 		/* Round down to the nearest even number. */
3185 		bpc = bpc - (bpc & 1);
3186 	}
3187 
3188 	switch (bpc) {
3189 	case 0:
3190 		/*
3191 		 * Temporary Work around, DRM doesn't parse color depth for
3192 		 * EDID revision before 1.4
3193 		 * TODO: Fix edid parsing
3194 		 */
3195 		return COLOR_DEPTH_888;
3196 	case 6:
3197 		return COLOR_DEPTH_666;
3198 	case 8:
3199 		return COLOR_DEPTH_888;
3200 	case 10:
3201 		return COLOR_DEPTH_101010;
3202 	case 12:
3203 		return COLOR_DEPTH_121212;
3204 	case 14:
3205 		return COLOR_DEPTH_141414;
3206 	case 16:
3207 		return COLOR_DEPTH_161616;
3208 	default:
3209 		return COLOR_DEPTH_UNDEFINED;
3210 	}
3211 }
3212 
3213 static enum dc_aspect_ratio
3214 get_aspect_ratio(const struct drm_display_mode *mode_in)
3215 {
3216 	/* 1-1 mapping, since both enums follow the HDMI spec. */
3217 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3218 }
3219 
3220 static enum dc_color_space
3221 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3222 {
3223 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
3224 
3225 	switch (dc_crtc_timing->pixel_encoding)	{
3226 	case PIXEL_ENCODING_YCBCR422:
3227 	case PIXEL_ENCODING_YCBCR444:
3228 	case PIXEL_ENCODING_YCBCR420:
3229 	{
3230 		/*
3231 		 * 27030khz is the separation point between HDTV and SDTV
3232 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
3233 		 * respectively
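		 * For example, 1080p at a 148.50 MHz pixel clock selects
		 * YCbCr709, while 480p at 27.000 MHz falls below the cutoff
		 * and selects YCbCr601.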
3234 		 */
3235 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
3236 			if (dc_crtc_timing->flags.Y_ONLY)
3237 				color_space =
3238 					COLOR_SPACE_YCBCR709_LIMITED;
3239 			else
3240 				color_space = COLOR_SPACE_YCBCR709;
3241 		} else {
3242 			if (dc_crtc_timing->flags.Y_ONLY)
3243 				color_space =
3244 					COLOR_SPACE_YCBCR601_LIMITED;
3245 			else
3246 				color_space = COLOR_SPACE_YCBCR601;
3247 		}
3248 
3249 	}
3250 	break;
3251 	case PIXEL_ENCODING_RGB:
3252 		color_space = COLOR_SPACE_SRGB;
3253 		break;
3254 
3255 	default:
3256 		WARN_ON(1);
3257 		break;
3258 	}
3259 
3260 	return color_space;
3261 }
3262 
3263 static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
3264 {
3265 	if (timing_out->display_color_depth <= COLOR_DEPTH_888)
3266 		return;
3267 
3268 	timing_out->display_color_depth--;
3269 }
3270 
3271 static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
3272 						const struct drm_display_info *info)
3273 {
	int normalized_clk;

	if (timing_out->display_color_depth <= COLOR_DEPTH_888)
3276 		return;
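	/*
	 * Worked example, assuming a sink with a 600 MHz (600000 kHz) max
	 * TMDS clock: 4k60 has a 594 MHz pixel clock, which at 10 bpc
	 * becomes 594000 * 30 / 24 = 742500 kHz. That exceeds the limit,
	 * so the depth is stepped down to 8 bpc and the unmodified clock
	 * is used.
	 */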
3277 	do {
3278 		normalized_clk = timing_out->pix_clk_100hz / 10;
3279 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3280 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3281 			normalized_clk /= 2;
3282 		/* Adjusting pix clock following on HDMI spec based on colour depth */
3283 		switch (timing_out->display_color_depth) {
3284 		case COLOR_DEPTH_101010:
3285 			normalized_clk = (normalized_clk * 30) / 24;
3286 			break;
3287 		case COLOR_DEPTH_121212:
3288 			normalized_clk = (normalized_clk * 36) / 24;
3289 			break;
3290 		case COLOR_DEPTH_161616:
3291 			normalized_clk = (normalized_clk * 48) / 24;
3292 			break;
3293 		default:
3294 			return;
3295 		}
3296 		if (normalized_clk <= info->max_tmds_clock)
3297 			return;
		reduce_mode_colour_depth(timing_out);
	} while (timing_out->display_color_depth > COLOR_DEPTH_888);
}
3303 
3304 static void fill_stream_properties_from_drm_display_mode(
3305 	struct dc_stream_state *stream,
3306 	const struct drm_display_mode *mode_in,
3307 	const struct drm_connector *connector,
3308 	const struct drm_connector_state *connector_state,
3309 	const struct dc_stream_state *old_stream)
3310 {
3311 	struct dc_crtc_timing *timing_out = &stream->timing;
3312 	const struct drm_display_info *info = &connector->display_info;
3313 
3314 	memset(timing_out, 0, sizeof(struct dc_crtc_timing));
3315 
3316 	timing_out->h_border_left = 0;
3317 	timing_out->h_border_right = 0;
3318 	timing_out->v_border_top = 0;
3319 	timing_out->v_border_bottom = 0;
3320 	/* TODO: un-hardcode */
3321 	if (drm_mode_is_420_only(info, mode_in)
3322 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3323 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3324 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3325 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3326 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3327 	else
3328 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3329 
3330 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3331 	timing_out->display_color_depth = convert_color_depth_from_display_info(
3332 		connector, connector_state);
3333 	timing_out->scan_type = SCANNING_TYPE_NODATA;
3334 	timing_out->hdmi_vic = 0;
3335 
	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY =
			old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY =
			old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3340 	} else {
3341 		timing_out->vic = drm_match_cea_mode(mode_in);
3342 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
3343 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
3344 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
3345 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
3346 	}
3347 
3348 	timing_out->h_addressable = mode_in->crtc_hdisplay;
3349 	timing_out->h_total = mode_in->crtc_htotal;
3350 	timing_out->h_sync_width =
3351 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
3352 	timing_out->h_front_porch =
3353 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
3354 	timing_out->v_total = mode_in->crtc_vtotal;
3355 	timing_out->v_addressable = mode_in->crtc_vdisplay;
3356 	timing_out->v_front_porch =
3357 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
3358 	timing_out->v_sync_width =
3359 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
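	/* crtc_clock is in kHz while DC timings use units of 100 Hz */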
3360 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
3361 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
3362 
3363 	stream->output_color_space = get_output_color_space(timing_out);
3364 
3365 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
3366 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
3367 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3368 		adjust_colour_depth_from_display_info(timing_out, info);
3369 }
3370 
3371 static void fill_audio_info(struct audio_info *audio_info,
3372 			    const struct drm_connector *drm_connector,
3373 			    const struct dc_sink *dc_sink)
3374 {
3375 	int i = 0;
3376 	int cea_revision = 0;
3377 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
3378 
3379 	audio_info->manufacture_id = edid_caps->manufacturer_id;
3380 	audio_info->product_id = edid_caps->product_id;
3381 
3382 	cea_revision = drm_connector->display_info.cea_rev;
3383 
3384 	strscpy(audio_info->display_name,
3385 		edid_caps->display_name,
3386 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
3387 
3388 	if (cea_revision >= 3) {
3389 		audio_info->mode_count = edid_caps->audio_mode_count;
3390 
3391 		for (i = 0; i < audio_info->mode_count; ++i) {
3392 			audio_info->modes[i].format_code =
3393 					(enum audio_format_code)
3394 					(edid_caps->audio_modes[i].format_code);
3395 			audio_info->modes[i].channel_count =
3396 					edid_caps->audio_modes[i].channel_count;
3397 			audio_info->modes[i].sample_rates.all =
3398 					edid_caps->audio_modes[i].sample_rate;
3399 			audio_info->modes[i].sample_size =
3400 					edid_caps->audio_modes[i].sample_size;
3401 		}
3402 	}
3403 
3404 	audio_info->flags.all = edid_caps->speaker_flags;
3405 
3406 	/* TODO: We only check for the progressive mode, check for interlace mode too */
3407 	if (drm_connector->latency_present[0]) {
3408 		audio_info->video_latency = drm_connector->video_latency[0];
3409 		audio_info->audio_latency = drm_connector->audio_latency[0];
3410 	}
3411 
3412 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
3413 
3414 }
3415 
3416 static void
3417 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
3418 				      struct drm_display_mode *dst_mode)
3419 {
3420 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
3421 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
3422 	dst_mode->crtc_clock = src_mode->crtc_clock;
3423 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
3424 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
3425 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
3426 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
3427 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
3428 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
3429 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
3430 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
3431 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
3432 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
3433 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
3434 }
3435 
3436 static void
3437 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
3438 					const struct drm_display_mode *native_mode,
3439 					bool scale_enabled)
3440 {
3441 	if (scale_enabled) {
3442 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3443 	} else if (native_mode->clock == drm_mode->clock &&
3444 			native_mode->htotal == drm_mode->htotal &&
3445 			native_mode->vtotal == drm_mode->vtotal) {
3446 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3447 	} else {
3448 		/* no scaling nor amdgpu inserted, no need to patch */
3449 	}
3450 }
3451 
3452 static struct dc_sink *
3453 create_fake_sink(struct amdgpu_dm_connector *aconnector)
3454 {
3455 	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
3458 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
3459 
3460 	sink = dc_sink_create(&sink_init_data);
3461 	if (!sink) {
3462 		DRM_ERROR("Failed to create sink!\n");
3463 		return NULL;
3464 	}
3465 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
3466 
3467 	return sink;
3468 }
3469 
3470 static void set_multisync_trigger_params(
3471 		struct dc_stream_state *stream)
3472 {
3473 	if (stream->triggered_crtc_reset.enabled) {
3474 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
3475 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
3476 	}
3477 }
3478 
3479 static void set_master_stream(struct dc_stream_state *stream_set[],
3480 			      int stream_count)
3481 {
3482 	int j, highest_rfr = 0, master_stream = 0;
3483 
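	/*
	 * The master is the stream with the highest refresh rate. For
	 * example, 1080p60 with a 148.5 MHz pixel clock and a 2200x1125
	 * total gives (1485000 * 100) / (2200 * 1125) = 60 Hz.
	 */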
	for (j = 0; j < stream_count; j++) {
3485 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
3486 			int refresh_rate = 0;
3487 
3488 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
3489 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
3490 			if (refresh_rate > highest_rfr) {
3491 				highest_rfr = refresh_rate;
3492 				master_stream = j;
3493 			}
3494 		}
3495 	}
3496 	for (j = 0;  j < stream_count; j++) {
3497 		if (stream_set[j])
3498 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
3499 	}
3500 }
3501 
3502 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
3503 {
3504 	int i = 0;
3505 
3506 	if (context->stream_count < 2)
3507 		return;
3508 	for (i = 0; i < context->stream_count ; i++) {
3509 		if (!context->streams[i])
3510 			continue;
3511 		/*
3512 		 * TODO: add a function to read AMD VSDB bits and set
3513 		 * crtc_sync_master.multi_sync_enabled flag
3514 		 * For now it's set to false
3515 		 */
3516 		set_multisync_trigger_params(context->streams[i]);
3517 	}
3518 	set_master_stream(context->streams, context->stream_count);
3519 }
3520 
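/*
 * Build a dc_stream_state for the given connector and mode. If no physical
 * sink is attached, a fake (virtual) sink is created so validation can still
 * proceed. The returned stream holds a reference which the caller releases
 * with dc_stream_release().
 */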
3521 static struct dc_stream_state *
3522 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
3523 		       const struct drm_display_mode *drm_mode,
3524 		       const struct dm_connector_state *dm_state,
3525 		       const struct dc_stream_state *old_stream)
3526 {
3527 	struct drm_display_mode *preferred_mode = NULL;
3528 	struct drm_connector *drm_connector;
3529 	const struct drm_connector_state *con_state =
3530 		dm_state ? &dm_state->base : NULL;
3531 	struct dc_stream_state *stream = NULL;
3532 	struct drm_display_mode mode = *drm_mode;
3533 	bool native_mode_found = false;
3534 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
3535 	int mode_refresh;
3536 	int preferred_refresh = 0;
3537 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3538 	struct dsc_dec_dpcd_caps dsc_caps;
3539 	uint32_t link_bandwidth_kbps;
3540 #endif
3541 
	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
3544 		DRM_ERROR("aconnector is NULL!\n");
3545 		return stream;
3546 	}
3547 
3548 	drm_connector = &aconnector->base;
3549 
3550 	if (!aconnector->dc_sink) {
3551 		sink = create_fake_sink(aconnector);
3552 		if (!sink)
3553 			return stream;
3554 	} else {
3555 		sink = aconnector->dc_sink;
3556 		dc_sink_retain(sink);
3557 	}
3558 
3559 	stream = dc_create_stream_for_sink(sink);
3560 
3561 	if (stream == NULL) {
3562 		DRM_ERROR("Failed to create stream for sink!\n");
3563 		goto finish;
3564 	}
3565 
3566 	stream->dm_stream_context = aconnector;
3567 
3568 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
3569 		/* Search for preferred mode */
3570 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
3571 			native_mode_found = true;
3572 			break;
3573 		}
3574 	}
3575 	if (!native_mode_found)
3576 		preferred_mode = list_first_entry_or_null(
3577 				&aconnector->base.modes,
3578 				struct drm_display_mode,
3579 				head);
3580 
3581 	mode_refresh = drm_mode_vrefresh(&mode);
3582 
3583 	if (preferred_mode == NULL) {
3584 		/*
3585 		 * This may not be an error, the use case is when we have no
3586 		 * usermode calls to reset and set mode upon hotplug. In this
3587 		 * case, we call set mode ourselves to restore the previous mode
3588 		 * and the modelist may not be filled in in time.
3589 		 */
3590 		DRM_DEBUG_DRIVER("No preferred mode found\n");
3591 	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode, scale);
3595 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
3596 	}
3597 
3598 	if (!dm_state)
3599 		drm_mode_set_crtcinfo(&mode, 0);
3600 
3601 	/*
3602 	* If scaling is enabled and refresh rate didn't change
3603 	* we copy the vic and polarities of the old timings
3604 	*/
3605 	if (!scale || mode_refresh != preferred_refresh)
3606 		fill_stream_properties_from_drm_display_mode(stream,
3607 			&mode, &aconnector->base, con_state, NULL);
3608 	else
3609 		fill_stream_properties_from_drm_display_mode(stream,
3610 			&mode, &aconnector->base, con_state, old_stream);
3611 
3612 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3613 	stream->timing.flags.DSC = 0;
3614 
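	/*
	 * Enable DSC only if the sink's DPCD advertises DSC decoding and a
	 * DSC configuration that fits the available link bandwidth can be
	 * computed.
	 */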
3615 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
3616 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
3617 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
3618 				      &dsc_caps);
3619 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
3620 							     dc_link_get_link_cap(aconnector->dc_link));
3621 
3622 		if (dsc_caps.is_dsc_supported)
3623 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc,
3624 						  &dsc_caps,
3625 						  link_bandwidth_kbps,
3626 						  &stream->timing,
3627 						  &stream->timing.dsc_cfg))
3628 				stream->timing.flags.DSC = 1;
3629 	}
3630 #endif
3631 
3632 	update_stream_scaling_settings(&mode, dm_state, stream);
3633 
3634 	fill_audio_info(
3635 		&stream->audio_info,
3636 		drm_connector,
3637 		sink);
3638 
3639 	update_stream_signal(stream, sink);
3640 
3641 finish:
3642 	dc_sink_release(sink);
3643 
3644 	return stream;
3645 }
3646 
3647 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
3648 {
3649 	drm_crtc_cleanup(crtc);
3650 	kfree(crtc);
3651 }
3652 
3653 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3654 				  struct drm_crtc_state *state)
3655 {
3656 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
3657 
	/* TODO: Destroy dc_stream objects when the stream object is flattened */
3659 	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
3667 }
3668 
3669 static void dm_crtc_reset_state(struct drm_crtc *crtc)
3670 {
3671 	struct dm_crtc_state *state;
3672 
3673 	if (crtc->state)
3674 		dm_crtc_destroy_state(crtc, crtc->state);
3675 
3676 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3677 	if (WARN_ON(!state))
3678 		return;
3679 
	crtc->state = &state->base;
	crtc->state->crtc = crtc;
}
3684 
3685 static struct drm_crtc_state *
3686 dm_crtc_duplicate_state(struct drm_crtc *crtc)
3687 {
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
3694 
3695 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3696 	if (!state)
3697 		return NULL;
3698 
3699 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
3700 
3701 	if (cur->stream) {
3702 		state->stream = cur->stream;
3703 		dc_stream_retain(state->stream);
3704 	}
3705 
3706 	state->active_planes = cur->active_planes;
3707 	state->interrupts_enabled = cur->interrupts_enabled;
3708 	state->vrr_params = cur->vrr_params;
3709 	state->vrr_infopacket = cur->vrr_infopacket;
3710 	state->abm_level = cur->abm_level;
3711 	state->vrr_supported = cur->vrr_supported;
3712 	state->freesync_config = cur->freesync_config;
3713 	state->crc_src = cur->crc_src;
3714 	state->cm_has_degamma = cur->cm_has_degamma;
3715 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
3716 
	/* TODO: Duplicate the dc_stream after the stream object is flattened */
3718 
3719 	return &state->base;
3720 }
3721 
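/*
 * Enable or disable the VUPDATE interrupt on the CRTC's OTG instance. The
 * interrupt is only needed while variable refresh rate is active, see
 * dm_set_vblank().
 */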
3722 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
3723 {
3724 	enum dc_irq_source irq_source;
3725 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3726 	struct amdgpu_device *adev = crtc->dev->dev_private;
3727 	int rc;
3728 
3729 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
3730 
3731 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3732 
3733 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
3734 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
3735 	return rc;
3736 }
3737 
3738 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
3739 {
3740 	enum dc_irq_source irq_source;
3741 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3742 	struct amdgpu_device *adev = crtc->dev->dev_private;
3743 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3744 	int rc = 0;
3745 
3746 	if (enable) {
3747 		/* vblank irq on -> Only need vupdate irq in vrr mode */
3748 		if (amdgpu_dm_vrr_active(acrtc_state))
3749 			rc = dm_set_vupdate_irq(crtc, true);
3750 	} else {
3751 		/* vblank irq off -> vupdate irq off */
3752 		rc = dm_set_vupdate_irq(crtc, false);
3753 	}
3754 
3755 	if (rc)
3756 		return rc;
3757 
3758 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
3759 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3760 }
3761 
3762 static int dm_enable_vblank(struct drm_crtc *crtc)
3763 {
3764 	return dm_set_vblank(crtc, true);
3765 }
3766 
3767 static void dm_disable_vblank(struct drm_crtc *crtc)
3768 {
3769 	dm_set_vblank(crtc, false);
3770 }
3771 
/* Only the options currently available to the driver are implemented */
3773 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
3774 	.reset = dm_crtc_reset_state,
3775 	.destroy = amdgpu_dm_crtc_destroy,
3776 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
3777 	.set_config = drm_atomic_helper_set_config,
3778 	.page_flip = drm_atomic_helper_page_flip,
3779 	.atomic_duplicate_state = dm_crtc_duplicate_state,
3780 	.atomic_destroy_state = dm_crtc_destroy_state,
3781 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
3782 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
3783 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
3784 	.enable_vblank = dm_enable_vblank,
3785 	.disable_vblank = dm_disable_vblank,
3786 };
3787 
3788 static enum drm_connector_status
3789 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
3790 {
3791 	bool connected;
3792 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3793 
3794 	/*
3795 	 * Notes:
3796 	 * 1. This interface is NOT called in context of HPD irq.
3797 	 * 2. This interface *is called* in context of user-mode ioctl. Which
3798 	 * makes it a bad place for *any* MST-related activity.
3799 	 */
3800 
3801 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
3802 	    !aconnector->fake_enable)
3803 		connected = (aconnector->dc_sink != NULL);
3804 	else
3805 		connected = (aconnector->base.force == DRM_FORCE_ON);
3806 
3807 	return (connected ? connector_status_connected :
3808 			connector_status_disconnected);
3809 }
3810 
3811 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
3812 					    struct drm_connector_state *connector_state,
3813 					    struct drm_property *property,
3814 					    uint64_t val)
3815 {
3816 	struct drm_device *dev = connector->dev;
3817 	struct amdgpu_device *adev = dev->dev_private;
3818 	struct dm_connector_state *dm_old_state =
3819 		to_dm_connector_state(connector->state);
3820 	struct dm_connector_state *dm_new_state =
3821 		to_dm_connector_state(connector_state);
3822 
3823 	int ret = -EINVAL;
3824 
3825 	if (property == dev->mode_config.scaling_mode_property) {
3826 		enum amdgpu_rmx_type rmx_type;
3827 
3828 		switch (val) {
3829 		case DRM_MODE_SCALE_CENTER:
3830 			rmx_type = RMX_CENTER;
3831 			break;
3832 		case DRM_MODE_SCALE_ASPECT:
3833 			rmx_type = RMX_ASPECT;
3834 			break;
3835 		case DRM_MODE_SCALE_FULLSCREEN:
3836 			rmx_type = RMX_FULL;
3837 			break;
3838 		case DRM_MODE_SCALE_NONE:
3839 		default:
3840 			rmx_type = RMX_OFF;
3841 			break;
3842 		}
3843 
3844 		if (dm_old_state->scaling == rmx_type)
3845 			return 0;
3846 
3847 		dm_new_state->scaling = rmx_type;
3848 		ret = 0;
3849 	} else if (property == adev->mode_info.underscan_hborder_property) {
3850 		dm_new_state->underscan_hborder = val;
3851 		ret = 0;
3852 	} else if (property == adev->mode_info.underscan_vborder_property) {
3853 		dm_new_state->underscan_vborder = val;
3854 		ret = 0;
3855 	} else if (property == adev->mode_info.underscan_property) {
3856 		dm_new_state->underscan_enable = val;
3857 		ret = 0;
3858 	} else if (property == adev->mode_info.abm_level_property) {
3859 		dm_new_state->abm_level = val;
3860 		ret = 0;
3861 	}
3862 
3863 	return ret;
3864 }
3865 
3866 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
3867 					    const struct drm_connector_state *state,
3868 					    struct drm_property *property,
3869 					    uint64_t *val)
3870 {
3871 	struct drm_device *dev = connector->dev;
3872 	struct amdgpu_device *adev = dev->dev_private;
3873 	struct dm_connector_state *dm_state =
3874 		to_dm_connector_state(state);
3875 	int ret = -EINVAL;
3876 
3877 	if (property == dev->mode_config.scaling_mode_property) {
3878 		switch (dm_state->scaling) {
3879 		case RMX_CENTER:
3880 			*val = DRM_MODE_SCALE_CENTER;
3881 			break;
3882 		case RMX_ASPECT:
3883 			*val = DRM_MODE_SCALE_ASPECT;
3884 			break;
3885 		case RMX_FULL:
3886 			*val = DRM_MODE_SCALE_FULLSCREEN;
3887 			break;
3888 		case RMX_OFF:
3889 		default:
3890 			*val = DRM_MODE_SCALE_NONE;
3891 			break;
3892 		}
3893 		ret = 0;
3894 	} else if (property == adev->mode_info.underscan_hborder_property) {
3895 		*val = dm_state->underscan_hborder;
3896 		ret = 0;
3897 	} else if (property == adev->mode_info.underscan_vborder_property) {
3898 		*val = dm_state->underscan_vborder;
3899 		ret = 0;
3900 	} else if (property == adev->mode_info.underscan_property) {
3901 		*val = dm_state->underscan_enable;
3902 		ret = 0;
3903 	} else if (property == adev->mode_info.abm_level_property) {
3904 		*val = dm_state->abm_level;
3905 		ret = 0;
3906 	}
3907 
3908 	return ret;
3909 }
3910 
3911 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
3912 {
3913 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
3914 
3915 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
3916 }
3917 
3918 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
3919 {
3920 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3921 	const struct dc_link *link = aconnector->dc_link;
3922 	struct amdgpu_device *adev = connector->dev->dev_private;
3923 	struct amdgpu_display_manager *dm = &adev->dm;
3924 
3925 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3926 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3927 
3928 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3929 	    link->type != dc_connection_none &&
3930 	    dm->backlight_dev) {
3931 		backlight_device_unregister(dm->backlight_dev);
3932 		dm->backlight_dev = NULL;
3933 	}
3934 #endif
3935 
3936 	if (aconnector->dc_em_sink)
3937 		dc_sink_release(aconnector->dc_em_sink);
3938 	aconnector->dc_em_sink = NULL;
3939 	if (aconnector->dc_sink)
3940 		dc_sink_release(aconnector->dc_sink);
3941 	aconnector->dc_sink = NULL;
3942 
3943 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
3944 	drm_connector_unregister(connector);
3945 	drm_connector_cleanup(connector);
3946 	if (aconnector->i2c) {
3947 		i2c_del_adapter(&aconnector->i2c->base);
3948 		kfree(aconnector->i2c);
3949 	}
3950 
3951 	kfree(connector);
3952 }
3953 
3954 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
3955 {
3956 	struct dm_connector_state *state =
3957 		to_dm_connector_state(connector->state);
3958 
3959 	if (connector->state)
3960 		__drm_atomic_helper_connector_destroy_state(connector->state);
3961 
3962 	kfree(state);
3963 
3964 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3965 
3966 	if (state) {
3967 		state->scaling = RMX_OFF;
3968 		state->underscan_enable = false;
3969 		state->underscan_hborder = 0;
3970 		state->underscan_vborder = 0;
3971 		state->base.max_requested_bpc = 8;
3972 
3973 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3974 			state->abm_level = amdgpu_dm_abm_level;
3975 
3976 		__drm_atomic_helper_connector_reset(connector, &state->base);
3977 	}
3978 }
3979 
3980 struct drm_connector_state *
3981 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
3982 {
3983 	struct dm_connector_state *state =
3984 		to_dm_connector_state(connector->state);
3985 
3986 	struct dm_connector_state *new_state =
3987 			kmemdup(state, sizeof(*state), GFP_KERNEL);
3988 
3989 	if (!new_state)
3990 		return NULL;
3991 
3992 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
3993 
3994 	new_state->freesync_capable = state->freesync_capable;
3995 	new_state->abm_level = state->abm_level;
3996 	new_state->scaling = state->scaling;
3997 	new_state->underscan_enable = state->underscan_enable;
3998 	new_state->underscan_hborder = state->underscan_hborder;
3999 	new_state->underscan_vborder = state->underscan_vborder;
4000 
4001 	return &new_state->base;
4002 }
4003 
4004 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4005 	.reset = amdgpu_dm_connector_funcs_reset,
4006 	.detect = amdgpu_dm_connector_detect,
4007 	.fill_modes = drm_helper_probe_single_connector_modes,
4008 	.destroy = amdgpu_dm_connector_destroy,
4009 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4010 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4011 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4012 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4013 	.early_unregister = amdgpu_dm_connector_unregister
4014 };
4015 
4016 static int get_modes(struct drm_connector *connector)
4017 {
4018 	return amdgpu_dm_connector_get_modes(connector);
4019 }
4020 
4021 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4022 {
4023 	struct dc_sink_init_data init_params = {
4024 			.link = aconnector->dc_link,
4025 			.sink_signal = SIGNAL_TYPE_VIRTUAL
4026 	};
4027 	struct edid *edid;
4028 
4029 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4031 				aconnector->base.name);
4032 
4033 		aconnector->base.force = DRM_FORCE_OFF;
4034 		aconnector->base.override_edid = false;
4035 		return;
4036 	}
4037 
4038 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4039 
4040 	aconnector->edid = edid;
4041 
4042 	aconnector->dc_em_sink = dc_link_add_remote_sink(
4043 		aconnector->dc_link,
4044 		(uint8_t *)edid,
4045 		(edid->extensions + 1) * EDID_LENGTH,
4046 		&init_params);
4047 
4048 	if (aconnector->base.force == DRM_FORCE_ON) {
4049 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
4050 		aconnector->dc_link->local_sink :
4051 		aconnector->dc_em_sink;
4052 		dc_sink_retain(aconnector->dc_sink);
4053 	}
4054 }
4055 
4056 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4057 {
4058 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4059 
4060 	/*
4061 	 * In case of headless boot with force on for DP managed connector
4062 	 * Those settings have to be != 0 to get initial modeset
4063 	 */
4064 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4065 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4066 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4067 	}
4068 
4069 
4070 	aconnector->base.override_edid = true;
4071 	create_eml_sink(aconnector);
4072 }
4073 
4074 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4075 				   struct drm_display_mode *mode)
4076 {
	enum drm_mode_status result = MODE_ERROR;
4078 	struct dc_sink *dc_sink;
4079 	struct amdgpu_device *adev = connector->dev->dev_private;
4080 	/* TODO: Unhardcode stream count */
4081 	struct dc_stream_state *stream;
4082 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4083 	enum dc_status dc_result = DC_OK;
4084 
4085 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4086 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
4087 		return result;
4088 
4089 	/*
4090 	 * Only run this the first time mode_valid is called to initilialize
4091 	 * EDID mgmt
4092 	 */
4093 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4094 		!aconnector->dc_em_sink)
4095 		handle_edid_mgmt(aconnector);
4096 
4097 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4098 
4099 	if (dc_sink == NULL) {
4100 		DRM_ERROR("dc_sink is NULL!\n");
4101 		goto fail;
4102 	}
4103 
4104 	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4105 	if (stream == NULL) {
4106 		DRM_ERROR("Failed to create stream for sink!\n");
4107 		goto fail;
4108 	}
4109 
4110 	dc_result = dc_validate_stream(adev->dm.dc, stream);
4111 
4112 	if (dc_result == DC_OK)
4113 		result = MODE_OK;
4114 	else
		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
			      mode->hdisplay,
			      mode->vdisplay,
			      mode->clock,
			      dc_result);
4120 
4121 	dc_stream_release(stream);
4122 
4123 fail:
	/* TODO: error handling */
4125 	return result;
4126 }
4127 
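/*
 * Pack the connector's HDR output metadata into a DC info packet: a Dynamic
 * Range and Mastering (DRM) infoframe for HDMI, or the equivalent SDP for
 * DP/eDP. Returns 0 on success, negative error code otherwise.
 */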
4128 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4129 				struct dc_info_packet *out)
4130 {
4131 	struct hdmi_drm_infoframe frame;
4132 	unsigned char buf[30]; /* 26 + 4 */
4133 	ssize_t len;
4134 	int ret, i;
4135 
4136 	memset(out, 0, sizeof(*out));
4137 
4138 	if (!state->hdr_output_metadata)
4139 		return 0;
4140 
4141 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4142 	if (ret)
4143 		return ret;
4144 
4145 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4146 	if (len < 0)
4147 		return (int)len;
4148 
4149 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
4150 	if (len != 30)
4151 		return -EINVAL;
4152 
4153 	/* Prepare the infopacket for DC. */
4154 	switch (state->connector->connector_type) {
4155 	case DRM_MODE_CONNECTOR_HDMIA:
4156 		out->hb0 = 0x87; /* type */
4157 		out->hb1 = 0x01; /* version */
4158 		out->hb2 = 0x1A; /* length */
4159 		out->sb[0] = buf[3]; /* checksum */
4160 		i = 1;
4161 		break;
4162 
4163 	case DRM_MODE_CONNECTOR_DisplayPort:
4164 	case DRM_MODE_CONNECTOR_eDP:
4165 		out->hb0 = 0x00; /* sdp id, zero */
4166 		out->hb1 = 0x87; /* type */
4167 		out->hb2 = 0x1D; /* payload len - 1 */
4168 		out->hb3 = (0x13 << 2); /* sdp version */
4169 		out->sb[0] = 0x01; /* version */
4170 		out->sb[1] = 0x1A; /* length */
4171 		i = 2;
4172 		break;
4173 
4174 	default:
4175 		return -EINVAL;
4176 	}
4177 
4178 	memcpy(&out->sb[i], &buf[4], 26);
4179 	out->valid = true;
4180 
4181 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4182 		       sizeof(out->sb), false);
4183 
4184 	return 0;
4185 }
4186 
4187 static bool
4188 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4189 			  const struct drm_connector_state *new_state)
4190 {
4191 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4192 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4193 
4194 	if (old_blob != new_blob) {
4195 		if (old_blob && new_blob &&
4196 		    old_blob->length == new_blob->length)
			return memcmp(old_blob->data, new_blob->data,
				      old_blob->length) != 0;
4199 
4200 		return true;
4201 	}
4202 
4203 	return false;
4204 }
4205 
4206 static int
4207 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4208 				 struct drm_atomic_state *state)
4209 {
4210 	struct drm_connector_state *new_con_state =
4211 		drm_atomic_get_new_connector_state(state, conn);
4212 	struct drm_connector_state *old_con_state =
4213 		drm_atomic_get_old_connector_state(state, conn);
4214 	struct drm_crtc *crtc = new_con_state->crtc;
4215 	struct drm_crtc_state *new_crtc_state;
4216 	int ret;
4217 
4218 	if (!crtc)
4219 		return 0;
4220 
4221 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4222 		struct dc_info_packet hdr_infopacket;
4223 
4224 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4225 		if (ret)
4226 			return ret;
4227 
4228 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4229 		if (IS_ERR(new_crtc_state))
4230 			return PTR_ERR(new_crtc_state);
4231 
4232 		/*
4233 		 * DC considers the stream backends changed if the
4234 		 * static metadata changes. Forcing the modeset also
4235 		 * gives a simple way for userspace to switch from
4236 		 * 8bpc to 10bpc when setting the metadata to enter
4237 		 * or exit HDR.
4238 		 *
4239 		 * Changing the static metadata after it's been
4240 		 * set is permissible, however. So only force a
4241 		 * modeset if we're entering or exiting HDR.
4242 		 */
4243 		new_crtc_state->mode_changed =
4244 			!old_con_state->hdr_output_metadata ||
4245 			!new_con_state->hdr_output_metadata;
4246 	}
4247 
4248 	return 0;
4249 }
4250 
4251 static const struct drm_connector_helper_funcs
4252 amdgpu_dm_connector_helper_funcs = {
4253 	/*
4254 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
4255 	 * modes will be filtered by drm_mode_validate_size(), and those modes
4256 	 * are missing after user start lightdm. So we need to renew modes list.
4257 	 * in get_modes call back, not just return the modes count
4258 	 */
4259 	.get_modes = get_modes,
4260 	.mode_valid = amdgpu_dm_connector_mode_valid,
4261 	.atomic_check = amdgpu_dm_connector_atomic_check,
4262 };
4263 
4264 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4265 {
4266 }
4267 
4268 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
4269 {
4270 	struct drm_device *dev = new_crtc_state->crtc->dev;
4271 	struct drm_plane *plane;
4272 
4273 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
4274 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
4275 			return true;
4276 	}
4277 
4278 	return false;
4279 }
4280 
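/*
 * Count the non-cursor planes that will be enabled on the CRTC after this
 * commit. Planes absent from the atomic state keep their current (enabled)
 * status, since they previously passed validation.
 */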
4281 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
4282 {
4283 	struct drm_atomic_state *state = new_crtc_state->state;
4284 	struct drm_plane *plane;
4285 	int num_active = 0;
4286 
4287 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
4288 		struct drm_plane_state *new_plane_state;
4289 
4290 		/* Cursor planes are "fake". */
4291 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
4292 			continue;
4293 
4294 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
4295 
4296 		if (!new_plane_state) {
4297 			/*
4298 			 * The plane is enable on the CRTC and hasn't changed
4299 			 * state. This means that it previously passed
4300 			 * validation and is therefore enabled.
4301 			 */
4302 			num_active += 1;
4303 			continue;
4304 		}
4305 
4306 		/* We need a framebuffer to be considered enabled. */
4307 		num_active += (new_plane_state->fb != NULL);
4308 	}
4309 
4310 	return num_active;
4311 }
4312 
4313 /*
4314  * Sets whether interrupts should be enabled on a specific CRTC.
4315  * We require that the stream be enabled and that there exist active
4316  * DC planes on the stream.
4317  */
4318 static void
4319 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
4320 			       struct drm_crtc_state *new_crtc_state)
4321 {
4322 	struct dm_crtc_state *dm_new_crtc_state =
4323 		to_dm_crtc_state(new_crtc_state);
4324 
4325 	dm_new_crtc_state->active_planes = 0;
4326 	dm_new_crtc_state->interrupts_enabled = false;
4327 
4328 	if (!dm_new_crtc_state->stream)
4329 		return;
4330 
4331 	dm_new_crtc_state->active_planes =
4332 		count_crtc_active_planes(new_crtc_state);
4333 
4334 	dm_new_crtc_state->interrupts_enabled =
4335 		dm_new_crtc_state->active_planes > 0;
4336 }
4337 
4338 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
4339 				       struct drm_crtc_state *state)
4340 {
4341 	struct amdgpu_device *adev = crtc->dev->dev_private;
4342 	struct dc *dc = adev->dm.dc;
4343 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
4344 	int ret = -EINVAL;
4345 
4346 	/*
4347 	 * Update interrupt state for the CRTC. This needs to happen whenever
4348 	 * the CRTC has changed or whenever any of its planes have changed.
4349 	 * Atomic check satisfies both of these requirements since the CRTC
4350 	 * is added to the state by DRM during drm_atomic_helper_check_planes.
4351 	 */
4352 	dm_update_crtc_interrupt_state(crtc, state);
4353 
4354 	if (unlikely(!dm_crtc_state->stream &&
4355 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
4356 		WARN_ON(1);
4357 		return ret;
4358 	}
4359 
4360 	/* In some use cases, like reset, no stream is attached */
4361 	if (!dm_crtc_state->stream)
4362 		return 0;
4363 
4364 	/*
4365 	 * We want at least one hardware plane enabled to use
4366 	 * the stream with a cursor enabled.
4367 	 */
4368 	if (state->enable && state->active &&
4369 	    does_crtc_have_active_cursor(state) &&
4370 	    dm_crtc_state->active_planes == 0)
4371 		return -EINVAL;
4372 
4373 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
4374 		return 0;
4375 
4376 	return ret;
4377 }
4378 
4379 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
4380 				      const struct drm_display_mode *mode,
4381 				      struct drm_display_mode *adjusted_mode)
4382 {
4383 	return true;
4384 }
4385 
4386 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
4387 	.disable = dm_crtc_helper_disable,
4388 	.atomic_check = dm_crtc_helper_atomic_check,
4389 	.mode_fixup = dm_crtc_helper_mode_fixup
4390 };
4391 
4392 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
4393 {
4394 
4395 }
4396 
4397 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
4398 					  struct drm_crtc_state *crtc_state,
4399 					  struct drm_connector_state *conn_state)
4400 {
4401 	return 0;
4402 }
4403 
4404 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
4405 	.disable = dm_encoder_helper_disable,
4406 	.atomic_check = dm_encoder_helper_atomic_check
4407 };
4408 
4409 static void dm_drm_plane_reset(struct drm_plane *plane)
4410 {
4411 	struct dm_plane_state *amdgpu_state = NULL;
4412 
4413 	if (plane->state)
4414 		plane->funcs->atomic_destroy_state(plane, plane->state);
4415 
4416 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
4417 	WARN_ON(amdgpu_state == NULL);
4418 
4419 	if (amdgpu_state)
4420 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
4421 }
4422 
4423 static struct drm_plane_state *
4424 dm_drm_plane_duplicate_state(struct drm_plane *plane)
4425 {
4426 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
4427 
4428 	old_dm_plane_state = to_dm_plane_state(plane->state);
4429 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
4430 	if (!dm_plane_state)
4431 		return NULL;
4432 
4433 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
4434 
4435 	if (old_dm_plane_state->dc_state) {
4436 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
4437 		dc_plane_state_retain(dm_plane_state->dc_state);
4438 	}
4439 
4440 	return &dm_plane_state->base;
4441 }
4442 
4443 void dm_drm_plane_destroy_state(struct drm_plane *plane,
4444 				struct drm_plane_state *state)
4445 {
4446 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
4447 
4448 	if (dm_plane_state->dc_state)
4449 		dc_plane_state_release(dm_plane_state->dc_state);
4450 
4451 	drm_atomic_helper_plane_destroy_state(plane, state);
4452 }
4453 
4454 static const struct drm_plane_funcs dm_plane_funcs = {
4455 	.update_plane	= drm_atomic_helper_update_plane,
4456 	.disable_plane	= drm_atomic_helper_disable_plane,
4457 	.destroy	= drm_primary_helper_destroy,
4458 	.reset = dm_drm_plane_reset,
4459 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
4460 	.atomic_destroy_state = dm_drm_plane_destroy_state,
4461 };
4462 
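/*
 * Pin the framebuffer's buffer object into a displayable memory domain,
 * ensure it has a GART mapping, and record its GPU address and tiling
 * information for DC before the plane is programmed.
 */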
4463 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
4464 				      struct drm_plane_state *new_state)
4465 {
4466 	struct amdgpu_framebuffer *afb;
4467 	struct drm_gem_object *obj;
4468 	struct amdgpu_device *adev;
4469 	struct amdgpu_bo *rbo;
4470 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
4471 	struct list_head list;
4472 	struct ttm_validate_buffer tv;
4473 	struct ww_acquire_ctx ticket;
4474 	uint64_t tiling_flags;
4475 	uint32_t domain;
4476 	int r;
4477 
4478 	dm_plane_state_old = to_dm_plane_state(plane->state);
4479 	dm_plane_state_new = to_dm_plane_state(new_state);
4480 
4481 	if (!new_state->fb) {
4482 		DRM_DEBUG_DRIVER("No FB bound\n");
4483 		return 0;
4484 	}
4485 
4486 	afb = to_amdgpu_framebuffer(new_state->fb);
4487 	obj = new_state->fb->obj[0];
4488 	rbo = gem_to_amdgpu_bo(obj);
4489 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
4490 	INIT_LIST_HEAD(&list);
4491 
4492 	tv.bo = &rbo->tbo;
4493 	tv.num_shared = 1;
4494 	list_add(&tv.head, &list);
4495 
4496 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
4497 	if (r) {
4498 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
4499 		return r;
4500 	}
4501 
4502 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
4503 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
4504 	else
4505 		domain = AMDGPU_GEM_DOMAIN_VRAM;
4506 
4507 	r = amdgpu_bo_pin(rbo, domain);
4508 	if (unlikely(r != 0)) {
4509 		if (r != -ERESTARTSYS)
4510 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
4511 		ttm_eu_backoff_reservation(&ticket, &list);
4512 		return r;
4513 	}
4514 
4515 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
4516 	if (unlikely(r != 0)) {
4517 		amdgpu_bo_unpin(rbo);
4518 		ttm_eu_backoff_reservation(&ticket, &list);
4519 		DRM_ERROR("%p bind failed\n", rbo);
4520 		return r;
4521 	}
4522 
4523 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
4524 
4525 	ttm_eu_backoff_reservation(&ticket, &list);
4526 
4527 	afb->address = amdgpu_bo_gpu_offset(rbo);
4528 
4529 	amdgpu_bo_ref(rbo);
4530 
4531 	if (dm_plane_state_new->dc_state &&
4532 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
4533 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
4534 
4535 		fill_plane_buffer_attributes(
4536 			adev, afb, plane_state->format, plane_state->rotation,
4537 			tiling_flags, &plane_state->tiling_info,
4538 			&plane_state->plane_size, &plane_state->dcc,
4539 			&plane_state->address);
4540 	}
4541 
4542 	return 0;
4543 }
4544 
4545 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
4546 				       struct drm_plane_state *old_state)
4547 {
4548 	struct amdgpu_bo *rbo;
4549 	int r;
4550 
4551 	if (!old_state->fb)
4552 		return;
4553 
4554 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
4555 	r = amdgpu_bo_reserve(rbo, false);
4556 	if (unlikely(r)) {
4557 		DRM_ERROR("failed to reserve rbo before unpin\n");
4558 		return;
4559 	}
4560 
4561 	amdgpu_bo_unpin(rbo);
4562 	amdgpu_bo_unreserve(rbo);
4563 	amdgpu_bo_unref(&rbo);
4564 }
4565 
4566 static int dm_plane_atomic_check(struct drm_plane *plane,
4567 				 struct drm_plane_state *state)
4568 {
4569 	struct amdgpu_device *adev = plane->dev->dev_private;
4570 	struct dc *dc = adev->dm.dc;
4571 	struct dm_plane_state *dm_plane_state;
4572 	struct dc_scaling_info scaling_info;
4573 	int ret;
4574 
4575 	dm_plane_state = to_dm_plane_state(state);
4576 
4577 	if (!dm_plane_state->dc_state)
4578 		return 0;
4579 
4580 	ret = fill_dc_scaling_info(state, &scaling_info);
4581 	if (ret)
4582 		return ret;
4583 
4584 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
4585 		return 0;
4586 
4587 	return -EINVAL;
4588 }
4589 
4590 static int dm_plane_atomic_async_check(struct drm_plane *plane,
4591 				       struct drm_plane_state *new_plane_state)
4592 {
4593 	/* Only support async updates on cursor planes. */
4594 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
4595 		return -EINVAL;
4596 
4597 	return 0;
4598 }
4599 
4600 static void dm_plane_atomic_async_update(struct drm_plane *plane,
4601 					 struct drm_plane_state *new_state)
4602 {
4603 	struct drm_plane_state *old_state =
4604 		drm_atomic_get_old_plane_state(new_state->state, plane);
4605 
4606 	swap(plane->state->fb, new_state->fb);
4607 
4608 	plane->state->src_x = new_state->src_x;
4609 	plane->state->src_y = new_state->src_y;
4610 	plane->state->src_w = new_state->src_w;
4611 	plane->state->src_h = new_state->src_h;
4612 	plane->state->crtc_x = new_state->crtc_x;
4613 	plane->state->crtc_y = new_state->crtc_y;
4614 	plane->state->crtc_w = new_state->crtc_w;
4615 	plane->state->crtc_h = new_state->crtc_h;
4616 
4617 	handle_cursor_update(plane, old_state);
4618 }
4619 
4620 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
4621 	.prepare_fb = dm_plane_helper_prepare_fb,
4622 	.cleanup_fb = dm_plane_helper_cleanup_fb,
4623 	.atomic_check = dm_plane_atomic_check,
4624 	.atomic_async_check = dm_plane_atomic_async_check,
4625 	.atomic_async_update = dm_plane_atomic_async_update
4626 };
4627 
4628 /*
4629  * TODO: these are currently initialized to rgb formats only.
4630  * For future use cases we should either initialize them dynamically based on
4631  * plane capabilities, or initialize this array to all formats, so internal drm
4632  * check will succeed, and let DC implement proper check
4633  */
4634 static const uint32_t rgb_formats[] = {
4635 	DRM_FORMAT_XRGB8888,
4636 	DRM_FORMAT_ARGB8888,
4637 	DRM_FORMAT_RGBA8888,
4638 	DRM_FORMAT_XRGB2101010,
4639 	DRM_FORMAT_XBGR2101010,
4640 	DRM_FORMAT_ARGB2101010,
4641 	DRM_FORMAT_ABGR2101010,
4642 	DRM_FORMAT_XBGR8888,
4643 	DRM_FORMAT_ABGR8888,
4644 	DRM_FORMAT_RGB565,
4645 };
4646 
4647 static const uint32_t overlay_formats[] = {
4648 	DRM_FORMAT_XRGB8888,
4649 	DRM_FORMAT_ARGB8888,
4650 	DRM_FORMAT_RGBA8888,
4651 	DRM_FORMAT_XBGR8888,
4652 	DRM_FORMAT_ABGR8888,
4653 	DRM_FORMAT_RGB565
4654 };
4655 
4656 static const u32 cursor_formats[] = {
4657 	DRM_FORMAT_ARGB8888
4658 };
4659 
4660 static int get_plane_formats(const struct drm_plane *plane,
4661 			     const struct dc_plane_cap *plane_cap,
4662 			     uint32_t *formats, int max_formats)
4663 {
4664 	int i, num_formats = 0;
4665 
4666 	/*
4667 	 * TODO: Query support for each group of formats directly from
4668 	 * DC plane caps. This will require adding more formats to the
4669 	 * caps list.
4670 	 */
4671 
4672 	switch (plane->type) {
4673 	case DRM_PLANE_TYPE_PRIMARY:
4674 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
4675 			if (num_formats >= max_formats)
4676 				break;
4677 
4678 			formats[num_formats++] = rgb_formats[i];
4679 		}
4680 
4681 		if (plane_cap && plane_cap->pixel_format_support.nv12)
4682 			formats[num_formats++] = DRM_FORMAT_NV12;
4683 		break;
4684 
4685 	case DRM_PLANE_TYPE_OVERLAY:
4686 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
4687 			if (num_formats >= max_formats)
4688 				break;
4689 
4690 			formats[num_formats++] = overlay_formats[i];
4691 		}
4692 		break;
4693 
4694 	case DRM_PLANE_TYPE_CURSOR:
4695 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
4696 			if (num_formats >= max_formats)
4697 				break;
4698 
4699 			formats[num_formats++] = cursor_formats[i];
4700 		}
4701 		break;
4702 	}
4703 
4704 	return num_formats;
4705 }
4706 
4707 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
4708 				struct drm_plane *plane,
4709 				unsigned long possible_crtcs,
4710 				const struct dc_plane_cap *plane_cap)
4711 {
4712 	uint32_t formats[32];
4713 	int num_formats;
4714 	int res = -EPERM;
4715 
4716 	num_formats = get_plane_formats(plane, plane_cap, formats,
4717 					ARRAY_SIZE(formats));
4718 
4719 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
4720 				       &dm_plane_funcs, formats, num_formats,
4721 				       NULL, plane->type, NULL);
4722 	if (res)
4723 		return res;
4724 
4725 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
4726 	    plane_cap && plane_cap->per_pixel_alpha) {
4727 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
4728 					  BIT(DRM_MODE_BLEND_PREMULTI);
4729 
4730 		drm_plane_create_alpha_property(plane);
4731 		drm_plane_create_blend_mode_property(plane, blend_caps);
4732 	}
4733 
4734 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
4735 	    plane_cap && plane_cap->pixel_format_support.nv12) {
4736 		/* This only affects YUV formats. */
4737 		drm_plane_create_color_properties(
4738 			plane,
4739 			BIT(DRM_COLOR_YCBCR_BT601) |
4740 			BIT(DRM_COLOR_YCBCR_BT709),
4741 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
4742 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
4743 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
4744 	}
4745 
4746 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
4747 
4748 	/* Create (reset) the plane state */
4749 	if (plane->funcs->reset)
4750 		plane->funcs->reset(plane);
4751 
4752 	return 0;
4753 }
4754 
4755 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
4756 			       struct drm_plane *plane,
4757 			       uint32_t crtc_index)
4758 {
4759 	struct amdgpu_crtc *acrtc = NULL;
4760 	struct drm_plane *cursor_plane;
4761 
4762 	int res = -ENOMEM;
4763 
4764 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
4765 	if (!cursor_plane)
4766 		goto fail;
4767 
4768 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
4770 
4771 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
4772 	if (!acrtc)
4773 		goto fail;
4774 
4775 	res = drm_crtc_init_with_planes(
4776 			dm->ddev,
4777 			&acrtc->base,
4778 			plane,
4779 			cursor_plane,
4780 			&amdgpu_dm_crtc_funcs, NULL);
4781 
4782 	if (res)
4783 		goto fail;
4784 
4785 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
4786 
	/* Create (reset) the CRTC state */
4788 	if (acrtc->base.funcs->reset)
4789 		acrtc->base.funcs->reset(&acrtc->base);
4790 
4791 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
4792 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
4793 
4794 	acrtc->crtc_id = crtc_index;
4795 	acrtc->base.enabled = false;
4796 	acrtc->otg_inst = -1;
4797 
4798 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
4799 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
4800 				   true, MAX_COLOR_LUT_ENTRIES);
4801 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
4802 
4803 	return 0;
4804 
4805 fail:
4806 	kfree(acrtc);
4807 	kfree(cursor_plane);
4808 	return res;
4809 }
4810 
4811 
4812 static int to_drm_connector_type(enum signal_type st)
4813 {
4814 	switch (st) {
4815 	case SIGNAL_TYPE_HDMI_TYPE_A:
4816 		return DRM_MODE_CONNECTOR_HDMIA;
4817 	case SIGNAL_TYPE_EDP:
4818 		return DRM_MODE_CONNECTOR_eDP;
4819 	case SIGNAL_TYPE_LVDS:
4820 		return DRM_MODE_CONNECTOR_LVDS;
4821 	case SIGNAL_TYPE_RGB:
4822 		return DRM_MODE_CONNECTOR_VGA;
4823 	case SIGNAL_TYPE_DISPLAY_PORT:
4824 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
4825 		return DRM_MODE_CONNECTOR_DisplayPort;
4826 	case SIGNAL_TYPE_DVI_DUAL_LINK:
4827 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
4828 		return DRM_MODE_CONNECTOR_DVID;
4829 	case SIGNAL_TYPE_VIRTUAL:
4830 		return DRM_MODE_CONNECTOR_VIRTUAL;
4831 
4832 	default:
4833 		return DRM_MODE_CONNECTOR_Unknown;
4834 	}
4835 }
4836 
4837 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
4838 {
4839 	return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
4840 }
4841 
4842 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
4843 {
4844 	struct drm_encoder *encoder;
4845 	struct amdgpu_encoder *amdgpu_encoder;
4846 
4847 	encoder = amdgpu_dm_connector_to_encoder(connector);
4848 
4849 	if (encoder == NULL)
4850 		return;
4851 
4852 	amdgpu_encoder = to_amdgpu_encoder(encoder);
4853 
4854 	amdgpu_encoder->native_mode.clock = 0;
4855 
4856 	if (!list_empty(&connector->probed_modes)) {
4857 		struct drm_display_mode *preferred_mode = NULL;
4858 
4859 		list_for_each_entry(preferred_mode,
4860 				    &connector->probed_modes,
4861 				    head) {
4862 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
4863 				amdgpu_encoder->native_mode = *preferred_mode;
4864 
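			/*
			 * The probed list was sorted by drm_mode_sort(), which
			 * puts preferred modes first, so only the first entry
			 * needs to be checked.
			 */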
4865 			break;
4866 		}
4867 
4868 	}
4869 }
4870 
4871 static struct drm_display_mode *
4872 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
4873 			     char *name,
4874 			     int hdisplay, int vdisplay)
4875 {
4876 	struct drm_device *dev = encoder->dev;
4877 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4878 	struct drm_display_mode *mode = NULL;
4879 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
4880 
4881 	mode = drm_mode_duplicate(dev, native_mode);
4882 
4883 	if (mode == NULL)
4884 		return NULL;
4885 
4886 	mode->hdisplay = hdisplay;
4887 	mode->vdisplay = vdisplay;
4888 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
4889 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
4890 
	return mode;
}
4894 
4895 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
4896 						 struct drm_connector *connector)
4897 {
4898 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4899 	struct drm_display_mode *mode = NULL;
4900 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
4901 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4902 				to_amdgpu_dm_connector(connector);
4903 	int i;
4904 	int n;
4905 	struct mode_size {
4906 		char name[DRM_DISPLAY_MODE_LEN];
4907 		int w;
4908 		int h;
4909 	} common_modes[] = {
4910 		{  "640x480",  640,  480},
4911 		{  "800x600",  800,  600},
4912 		{ "1024x768", 1024,  768},
4913 		{ "1280x720", 1280,  720},
4914 		{ "1280x800", 1280,  800},
4915 		{"1280x1024", 1280, 1024},
4916 		{ "1440x900", 1440,  900},
4917 		{"1680x1050", 1680, 1050},
4918 		{"1600x1200", 1600, 1200},
4919 		{"1920x1080", 1920, 1080},
4920 		{"1920x1200", 1920, 1200}
4921 	};
4922 
4923 	n = ARRAY_SIZE(common_modes);
4924 
4925 	for (i = 0; i < n; i++) {
4926 		struct drm_display_mode *curmode = NULL;
4927 		bool mode_existed = false;
4928 
4929 		if (common_modes[i].w > native_mode->hdisplay ||
4930 		    common_modes[i].h > native_mode->vdisplay ||
4931 		   (common_modes[i].w == native_mode->hdisplay &&
4932 		    common_modes[i].h == native_mode->vdisplay))
4933 			continue;
4934 
4935 		list_for_each_entry(curmode, &connector->probed_modes, head) {
4936 			if (common_modes[i].w == curmode->hdisplay &&
4937 			    common_modes[i].h == curmode->vdisplay) {
4938 				mode_existed = true;
4939 				break;
4940 			}
4941 		}
4942 
4943 		if (mode_existed)
4944 			continue;
4945 
		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
4951 	}
4952 }
4953 
4954 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
4955 					      struct edid *edid)
4956 {
4957 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4958 			to_amdgpu_dm_connector(connector);
4959 
4960 	if (edid) {
4961 		/* empty probed_modes */
4962 		INIT_LIST_HEAD(&connector->probed_modes);
4963 		amdgpu_dm_connector->num_modes =
4964 				drm_add_edid_modes(connector, edid);
4965 
		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since the EDID can have more
		 * than one preferred mode. Modes later in the probed list can
		 * be of higher, preferred resolution: for example, 3840x2160
		 * in the base EDID preferred timing and 4096x2160 in a later
		 * DID extension block.
		 */
4974 		drm_mode_sort(&connector->probed_modes);
4975 		amdgpu_dm_get_native_mode(connector);
4976 	} else {
4977 		amdgpu_dm_connector->num_modes = 0;
4978 	}
4979 }
4980 
4981 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
4982 {
4983 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4984 			to_amdgpu_dm_connector(connector);
4985 	struct drm_encoder *encoder;
4986 	struct edid *edid = amdgpu_dm_connector->edid;
4987 
4988 	encoder = amdgpu_dm_connector_to_encoder(connector);
4989 
4990 	if (!edid || !drm_edid_is_valid(edid)) {
4991 		amdgpu_dm_connector->num_modes =
4992 				drm_add_modes_noedid(connector, 640, 480);
4993 	} else {
4994 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
4995 		amdgpu_dm_connector_add_common_modes(encoder, connector);
4996 	}
4997 	amdgpu_dm_fbc_init(connector);
4998 
4999 	return amdgpu_dm_connector->num_modes;
5000 }
5001 
5002 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5003 				     struct amdgpu_dm_connector *aconnector,
5004 				     int connector_type,
5005 				     struct dc_link *link,
5006 				     int link_index)
5007 {
5008 	struct amdgpu_device *adev = dm->ddev->dev_private;
5009 
5010 	/*
5011 	 * Some of the properties below require access to state, like bpc.
5012 	 * Allocate some default initial connector state with our reset helper.
5013 	 */
5014 	if (aconnector->base.funcs->reset)
5015 		aconnector->base.funcs->reset(&aconnector->base);
5016 
5017 	aconnector->connector_id = link_index;
5018 	aconnector->dc_link = link;
5019 	aconnector->base.interlace_allowed = false;
5020 	aconnector->base.doublescan_allowed = false;
5021 	aconnector->base.stereo_allowed = false;
5022 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5023 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5024 	aconnector->audio_inst = -1;
5025 	mutex_init(&aconnector->hpd_lock);
5026 
5027 	/*
5028 	 * configure support HPD hot plug connector_>polled default value is 0
5029 	 * which means HPD hot plug not supported
5030 	 */
5031 	switch (connector_type) {
5032 	case DRM_MODE_CONNECTOR_HDMIA:
5033 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
5036 		break;
5037 	case DRM_MODE_CONNECTOR_DisplayPort:
5038 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
5041 		break;
5042 	case DRM_MODE_CONNECTOR_DVID:
5043 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5044 		break;
5045 	default:
5046 		break;
5047 	}
5048 
5049 	drm_object_attach_property(&aconnector->base.base,
5050 				dm->ddev->mode_config.scaling_mode_property,
5051 				DRM_MODE_SCALE_NONE);
5052 
5053 	drm_object_attach_property(&aconnector->base.base,
5054 				adev->mode_info.underscan_property,
5055 				UNDERSCAN_OFF);
5056 	drm_object_attach_property(&aconnector->base.base,
5057 				adev->mode_info.underscan_hborder_property,
5058 				0);
5059 	drm_object_attach_property(&aconnector->base.base,
5060 				adev->mode_info.underscan_vborder_property,
5061 				0);
5062 
5063 	drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5064 
5065 	/* This defaults to the max in the range, but we want 8bpc. */
5066 	aconnector->base.state->max_bpc = 8;
5067 	aconnector->base.state->max_requested_bpc = 8;
5068 
5069 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5070 	    dc_is_dmcu_initialized(adev->dm.dc)) {
5071 		drm_object_attach_property(&aconnector->base.base,
5072 				adev->mode_info.abm_level_property, 0);
5073 	}
5074 
5075 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5076 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5077 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
5078 		drm_object_attach_property(
5079 			&aconnector->base.base,
5080 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
5081 
5082 		drm_connector_attach_vrr_capable_property(
5083 			&aconnector->base);
5084 	}
5085 }
5086 
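/*
 * Translate a set of i2c_msg transfers into a single DC i2c_command and
 * submit it over the link's DDC channel. Returns the number of messages on
 * success, -EIO otherwise.
 */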
5087 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5088 			      struct i2c_msg *msgs, int num)
5089 {
5090 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5091 	struct ddc_service *ddc_service = i2c->ddc_service;
5092 	struct i2c_command cmd;
5093 	int i;
5094 	int result = -EIO;
5095 
5096 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5097 
5098 	if (!cmd.payloads)
5099 		return result;
5100 
5101 	cmd.number_of_payloads = num;
5102 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5103 	cmd.speed = 100;
5104 
5105 	for (i = 0; i < num; i++) {
5106 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5107 		cmd.payloads[i].address = msgs[i].addr;
5108 		cmd.payloads[i].length = msgs[i].len;
5109 		cmd.payloads[i].data = msgs[i].buf;
5110 	}
5111 
5112 	if (dc_submit_i2c(
5113 			ddc_service->ctx->dc,
5114 			ddc_service->ddc_pin->hw_info.ddc_channel,
5115 			&cmd))
5116 		result = num;
5117 
5118 	kfree(cmd.payloads);
5119 	return result;
5120 }
5121 
5122 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
5123 {
5124 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
5125 }
5126 
5127 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
5128 	.master_xfer = amdgpu_dm_i2c_xfer,
5129 	.functionality = amdgpu_dm_i2c_func,
5130 };
5131 
5132 static struct amdgpu_i2c_adapter *
5133 create_i2c(struct ddc_service *ddc_service,
5134 	   int link_index,
5135 	   int *res)
5136 {
5137 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
5138 	struct amdgpu_i2c_adapter *i2c;
5139 
5140 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
5141 	if (!i2c)
5142 		return NULL;
5143 	i2c->base.owner = THIS_MODULE;
5144 	i2c->base.class = I2C_CLASS_DDC;
5145 	i2c->base.dev.parent = &adev->pdev->dev;
5146 	i2c->base.algo = &amdgpu_dm_i2c_algo;
5147 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
5148 	i2c_set_adapdata(&i2c->base, i2c);
5149 	i2c->ddc_service = ddc_service;
5150 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
5151 
5152 	return i2c;
5153 }
5154 
5155 
5156 /*
5157  * Note: this function assumes that dc_link_detect() was called for the
5158  * dc_link which will be represented by this aconnector.
5159  */
5160 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
5161 				    struct amdgpu_dm_connector *aconnector,
5162 				    uint32_t link_index,
5163 				    struct amdgpu_encoder *aencoder)
5164 {
5165 	int res = 0;
5166 	int connector_type;
5167 	struct dc *dc = dm->dc;
5168 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
5169 	struct amdgpu_i2c_adapter *i2c;
5170 
5171 	link->priv = aconnector;
5172 
5173 	DRM_DEBUG_DRIVER("%s()\n", __func__);
5174 
5175 	i2c = create_i2c(link->ddc, link->link_index, &res);
5176 	if (!i2c) {
5177 		DRM_ERROR("Failed to create i2c adapter data\n");
5178 		return -ENOMEM;
5179 	}
5180 
5181 	aconnector->i2c = i2c;
5182 	res = i2c_add_adapter(&i2c->base);
5183 
5184 	if (res) {
5185 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
5186 		goto out_free;
5187 	}
5188 
5189 	connector_type = to_drm_connector_type(link->connector_signal);
5190 
5191 	res = drm_connector_init(
5192 			dm->ddev,
5193 			&aconnector->base,
5194 			&amdgpu_dm_connector_funcs,
5195 			connector_type);
5196 
5197 	if (res) {
5198 		DRM_ERROR("connector_init failed\n");
5199 		aconnector->connector_id = -1;
5200 		goto out_free;
5201 	}
5202 
5203 	drm_connector_helper_add(
5204 			&aconnector->base,
5205 			&amdgpu_dm_connector_helper_funcs);
5206 
5207 	amdgpu_dm_connector_init_helper(
5208 		dm,
5209 		aconnector,
5210 		connector_type,
5211 		link,
5212 		link_index);
5213 
5214 	drm_connector_attach_encoder(
5215 		&aconnector->base, &aencoder->base);
5216 
5217 	drm_connector_register(&aconnector->base);
5218 #if defined(CONFIG_DEBUG_FS)
5219 	connector_debugfs_init(aconnector);
5220 	aconnector->debugfs_dpcd_address = 0;
5221 	aconnector->debugfs_dpcd_size = 0;
5222 #endif
5223 
5224 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
5225 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
5226 		amdgpu_dm_initialize_dp_connector(dm, aconnector);
5227 
5228 out_free:
5229 	if (res) {
5230 		kfree(i2c);
5231 		aconnector->i2c = NULL;
5232 	}
5233 	return res;
5234 }
5235 
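/*
 * Build a possible_crtcs bitmask with one bit set per CRTC so an encoder can
 * be routed to any of them.
 */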
5236 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
5237 {
5238 	switch (adev->mode_info.num_crtc) {
5239 	case 1:
5240 		return 0x1;
5241 	case 2:
5242 		return 0x3;
5243 	case 3:
5244 		return 0x7;
5245 	case 4:
5246 		return 0xf;
5247 	case 5:
5248 		return 0x1f;
5249 	case 6:
5250 	default:
5251 		return 0x3f;
5252 	}
5253 }
5254 
5255 static int amdgpu_dm_encoder_init(struct drm_device *dev,
5256 				  struct amdgpu_encoder *aencoder,
5257 				  uint32_t link_index)
5258 {
5259 	struct amdgpu_device *adev = dev->dev_private;
5260 
5261 	int res = drm_encoder_init(dev,
5262 				   &aencoder->base,
5263 				   &amdgpu_dm_encoder_funcs,
5264 				   DRM_MODE_ENCODER_TMDS,
5265 				   NULL);
5266 
5267 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
5268 
5269 	if (!res)
5270 		aencoder->encoder_id = link_index;
5271 	else
5272 		aencoder->encoder_id = -1;
5273 
5274 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
5275 
5276 	return res;
5277 }
5278 
5279 static void manage_dm_interrupts(struct amdgpu_device *adev,
5280 				 struct amdgpu_crtc *acrtc,
5281 				 bool enable)
5282 {
5283 	/*
5284 	 * this is not correct translation but will work as soon as VBLANK
5285 	 * constant is the same as PFLIP
5286 	 */
5287 	int irq_type =
5288 		amdgpu_display_crtc_idx_to_irq_type(
5289 			adev,
5290 			acrtc->crtc_id);
5291 
5292 	if (enable) {
5293 		drm_crtc_vblank_on(&acrtc->base);
5294 		amdgpu_irq_get(
5295 			adev,
5296 			&adev->pageflip_irq,
5297 			irq_type);
5298 	} else {
5300 		amdgpu_irq_put(
5301 			adev,
5302 			&adev->pageflip_irq,
5303 			irq_type);
5304 		drm_crtc_vblank_off(&acrtc->base);
5305 	}
5306 }
5307 
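/*
 * Returns true if the scaling mode or the underscan enable/border settings
 * differ between the old and new connector state, i.e. when the stream's
 * scaling settings have to be reprogrammed.
 */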
5308 static bool
5309 is_scaling_state_different(const struct dm_connector_state *dm_state,
5310 			   const struct dm_connector_state *old_dm_state)
5311 {
5312 	if (dm_state->scaling != old_dm_state->scaling)
5313 		return true;
5314 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
5315 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
5316 			return true;
5317 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
5318 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
5319 			return true;
5320 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
5321 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
5322 		return true;
5323 	return false;
5324 }
5325 
5326 static void remove_stream(struct amdgpu_device *adev,
5327 			  struct amdgpu_crtc *acrtc,
5328 			  struct dc_stream_state *stream)
5329 {
5330 	/* This is the update-mode case: the stream is being removed from the CRTC. */
5331 
5332 	acrtc->otg_inst = -1;
5333 	acrtc->enabled = false;
5334 }
5335 
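/*
 * Translate the DRM cursor plane state into a DC cursor position. Negative
 * on-screen coordinates are clamped to zero, with the clamped amount carried
 * in the hotspot so the visible part of the cursor stays correct. E.g.
 * (illustrative, ignoring the primary plane source offset): crtc_x == -10
 * yields position->x == 0 with position->x_hotspot == 10.
 */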
5336 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
5337 			       struct dc_cursor_position *position)
5338 {
5339 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
5340 	int x, y;
5341 	int xorigin = 0, yorigin = 0;
5342 
5343 	position->enable = false;
5344 	position->x = 0;
5345 	position->y = 0;
5346 
5347 	if (!crtc || !plane->state->fb)
5348 		return 0;
5349 
5350 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
5351 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
5352 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
5353 			  __func__,
5354 			  plane->state->crtc_w,
5355 			  plane->state->crtc_h);
5356 		return -EINVAL;
5357 	}
5358 
5359 	x = plane->state->crtc_x;
5360 	y = plane->state->crtc_y;
5361 
5362 	if (x <= -amdgpu_crtc->max_cursor_width ||
5363 	    y <= -amdgpu_crtc->max_cursor_height)
5364 		return 0;
5365 
5366 	if (crtc->primary->state) {
5367 		/* avivo cursors are offset into the total surface */
5368 		x += crtc->primary->state->src_x >> 16;
5369 		y += crtc->primary->state->src_y >> 16;
5370 	}
5371 
5372 	if (x < 0) {
5373 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
5374 		x = 0;
5375 	}
5376 	if (y < 0) {
5377 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
5378 		y = 0;
5379 	}
5380 	position->enable = true;
5381 	position->x = x;
5382 	position->y = y;
5383 	position->x_hotspot = xorigin;
5384 	position->y_hotspot = yorigin;
5385 
5386 	return 0;
5387 }
5388 
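/*
 * Program the hardware cursor for a cursor plane update: compute the new
 * position, then either disable the cursor (position not enabled) or push
 * the new attributes and position to DC under the dc_lock.
 */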
5389 static void handle_cursor_update(struct drm_plane *plane,
5390 				 struct drm_plane_state *old_plane_state)
5391 {
5392 	struct amdgpu_device *adev = plane->dev->dev_private;
5393 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
5394 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
5395 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
5396 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
5397 	uint64_t address = afb ? afb->address : 0;
5398 	struct dc_cursor_position position;
5399 	struct dc_cursor_attributes attributes;
5400 	int ret;
5401 
5402 	if (!plane->state->fb && !old_plane_state->fb)
5403 		return;
5404 
5405 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %dx%d\n",
5406 			 __func__,
5407 			 amdgpu_crtc->crtc_id,
5408 			 plane->state->crtc_w,
5409 			 plane->state->crtc_h);
5410 
5411 	ret = get_cursor_position(plane, crtc, &position);
5412 	if (ret)
5413 		return;
5414 
5415 	if (!position.enable) {
5416 		/* turn off cursor */
5417 		if (crtc_state && crtc_state->stream) {
5418 			mutex_lock(&adev->dm.dc_lock);
5419 			dc_stream_set_cursor_position(crtc_state->stream,
5420 						      &position);
5421 			mutex_unlock(&adev->dm.dc_lock);
5422 		}
5423 		return;
5424 	}
5425 
5426 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
5427 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
5428 
5429 	memset(&attributes, 0, sizeof(attributes));
5430 	attributes.address.high_part = upper_32_bits(address);
5431 	attributes.address.low_part  = lower_32_bits(address);
5432 	attributes.width             = plane->state->crtc_w;
5433 	attributes.height            = plane->state->crtc_h;
5434 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
5435 	attributes.rotation_angle    = 0;
5436 	attributes.attribute_flags.value = 0;
5437 
5438 	attributes.pitch = attributes.width;
5439 
5440 	if (crtc_state->stream) {
5441 		mutex_lock(&adev->dm.dc_lock);
5442 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
5443 							 &attributes))
5444 			DRM_ERROR("DC failed to set cursor attributes\n");
5445 
5446 		if (!dc_stream_set_cursor_position(crtc_state->stream,
5447 						   &position))
5448 			DRM_ERROR("DC failed to set cursor position\n");
5449 		mutex_unlock(&adev->dm.dc_lock);
5450 	}
5451 }
5452 
5453 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
5454 {
5456 	assert_spin_locked(&acrtc->base.dev->event_lock);
5457 	WARN_ON(acrtc->event);
5458 
5459 	acrtc->event = acrtc->base.state->event;
5460 
5461 	/* Set the flip status */
5462 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
5463 
5464 	/* Mark this event as consumed */
5465 	acrtc->base.state->event = NULL;
5466 
5467 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
5468 						 acrtc->crtc_id);
5469 }
5470 
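/*
 * Update the VRR state tracked on the CRTC after a flip: let the freesync
 * module process the flip timestamp, rebuild the VRR infopacket, and record
 * whether the timing adjustment or infopacket actually changed so that the
 * commit path only sends out updates when needed.
 */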
5471 static void update_freesync_state_on_stream(
5472 	struct amdgpu_display_manager *dm,
5473 	struct dm_crtc_state *new_crtc_state,
5474 	struct dc_stream_state *new_stream,
5475 	struct dc_plane_state *surface,
5476 	u32 flip_timestamp_in_us)
5477 {
5478 	struct mod_vrr_params vrr_params;
5479 	struct dc_info_packet vrr_infopacket = {0};
5480 	struct amdgpu_device *adev = dm->adev;
5481 	unsigned long flags;
5482 
5483 	if (!new_stream)
5484 		return;
5485 
5486 	/*
5487 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
5488 	 * For now it's sufficient to just guard against these conditions.
5489 	 */
5490 
5491 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
5492 		return;
5493 
5494 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
5495 	vrr_params = new_crtc_state->vrr_params;
5496 
5497 	if (surface) {
5498 		mod_freesync_handle_preflip(
5499 			dm->freesync_module,
5500 			surface,
5501 			new_stream,
5502 			flip_timestamp_in_us,
5503 			&vrr_params);
5504 
5505 		if (adev->family < AMDGPU_FAMILY_AI &&
5506 		    amdgpu_dm_vrr_active(new_crtc_state)) {
5507 			mod_freesync_handle_v_update(dm->freesync_module,
5508 						     new_stream, &vrr_params);
5509 
5510 			/* Need to call this before the frame ends. */
5511 			dc_stream_adjust_vmin_vmax(dm->dc,
5512 						   new_crtc_state->stream,
5513 						   &vrr_params.adjust);
5514 		}
5515 	}
5516 
5517 	mod_freesync_build_vrr_infopacket(
5518 		dm->freesync_module,
5519 		new_stream,
5520 		&vrr_params,
5521 		PACKET_TYPE_VRR,
5522 		TRANSFER_FUNC_UNKNOWN,
5523 		&vrr_infopacket);
5524 
5525 	new_crtc_state->freesync_timing_changed |=
5526 		(memcmp(&new_crtc_state->vrr_params.adjust,
5527 			&vrr_params.adjust,
5528 			sizeof(vrr_params.adjust)) != 0);
5529 
5530 	new_crtc_state->freesync_vrr_info_changed |=
5531 		(memcmp(&new_crtc_state->vrr_infopacket,
5532 			&vrr_infopacket,
5533 			sizeof(vrr_infopacket)) != 0);
5534 
5535 	new_crtc_state->vrr_params = vrr_params;
5536 	new_crtc_state->vrr_infopacket = vrr_infopacket;
5537 
5538 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
5539 	new_stream->vrr_infopacket = vrr_infopacket;
5540 
5541 	if (new_crtc_state->freesync_vrr_info_changed)
5542 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
5543 			      new_crtc_state->base.crtc->base.id,
5544 			      (int)new_crtc_state->base.vrr_enabled,
5545 			      (int)vrr_params.state);
5546 
5547 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
5548 }
5549 
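/*
 * Recompute the VRR parameters for a stream before commit, based on the
 * freesync config stored in the CRTC state, and note whether the timing
 * adjustment changed as a result.
 */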
5550 static void pre_update_freesync_state_on_stream(
5551 	struct amdgpu_display_manager *dm,
5552 	struct dm_crtc_state *new_crtc_state)
5553 {
5554 	struct dc_stream_state *new_stream = new_crtc_state->stream;
5555 	struct mod_vrr_params vrr_params;
5556 	struct mod_freesync_config config = new_crtc_state->freesync_config;
5557 	struct amdgpu_device *adev = dm->adev;
5558 	unsigned long flags;
5559 
5560 	if (!new_stream)
5561 		return;
5562 
5563 	/*
5564 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
5565 	 * For now it's sufficient to just guard against these conditions.
5566 	 */
5567 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
5568 		return;
5569 
5570 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
5571 	vrr_params = new_crtc_state->vrr_params;
5572 
5573 	if (new_crtc_state->vrr_supported &&
5574 	    config.min_refresh_in_uhz &&
5575 	    config.max_refresh_in_uhz) {
5576 		config.state = new_crtc_state->base.vrr_enabled ?
5577 			VRR_STATE_ACTIVE_VARIABLE :
5578 			VRR_STATE_INACTIVE;
5579 	} else {
5580 		config.state = VRR_STATE_UNSUPPORTED;
5581 	}
5582 
5583 	mod_freesync_build_vrr_params(dm->freesync_module,
5584 				      new_stream,
5585 				      &config, &vrr_params);
5586 
5587 	new_crtc_state->freesync_timing_changed |=
5588 		(memcmp(&new_crtc_state->vrr_params.adjust,
5589 			&vrr_params.adjust,
5590 			sizeof(vrr_params.adjust)) != 0);
5591 
5592 	new_crtc_state->vrr_params = vrr_params;
5593 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
5594 }
5595 
5596 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
5597 					    struct dm_crtc_state *new_state)
5598 {
5599 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
5600 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
5601 
5602 	if (!old_vrr_active && new_vrr_active) {
5603 		/* Transition VRR inactive -> active:
5604 		 * While VRR is active, we must not disable the vblank irq, as
5605 		 * a reenable after a disable would compute bogus vblank/pflip
5606 		 * timestamps if it happened inside the display front porch.
5607 		 *
5608 		 * We also need the vupdate irq for the actual core vblank
5609 		 * handling at the end of vblank.
5610 		 */
5611 		dm_set_vupdate_irq(new_state->base.crtc, true);
5612 		drm_crtc_vblank_get(new_state->base.crtc);
5613 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
5614 				 __func__, new_state->base.crtc->base.id);
5615 	} else if (old_vrr_active && !new_vrr_active) {
5616 		/* Transition VRR active -> inactive:
5617 		 * Allow vblank irq disable again for fixed refresh rate.
5618 		 */
5619 		dm_set_vupdate_irq(new_state->base.crtc, false);
5620 		drm_crtc_vblank_put(new_state->base.crtc);
5621 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
5622 				 __func__, new_state->base.crtc->base.id);
5623 	}
5624 }
5625 
5626 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
5627 {
5628 	struct drm_plane *plane;
5629 	struct drm_plane_state *old_plane_state, *new_plane_state;
5630 	int i;
5631 
5632 	/*
5633 	 * TODO: Make this per-stream so we don't issue redundant updates for
5634 	 * commits with multiple streams.
5635 	 */
5636 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
5637 				       new_plane_state, i)
5638 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5639 			handle_cursor_update(plane, old_plane_state);
5640 }
5641 
5642 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
5643 				    struct dc_state *dc_state,
5644 				    struct drm_device *dev,
5645 				    struct amdgpu_display_manager *dm,
5646 				    struct drm_crtc *pcrtc,
5647 				    bool wait_for_vblank)
5648 {
5649 	uint32_t i;
5650 	uint64_t timestamp_ns;
5651 	struct drm_plane *plane;
5652 	struct drm_plane_state *old_plane_state, *new_plane_state;
5653 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
5654 	struct drm_crtc_state *new_pcrtc_state =
5655 			drm_atomic_get_new_crtc_state(state, pcrtc);
5656 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
5657 	struct dm_crtc_state *dm_old_crtc_state =
5658 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
5659 	int planes_count = 0, vpos, hpos;
5660 	long r;
5661 	unsigned long flags;
5662 	struct amdgpu_bo *abo;
5663 	uint64_t tiling_flags;
5664 	uint32_t target_vblank, last_flip_vblank;
5665 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
5666 	bool pflip_present = false;
5667 	struct {
5668 		struct dc_surface_update surface_updates[MAX_SURFACES];
5669 		struct dc_plane_info plane_infos[MAX_SURFACES];
5670 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
5671 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
5672 		struct dc_stream_update stream_update;
5673 	} *bundle;
5674 
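	/*
	 * The update bundle holds several MAX_SURFACES-sized arrays and is
	 * too large to comfortably live on the kernel stack, so it is
	 * allocated on the heap instead.
	 */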
5675 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
5676 
5677 	if (!bundle) {
5678 		dm_error("Failed to allocate update bundle\n");
5679 		goto cleanup;
5680 	}
5681 
5682 	/*
5683 	 * Disable the cursor first if we're disabling all the planes.
5684 	 * It'll remain on the screen after the planes are re-enabled
5685 	 * if we don't.
5686 	 */
5687 	if (acrtc_state->active_planes == 0)
5688 		amdgpu_dm_commit_cursors(state);
5689 
5690 	/* update planes when needed */
5691 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
5692 		struct drm_crtc *crtc = new_plane_state->crtc;
5693 		struct drm_crtc_state *new_crtc_state;
5694 		struct drm_framebuffer *fb = new_plane_state->fb;
5695 		bool plane_needs_flip;
5696 		struct dc_plane_state *dc_plane;
5697 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
5698 
5699 		/* Cursor plane is handled after stream updates */
5700 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5701 			continue;
5702 
5703 		if (!fb || !crtc || pcrtc != crtc)
5704 			continue;
5705 
5706 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
5707 		if (!new_crtc_state->active)
5708 			continue;
5709 
5710 		dc_plane = dm_new_plane_state->dc_state;
5711 
5712 		bundle->surface_updates[planes_count].surface = dc_plane;
5713 		if (new_pcrtc_state->color_mgmt_changed) {
5714 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
5715 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
5716 		}
5717 
5718 		fill_dc_scaling_info(new_plane_state,
5719 				     &bundle->scaling_infos[planes_count]);
5720 
5721 		bundle->surface_updates[planes_count].scaling_info =
5722 			&bundle->scaling_infos[planes_count];
5723 
5724 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
5725 
5726 		pflip_present = pflip_present || plane_needs_flip;
5727 
5728 		if (!plane_needs_flip) {
5729 			planes_count += 1;
5730 			continue;
5731 		}
5732 
5733 		abo = gem_to_amdgpu_bo(fb->obj[0]);
5734 
5735 		/*
5736 		 * Wait for all fences on this FB. Do limited wait to avoid
5737 		 * deadlock during GPU reset when this fence will not signal
5738 		 * but we hold reservation lock for the BO.
5739 		 */
5740 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
5741 							false,
5742 							msecs_to_jiffies(5000));
5743 		if (unlikely(r <= 0))
5744 			DRM_ERROR("Waiting for fences timed out!\n");
5745 
5746 		/*
5747 		 * TODO: This might fail and hence is better not used; wait
5748 		 * explicitly on fences instead. In general this should only
5749 		 * be called for a blocking commit, as per the framework
5750 		 * helpers.
5751 		 */
5752 		r = amdgpu_bo_reserve(abo, true);
5753 		if (unlikely(r != 0))
5754 			DRM_ERROR("failed to reserve buffer before flip\n");
5755 
5756 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
5757 
5758 		amdgpu_bo_unreserve(abo);
5759 
5760 		fill_dc_plane_info_and_addr(
5761 			dm->adev, new_plane_state, tiling_flags,
5762 			&bundle->plane_infos[planes_count],
5763 			&bundle->flip_addrs[planes_count].address);
5764 
5765 		bundle->surface_updates[planes_count].plane_info =
5766 			&bundle->plane_infos[planes_count];
5767 
5768 		/*
5769 		 * Only allow immediate flips for fast updates that don't
5770 		 * change FB pitch, DCC state, rotation or mirroring.
5771 		 */
5772 		bundle->flip_addrs[planes_count].flip_immediate =
5773 			(crtc->state->pageflip_flags &
5774 			 DRM_MODE_PAGE_FLIP_ASYNC) != 0 &&
5775 			acrtc_state->update_type == UPDATE_TYPE_FAST;
5776 
5777 		timestamp_ns = ktime_get_ns();
5778 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
5779 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
5780 		bundle->surface_updates[planes_count].surface = dc_plane;
5781 
5782 		if (!bundle->surface_updates[planes_count].surface) {
5783 			DRM_ERROR("No surface for CRTC: id=%d\n",
5784 					acrtc_attach->crtc_id);
5785 			continue;
5786 		}
5787 
5788 		if (plane == pcrtc->primary)
5789 			update_freesync_state_on_stream(
5790 				dm,
5791 				acrtc_state,
5792 				acrtc_state->stream,
5793 				dc_plane,
5794 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
5795 
5796 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
5797 				 __func__,
5798 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
5799 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
5800 
5801 		planes_count += 1;
5802 
5803 	}
5804 
5805 	if (pflip_present) {
5806 		if (!vrr_active) {
5807 			/* Use old throttling in non-vrr fixed refresh rate mode
5808 			 * to keep flip scheduling based on target vblank counts
5809 			 * working in a backwards compatible way, e.g., for
5810 			 * clients using the GLX_OML_sync_control extension or
5811 			 * DRI3/Present extension with defined target_msc.
5812 			 */
5813 			last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
5814 		} else {
5816 			/* For variable refresh rate mode only:
5817 			 * Get vblank of last completed flip to avoid > 1 vrr
5818 			 * flips per video frame by use of throttling, but allow
5819 			 * flip programming anywhere in the possibly large
5820 			 * variable vrr vblank interval for fine-grained flip
5821 			 * timing control and more opportunity to avoid stutter
5822 			 * on late submission of flips.
5823 			 */
5824 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5825 			last_flip_vblank = acrtc_attach->last_flip_vblank;
5826 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5827 		}
5828 
5829 		target_vblank = last_flip_vblank + wait_for_vblank;
5830 
5831 		/*
5832 		 * Wait until we're out of the vertical blank period before the one
5833 		 * targeted by the flip
5834 		 */
5835 		while ((acrtc_attach->enabled &&
5836 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
5837 							    0, &vpos, &hpos, NULL,
5838 							    NULL, &pcrtc->hwmode)
5839 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
5840 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
5841 			(int)(target_vblank -
5842 			  amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
5843 			usleep_range(1000, 1100);
5844 		}
5845 
5846 		if (acrtc_attach->base.state->event) {
5847 			drm_crtc_vblank_get(pcrtc);
5848 
5849 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5850 
5851 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
5852 			prepare_flip_isr(acrtc_attach);
5853 
5854 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5855 		}
5856 
5857 		if (acrtc_state->stream) {
5858 			if (acrtc_state->freesync_vrr_info_changed)
5859 				bundle->stream_update.vrr_infopacket =
5860 					&acrtc_state->stream->vrr_infopacket;
5861 		}
5862 	}
5863 
5864 	/* Update the planes if changed or disable if we don't have any. */
5865 	if ((planes_count || acrtc_state->active_planes == 0) &&
5866 		acrtc_state->stream) {
5867 		if (new_pcrtc_state->mode_changed) {
5868 			bundle->stream_update.src = acrtc_state->stream->src;
5869 			bundle->stream_update.dst = acrtc_state->stream->dst;
5870 		}
5871 
5872 		if (new_pcrtc_state->color_mgmt_changed) {
5873 			/*
5874 			 * TODO: This isn't fully correct since we've actually
5875 			 * already modified the stream in place.
5876 			 */
5877 			bundle->stream_update.gamut_remap =
5878 				&acrtc_state->stream->gamut_remap_matrix;
5879 			bundle->stream_update.output_csc_transform =
5880 				&acrtc_state->stream->csc_color_matrix;
5881 			bundle->stream_update.out_transfer_func =
5882 				acrtc_state->stream->out_transfer_func;
5883 		}
5884 
5885 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
5886 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
5887 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
5888 
5889 		/*
5890 		 * If FreeSync state on the stream has changed then we need to
5891 		 * re-adjust the min/max bounds now that DC doesn't handle this
5892 		 * as part of commit.
5893 		 */
5894 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
5895 		    amdgpu_dm_vrr_active(acrtc_state)) {
5896 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5897 			dc_stream_adjust_vmin_vmax(
5898 				dm->dc, acrtc_state->stream,
5899 				&acrtc_state->vrr_params.adjust);
5900 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5901 		}
5902 
5903 		mutex_lock(&dm->dc_lock);
5904 		dc_commit_updates_for_stream(dm->dc,
5905 						     bundle->surface_updates,
5906 						     planes_count,
5907 						     acrtc_state->stream,
5908 						     &bundle->stream_update,
5909 						     dc_state);
5910 		mutex_unlock(&dm->dc_lock);
5911 	}
5912 
5913 	/*
5914 	 * Update cursor state *after* programming all the planes.
5915 	 * This avoids redundant programming in the case where we're going
5916 	 * to be disabling a single plane, since those pipes are being disabled.
5917 	 */
5918 	if (acrtc_state->active_planes)
5919 		amdgpu_dm_commit_cursors(state);
5920 
5921 cleanup:
5922 	kfree(bundle);
5923 }
5924 
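/*
 * Notify the audio component about connector changes in two passes: first
 * signal removal for connectors whose CRTC changed or is undergoing a
 * modeset, then signal addition, with the stream's audio instance, for
 * connectors that end up with an active stream.
 */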
5925 static void amdgpu_dm_commit_audio(struct drm_device *dev,
5926 				   struct drm_atomic_state *state)
5927 {
5928 	struct amdgpu_device *adev = dev->dev_private;
5929 	struct amdgpu_dm_connector *aconnector;
5930 	struct drm_connector *connector;
5931 	struct drm_connector_state *old_con_state, *new_con_state;
5932 	struct drm_crtc_state *new_crtc_state;
5933 	struct dm_crtc_state *new_dm_crtc_state;
5934 	const struct dc_stream_status *status;
5935 	int i, inst;
5936 
5937 	/* Notify device removals. */
5938 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5939 		if (old_con_state->crtc != new_con_state->crtc) {
5940 			/* CRTC changes require notification. */
5941 			goto notify;
5942 		}
5943 
5944 		if (!new_con_state->crtc)
5945 			continue;
5946 
5947 		new_crtc_state = drm_atomic_get_new_crtc_state(
5948 			state, new_con_state->crtc);
5949 
5950 		if (!new_crtc_state)
5951 			continue;
5952 
5953 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
5954 			continue;
5955 
5956 	notify:
5957 		aconnector = to_amdgpu_dm_connector(connector);
5958 
5959 		mutex_lock(&adev->dm.audio_lock);
5960 		inst = aconnector->audio_inst;
5961 		aconnector->audio_inst = -1;
5962 		mutex_unlock(&adev->dm.audio_lock);
5963 
5964 		amdgpu_dm_audio_eld_notify(adev, inst);
5965 	}
5966 
5967 	/* Notify audio device additions. */
5968 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
5969 		if (!new_con_state->crtc)
5970 			continue;
5971 
5972 		new_crtc_state = drm_atomic_get_new_crtc_state(
5973 			state, new_con_state->crtc);
5974 
5975 		if (!new_crtc_state)
5976 			continue;
5977 
5978 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
5979 			continue;
5980 
5981 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
5982 		if (!new_dm_crtc_state->stream)
5983 			continue;
5984 
5985 		status = dc_stream_get_status(new_dm_crtc_state->stream);
5986 		if (!status)
5987 			continue;
5988 
5989 		aconnector = to_amdgpu_dm_connector(connector);
5990 
5991 		mutex_lock(&adev->dm.audio_lock);
5992 		inst = status->audio_inst;
5993 		aconnector->audio_inst = inst;
5994 		mutex_unlock(&adev->dm.audio_lock);
5995 
5996 		amdgpu_dm_audio_eld_notify(adev, inst);
5997 	}
5998 }
5999 
6000 /*
6001  * Enable interrupts on CRTCs that are newly active, undergone
6002  * a modeset, or have active planes again.
6003  *
6004  * Done in two passes, based on the for_modeset flag:
6005  * Pass 1: For CRTCs going through modeset
6006  * Pass 2: For CRTCs going from 0 to n active planes
6007  *
6008  * Interrupts can only be enabled after the planes are programmed,
6009  * so this requires a two-pass approach since we don't want to
6010  * just defer the interrupts until after commit planes every time.
6011  */
6012 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6013 					     struct drm_atomic_state *state,
6014 					     bool for_modeset)
6015 {
6016 	struct amdgpu_device *adev = dev->dev_private;
6017 	struct drm_crtc *crtc;
6018 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6019 	int i;
6020 	enum amdgpu_dm_pipe_crc_source source;
6021 
6022 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6023 				      new_crtc_state, i) {
6024 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6025 		struct dm_crtc_state *dm_new_crtc_state =
6026 			to_dm_crtc_state(new_crtc_state);
6027 		struct dm_crtc_state *dm_old_crtc_state =
6028 			to_dm_crtc_state(old_crtc_state);
6029 		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6030 		bool run_pass;
6031 
6032 		run_pass = (for_modeset && modeset) ||
6033 			   (!for_modeset && !modeset &&
6034 			    !dm_old_crtc_state->interrupts_enabled);
6035 
6036 		if (!run_pass)
6037 			continue;
6038 
6039 		if (!dm_new_crtc_state->interrupts_enabled)
6040 			continue;
6041 
6042 		manage_dm_interrupts(adev, acrtc, true);
6043 
6044 #ifdef CONFIG_DEBUG_FS
6045 		/* The stream has changed, so CRC capture needs to be re-enabled. */
6046 		source = dm_new_crtc_state->crc_src;
6047 		if (amdgpu_dm_is_valid_crc_source(source)) {
6048 			amdgpu_dm_crtc_configure_crc_source(
6049 				crtc, dm_new_crtc_state,
6050 				dm_new_crtc_state->crc_src);
6051 		}
6052 #endif
6053 	}
6054 }
6055 
6056 /**
6057  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6058  * @crtc_state: the DRM CRTC state
6059  * @stream_state: the DC stream state
6060  *
6061  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
6062  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6063  */
6064 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
6065 						struct dc_stream_state *stream_state)
6066 {
6067 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
6068 }
6069 
6070 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
6071 				   struct drm_atomic_state *state,
6072 				   bool nonblock)
6073 {
6074 	struct drm_crtc *crtc;
6075 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6076 	struct amdgpu_device *adev = dev->dev_private;
6077 	int i;
6078 
6079 	/*
6080 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
6081 	 * a modeset, are being disabled, or have no active planes.
6082 	 *
6083 	 * It's done in atomic commit rather than commit tail for now since
6084 	 * some of these interrupt handlers access the current CRTC state and
6085 	 * potentially the stream pointer itself.
6086 	 *
6087 	 * Since the atomic state is swapped within atomic commit and not within
6088 	 * commit tail, this would lead to the new state (that hasn't been
6089 	 * committed yet) being accessed from within the handlers.
6090 	 *
6091 	 * TODO: Fix this so we can do this in commit tail and not have to block
6092 	 * in atomic check.
6093 	 */
6094 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6095 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6096 		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6097 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6098 
6099 		if (dm_old_crtc_state->interrupts_enabled &&
6100 		    (!dm_new_crtc_state->interrupts_enabled ||
6101 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
6102 			manage_dm_interrupts(adev, acrtc, false);
6103 	}
6104 	/*
6105 	 * Add check here for SoC's that support hardware cursor plane, to
6106 	 * unset legacy_cursor_update
6107 	 */
6108 
6109 	/* TODO: Handle EINTR, reenable IRQ */
6110 	return drm_atomic_helper_commit(dev, state, nonblock);
6112 }
6113 
6114 /**
6115  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
6116  * @state: The atomic state to commit
6117  *
6118  * This will tell DC to commit the constructed DC state from atomic_check,
6119  * programming the hardware. Any failure here implies a hardware failure, since
6120  * atomic check should have filtered anything non-kosher.
6121  */
6122 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
6123 {
6124 	struct drm_device *dev = state->dev;
6125 	struct amdgpu_device *adev = dev->dev_private;
6126 	struct amdgpu_display_manager *dm = &adev->dm;
6127 	struct dm_atomic_state *dm_state;
6128 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
6129 	uint32_t i, j;
6130 	struct drm_crtc *crtc;
6131 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6132 	unsigned long flags;
6133 	bool wait_for_vblank = true;
6134 	struct drm_connector *connector;
6135 	struct drm_connector_state *old_con_state, *new_con_state;
6136 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
6137 	int crtc_disable_count = 0;
6138 
6139 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
6140 
6141 	dm_state = dm_atomic_get_new_state(state);
6142 	if (dm_state && dm_state->context) {
6143 		dc_state = dm_state->context;
6144 	} else {
6145 		/* No state changes, retain current state. */
6146 		dc_state_temp = dc_create_state(dm->dc);
6147 		ASSERT(dc_state_temp);
6148 		dc_state = dc_state_temp;
6149 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
6150 	}
6151 
6152 	/* update changed items */
6153 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6154 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6155 
6156 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6157 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6158 
6159 		DRM_DEBUG_DRIVER(
6160 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
6161 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
6162 			"connectors_changed:%d\n",
6163 			acrtc->crtc_id,
6164 			new_crtc_state->enable,
6165 			new_crtc_state->active,
6166 			new_crtc_state->planes_changed,
6167 			new_crtc_state->mode_changed,
6168 			new_crtc_state->active_changed,
6169 			new_crtc_state->connectors_changed);
6170 
6171 		/* Copy all transient state flags into dc state */
6172 		if (dm_new_crtc_state->stream) {
6173 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
6174 							    dm_new_crtc_state->stream);
6175 		}
6176 
6177 		/* Handles the headless hotplug case, updating new_state and
6178 		 * aconnector as needed.
6179 		 */
6180 
6181 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
6182 
6183 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
6184 
6185 			if (!dm_new_crtc_state->stream) {
6186 				/*
6187 				 * This could happen because of issues with
6188 				 * userspace notification delivery. In this
6189 				 * case userspace tries to set a mode on a
6190 				 * display which is in fact disconnected;
6191 				 * dc_sink is then NULL on the aconnector.
6192 				 * We expect a mode reset to come soon.
6193 				 *
6194 				 * This can also happen when an unplug occurs
6195 				 * during the resume sequence.
6196 				 *
6197 				 * In this case, we want to pretend we still
6198 				 * have a sink to keep the pipe running so that
6199 				 * hw state is consistent with the sw state.
6200 				 */
6201 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
6202 						__func__, acrtc->base.base.id);
6203 				continue;
6204 			}
6205 
6206 			if (dm_old_crtc_state->stream)
6207 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6208 
6209 			pm_runtime_get_noresume(dev->dev);
6210 
6211 			acrtc->enabled = true;
6212 			acrtc->hw_mode = new_crtc_state->mode;
6213 			crtc->hwmode = new_crtc_state->mode;
6214 		} else if (modereset_required(new_crtc_state)) {
6215 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
6216 
6217 			/* i.e. reset mode */
6218 			if (dm_old_crtc_state->stream)
6219 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6220 		}
6221 	} /* for_each_crtc_in_state() */
6222 
6223 	if (dc_state) {
6224 		dm_enable_per_frame_crtc_master_sync(dc_state);
6225 		mutex_lock(&dm->dc_lock);
6226 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
6227 		mutex_unlock(&dm->dc_lock);
6228 	}
6229 
6230 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
6231 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6232 
6233 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6234 
6235 		if (dm_new_crtc_state->stream != NULL) {
6236 			const struct dc_stream_status *status =
6237 					dc_stream_get_status(dm_new_crtc_state->stream);
6238 
6239 			if (!status)
6240 				status = dc_stream_get_status_from_state(dc_state,
6241 									 dm_new_crtc_state->stream);
6242 
6243 			if (!status)
6244 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
6245 			else
6246 				acrtc->otg_inst = status->primary_otg_inst;
6247 		}
6248 	}
6249 
6250 	/* Handle connector state changes */
6251 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6252 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
6253 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
6254 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
6255 		struct dc_surface_update dummy_updates[MAX_SURFACES];
6256 		struct dc_stream_update stream_update;
6257 		struct dc_info_packet hdr_packet;
6258 		struct dc_stream_status *status = NULL;
6259 		bool abm_changed, hdr_changed, scaling_changed;
6260 
6261 		memset(&dummy_updates, 0, sizeof(dummy_updates));
6262 		memset(&stream_update, 0, sizeof(stream_update));
6263 
6264 		if (acrtc) {
6265 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
6266 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
6267 		}
6268 
6269 		/* Skip any modesets/resets */
6270 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
6271 			continue;
6272 
6273 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6274 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6275 
6276 		scaling_changed = is_scaling_state_different(dm_new_con_state,
6277 							     dm_old_con_state);
6278 
6279 		abm_changed = dm_new_crtc_state->abm_level !=
6280 			      dm_old_crtc_state->abm_level;
6281 
6282 		hdr_changed =
6283 			is_hdr_metadata_different(old_con_state, new_con_state);
6284 
6285 		if (!scaling_changed && !abm_changed && !hdr_changed)
6286 			continue;
6287 
6288 		if (scaling_changed) {
6289 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
6290 					dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
6291 
6292 			stream_update.src = dm_new_crtc_state->stream->src;
6293 			stream_update.dst = dm_new_crtc_state->stream->dst;
6294 		}
6295 
6296 		if (abm_changed) {
6297 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
6298 
6299 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
6300 		}
6301 
6302 		if (hdr_changed) {
6303 			fill_hdr_info_packet(new_con_state, &hdr_packet);
6304 			stream_update.hdr_static_metadata = &hdr_packet;
6305 		}
6306 
6307 		status = dc_stream_get_status(dm_new_crtc_state->stream);
6308 		WARN_ON(!status);
6309 		WARN_ON(!status->plane_count);
6310 
6311 		/*
6312 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
6313 		 * Here we create an empty update on each plane.
6314 		 * To fix this, DC should permit updating only stream properties.
6315 		 */
6316 		for (j = 0; j < status->plane_count; j++)
6317 			dummy_updates[j].surface = status->plane_states[0];
6318 
6320 		mutex_lock(&dm->dc_lock);
6321 		dc_commit_updates_for_stream(dm->dc,
6322 						     dummy_updates,
6323 						     status->plane_count,
6324 						     dm_new_crtc_state->stream,
6325 						     &stream_update,
6326 						     dc_state);
6327 		mutex_unlock(&dm->dc_lock);
6328 	}
6329 
6330 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
6331 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6332 				      new_crtc_state, i) {
6333 		if (old_crtc_state->active && !new_crtc_state->active)
6334 			crtc_disable_count++;
6335 
6336 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6337 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6338 
6339 		/* Update freesync active state. */
6340 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
6341 
6342 		/* Handle vrr on->off / off->on transitions */
6343 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
6344 						dm_new_crtc_state);
6345 	}
6346 
6347 	/* Enable interrupts for CRTCs going through a modeset. */
6348 	amdgpu_dm_enable_crtc_interrupts(dev, state, true);
6349 
6350 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
6351 		if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
6352 			wait_for_vblank = false;
6353 
6354 	/* update planes when needed per crtc*/
6355 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
6356 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6357 
6358 		if (dm_new_crtc_state->stream)
6359 			amdgpu_dm_commit_planes(state, dc_state, dev,
6360 						dm, crtc, wait_for_vblank);
6361 	}
6362 
6363 	/* Enable interrupts for CRTCs going from 0 to n active planes. */
6364 	amdgpu_dm_enable_crtc_interrupts(dev, state, false);
6365 
6366 	/* Update audio instances for each connector. */
6367 	amdgpu_dm_commit_audio(dev, state);
6368 
6369 	/*
6370 	 * send vblank event on all events not handled in flip and
6371 	 * mark consumed event for drm_atomic_helper_commit_hw_done
6372 	 */
6373 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6374 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
6375 
6376 		if (new_crtc_state->event)
6377 			drm_send_event_locked(dev, &new_crtc_state->event->base);
6378 
6379 		new_crtc_state->event = NULL;
6380 	}
6381 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6382 
6383 	/* Signal HW programming completion */
6384 	drm_atomic_helper_commit_hw_done(state);
6385 
6386 	if (wait_for_vblank)
6387 		drm_atomic_helper_wait_for_flip_done(dev, state);
6388 
6389 	drm_atomic_helper_cleanup_planes(dev, state);
6390 
6391 	/*
6392 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
6393 	 * so we can put the GPU into runtime suspend if we're not driving any
6394 	 * displays anymore
6395 	 */
6396 	for (i = 0; i < crtc_disable_count; i++)
6397 		pm_runtime_put_autosuspend(dev->dev);
6398 	pm_runtime_mark_last_busy(dev->dev);
6399 
6400 	if (dc_state_temp)
6401 		dc_release_state(dc_state_temp);
6402 }
6403 
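/*
 * Build and commit a minimal atomic state containing just the given
 * connector, its CRTC and the primary plane, with mode_changed forced, in
 * order to restore the previous display configuration without userspace
 * involvement.
 */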
6405 static int dm_force_atomic_commit(struct drm_connector *connector)
6406 {
6407 	int ret = 0;
6408 	struct drm_device *ddev = connector->dev;
6409 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
6410 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
6411 	struct drm_plane *plane = disconnected_acrtc->base.primary;
6412 	struct drm_connector_state *conn_state;
6413 	struct drm_crtc_state *crtc_state;
6414 	struct drm_plane_state *plane_state;
6415 
6416 	if (!state)
6417 		return -ENOMEM;
6418 
6419 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
6420 
6421 	/* Construct an atomic state to restore previous display setting */
6422 
6423 	/*
6424 	 * Attach connectors to drm_atomic_state
6425 	 */
6426 	conn_state = drm_atomic_get_connector_state(state, connector);
6427 
6428 	ret = PTR_ERR_OR_ZERO(conn_state);
6429 	if (ret)
6430 		goto err;
6431 
6432 	/* Attach crtc to drm_atomic_state*/
6433 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
6434 
6435 	ret = PTR_ERR_OR_ZERO(crtc_state);
6436 	if (ret)
6437 		goto err;
6438 
6439 	/* force a restore */
6440 	crtc_state->mode_changed = true;
6441 
6442 	/* Attach plane to drm_atomic_state */
6443 	plane_state = drm_atomic_get_plane_state(state, plane);
6444 
6445 	ret = PTR_ERR_OR_ZERO(plane_state);
6446 	if (ret)
6447 		goto err;
6448 
6450 	/* Call commit internally with the state we just constructed */
6451 	ret = drm_atomic_commit(state);
6452 	if (!ret)
6453 		return 0;
6454 
6455 err:
6456 	DRM_ERROR("Restoring old state failed with %i\n", ret);
6457 	drm_atomic_state_put(state);
6458 
6459 	return ret;
6460 }
6461 
6462 /*
6463  * This function handles all cases when a set mode does not come upon hotplug.
6464  * This includes when a display is unplugged and then plugged back into the
6465  * same port, and when running without usermode desktop manager support.
6466  */
6467 void dm_restore_drm_connector_state(struct drm_device *dev,
6468 				    struct drm_connector *connector)
6469 {
6470 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6471 	struct amdgpu_crtc *disconnected_acrtc;
6472 	struct dm_crtc_state *acrtc_state;
6473 
6474 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
6475 		return;
6476 
6477 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
6478 	if (!disconnected_acrtc)
6479 		return;
6480 
6481 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
6482 	if (!acrtc_state->stream)
6483 		return;
6484 
6485 	/*
6486 	 * If the previous sink is not released and differs from the current
6487 	 * one, we deduce we are in a state where we cannot rely on a usermode
6488 	 * call to turn on the display, so we do it here.
6489 	 */
6490 	if (acrtc_state->stream->sink != aconnector->dc_sink)
6491 		dm_force_atomic_commit(&aconnector->base);
6492 }
6493 
6494 /*
6495  * Grabs all modesetting locks to serialize against any blocking commits,
6496  * and waits for completion of all non-blocking commits.
6497  */
6498 static int do_aquire_global_lock(struct drm_device *dev,
6499 				 struct drm_atomic_state *state)
6500 {
6501 	struct drm_crtc *crtc;
6502 	struct drm_crtc_commit *commit;
6503 	long ret;
6504 
6505 	/*
6506 	 * Adding all modeset locks to acquire_ctx ensures that when the
6507 	 * framework releases it, the extra locks we are taking here will
6508 	 * also get released.
6509 	 */
6510 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
6511 	if (ret)
6512 		return ret;
6513 
6514 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6515 		spin_lock(&crtc->commit_lock);
6516 		commit = list_first_entry_or_null(&crtc->commit_list,
6517 				struct drm_crtc_commit, commit_entry);
6518 		if (commit)
6519 			drm_crtc_commit_get(commit);
6520 		spin_unlock(&crtc->commit_lock);
6521 
6522 		if (!commit)
6523 			continue;
6524 
6525 		/*
6526 		 * Make sure all pending HW programming has completed and
6527 		 * all page flips are done.
6528 		 */
6529 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
6530 
6531 		if (ret > 0)
6532 			ret = wait_for_completion_interruptible_timeout(
6533 					&commit->flip_done, 10*HZ);
6534 
6535 		if (ret == 0)
6536 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
6537 				  "timed out\n", crtc->base.id, crtc->name);
6538 
6539 		drm_crtc_commit_put(commit);
6540 	}
6541 
6542 	return ret < 0 ? ret : 0;
6543 }
6544 
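/*
 * Derive the freesync configuration for a CRTC from its connector. VRR is
 * supported when the connector reports freesync capability and the mode's
 * vrefresh falls within [min_vfreq, max_vfreq]; the refresh bounds are
 * handed to the freesync module in uHz (Hz * 1,000,000).
 */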
6545 static void get_freesync_config_for_crtc(
6546 	struct dm_crtc_state *new_crtc_state,
6547 	struct dm_connector_state *new_con_state)
6548 {
6549 	struct mod_freesync_config config = {0};
6550 	struct amdgpu_dm_connector *aconnector =
6551 			to_amdgpu_dm_connector(new_con_state->base.connector);
6552 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
6553 	int vrefresh = drm_mode_vrefresh(mode);
6554 
6555 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
6556 					vrefresh >= aconnector->min_vfreq &&
6557 					vrefresh <= aconnector->max_vfreq;
6558 
6559 	if (new_crtc_state->vrr_supported) {
6560 		new_crtc_state->stream->ignore_msa_timing_param = true;
6561 		config.state = new_crtc_state->base.vrr_enabled ?
6562 				VRR_STATE_ACTIVE_VARIABLE :
6563 				VRR_STATE_INACTIVE;
6564 		config.min_refresh_in_uhz =
6565 				aconnector->min_vfreq * 1000000;
6566 		config.max_refresh_in_uhz =
6567 				aconnector->max_vfreq * 1000000;
6568 		config.vsif_supported = true;
6569 		config.btr = true;
6570 	}
6571 
6572 	new_crtc_state->freesync_config = config;
6573 }
6574 
6575 static void reset_freesync_config_for_crtc(
6576 	struct dm_crtc_state *new_crtc_state)
6577 {
6578 	new_crtc_state->vrr_supported = false;
6579 
6580 	memset(&new_crtc_state->vrr_params, 0,
6581 	       sizeof(new_crtc_state->vrr_params));
6582 	memset(&new_crtc_state->vrr_infopacket, 0,
6583 	       sizeof(new_crtc_state->vrr_infopacket));
6584 }
6585 
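/*
 * Validate and apply CRTC changes during atomic check. This runs in two
 * passes: with enable == false to remove changed/disabled streams from the
 * DC context, and with enable == true to add updated/enabled ones, mirroring
 * the disable/enable split used for planes.
 */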
6586 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
6587 				struct drm_atomic_state *state,
6588 				struct drm_crtc *crtc,
6589 				struct drm_crtc_state *old_crtc_state,
6590 				struct drm_crtc_state *new_crtc_state,
6591 				bool enable,
6592 				bool *lock_and_validation_needed)
6593 {
6594 	struct dm_atomic_state *dm_state = NULL;
6595 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
6596 	struct dc_stream_state *new_stream;
6597 	int ret = 0;
6598 
6599 	/*
6600 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
6601 	 * update changed items
6602 	 */
6603 	struct amdgpu_crtc *acrtc = NULL;
6604 	struct amdgpu_dm_connector *aconnector = NULL;
6605 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
6606 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
6607 
6608 	new_stream = NULL;
6609 
6610 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6611 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6612 	acrtc = to_amdgpu_crtc(crtc);
6613 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
6614 
6615 	/* TODO This hack should go away */
6616 	if (aconnector && enable) {
6617 		/* Make sure fake sink is created in plug-in scenario */
6618 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
6619 							    &aconnector->base);
6620 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
6621 							    &aconnector->base);
6622 
6623 		if (IS_ERR(drm_new_conn_state)) {
6624 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
6625 			goto fail;
6626 		}
6627 
6628 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
6629 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
6630 
6631 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6632 			goto skip_modeset;
6633 
6634 		new_stream = create_stream_for_sink(aconnector,
6635 						     &new_crtc_state->mode,
6636 						    dm_new_conn_state,
6637 						    dm_old_crtc_state->stream);
6638 
6639 		/*
6640 		 * We can have no stream on ACTION_SET if a display
6641 		 * was disconnected during S3; in this case it is not an
6642 		 * error, the OS will be updated after detection and
6643 		 * will do the right thing on the next atomic commit.
6644 		 */
6645 
6646 		if (!new_stream) {
6647 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
6648 					__func__, acrtc->base.base.id);
6649 			ret = -ENOMEM;
6650 			goto fail;
6651 		}
6652 
6653 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
6654 
6655 		ret = fill_hdr_info_packet(drm_new_conn_state,
6656 					   &new_stream->hdr_static_metadata);
6657 		if (ret)
6658 			goto fail;
6659 
6660 		/*
6661 		 * If we already removed the old stream from the context
6662 		 * (and set the new stream to NULL) then we can't reuse
6663 		 * the old stream even if the stream and scaling are unchanged.
6664 		 * We'll hit the BUG_ON and get a black screen.
6665 		 *
6666 		 * TODO: Refactor this function to allow this check to work
6667 		 * in all conditions.
6668 		 */
6669 		if (dm_new_crtc_state->stream &&
6670 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
6671 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
6672 			new_crtc_state->mode_changed = false;
6673 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
6674 					 new_crtc_state->mode_changed);
6675 		}
6676 	}
6677 
6678 	/* mode_changed flag may get updated above, need to check again */
6679 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6680 		goto skip_modeset;
6681 
6682 	DRM_DEBUG_DRIVER(
6683 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
6684 		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
6685 		"connectors_changed:%d\n",
6686 		acrtc->crtc_id,
6687 		new_crtc_state->enable,
6688 		new_crtc_state->active,
6689 		new_crtc_state->planes_changed,
6690 		new_crtc_state->mode_changed,
6691 		new_crtc_state->active_changed,
6692 		new_crtc_state->connectors_changed);
6693 
6694 	/* Remove stream for any changed/disabled CRTC */
6695 	if (!enable) {
6696 
6697 		if (!dm_old_crtc_state->stream)
6698 			goto skip_modeset;
6699 
6700 		ret = dm_atomic_get_state(state, &dm_state);
6701 		if (ret)
6702 			goto fail;
6703 
6704 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
6705 				crtc->base.id);
6706 
6707 		/* i.e. reset mode */
6708 		if (dc_remove_stream_from_ctx(
6709 				dm->dc,
6710 				dm_state->context,
6711 				dm_old_crtc_state->stream) != DC_OK) {
6712 			ret = -EINVAL;
6713 			goto fail;
6714 		}
6715 
6716 		dc_stream_release(dm_old_crtc_state->stream);
6717 		dm_new_crtc_state->stream = NULL;
6718 
6719 		reset_freesync_config_for_crtc(dm_new_crtc_state);
6720 
6721 		*lock_and_validation_needed = true;
6722 
6723 	} else {/* Add stream for any updated/enabled CRTC */
6724 		/*
6725 		 * Quick fix to prevent a NULL pointer dereference on new_stream
6726 		 * when added MST connectors are not found in the existing
6727 		 * crtc_state in chained mode. TODO: need to dig out the root cause.
6728 		 */
6729 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
6730 			goto skip_modeset;
6731 
6732 		if (modereset_required(new_crtc_state))
6733 			goto skip_modeset;
6734 
6735 		if (modeset_required(new_crtc_state, new_stream,
6736 				     dm_old_crtc_state->stream)) {
6737 
6738 			WARN_ON(dm_new_crtc_state->stream);
6739 
6740 			ret = dm_atomic_get_state(state, &dm_state);
6741 			if (ret)
6742 				goto fail;
6743 
6744 			dm_new_crtc_state->stream = new_stream;
6745 
6746 			dc_stream_retain(new_stream);
6747 
6748 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
6749 						crtc->base.id);
6750 
6751 			if (dc_add_stream_to_ctx(
6752 					dm->dc,
6753 					dm_state->context,
6754 					dm_new_crtc_state->stream) != DC_OK) {
6755 				ret = -EINVAL;
6756 				goto fail;
6757 			}
6758 
6759 			*lock_and_validation_needed = true;
6760 		}
6761 	}
6762 
6763 skip_modeset:
6764 	/* Release extra reference */
6765 	if (new_stream)
6766 		dc_stream_release(new_stream);
6767 
6768 	/*
6769 	 * We want to do dc stream updates that do not require a
6770 	 * full modeset below.
6771 	 */
6772 	if (!(enable && aconnector && new_crtc_state->enable &&
6773 	      new_crtc_state->active))
6774 		return 0;
6775 	/*
6776 	 * Given above conditions, the dc state cannot be NULL because:
6777 	 * 1. We're in the process of enabling CRTCs (just been added
6778 	 *    to the dc context, or already is on the context)
6779 	 * 2. Has a valid connector attached, and
6780 	 * 3. Is currently active and enabled.
6781 	 * => The dc stream state currently exists.
6782 	 */
6783 	BUG_ON(dm_new_crtc_state->stream == NULL);
6784 
6785 	/* Scaling or underscan settings */
6786 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
6787 		update_stream_scaling_settings(
6788 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
6789 
6790 	/* ABM settings */
6791 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
6792 
6793 	/*
6794 	 * Color management settings. We also update color properties
6795 	 * when a modeset is needed, to ensure it gets reprogrammed.
6796 	 */
6797 	if (dm_new_crtc_state->base.color_mgmt_changed ||
6798 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
6799 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
6800 		if (ret)
6801 			goto fail;
6802 	}
6803 
6804 	/* Update Freesync settings. */
6805 	get_freesync_config_for_crtc(dm_new_crtc_state,
6806 				     dm_new_conn_state);
6807 
6808 	return ret;
6809 
6810 fail:
6811 	if (new_stream)
6812 		dc_stream_release(new_stream);
6813 	return ret;
6814 }
6815 
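/*
 * Decide whether a plane change requires removing and recreating the DC
 * plane state. A CRTC change, color management change, modeset, or the
 * addition, removal or format change of another plane on the same CRTC all
 * force a reset; otherwise the plane can be updated in place.
 */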
6816 static bool should_reset_plane(struct drm_atomic_state *state,
6817 			       struct drm_plane *plane,
6818 			       struct drm_plane_state *old_plane_state,
6819 			       struct drm_plane_state *new_plane_state)
6820 {
6821 	struct drm_plane *other;
6822 	struct drm_plane_state *old_other_state, *new_other_state;
6823 	struct drm_crtc_state *new_crtc_state;
6824 	int i;
6825 
6826 	/*
6827 	 * TODO: Remove this hack once the checks below are sufficient to
6828 	 * determine when we need to reset all the planes on the stream.
6830 	 */
6831 	if (state->allow_modeset)
6832 		return true;
6833 
6834 	/* Exit early if we know that we're adding or removing the plane. */
6835 	if (old_plane_state->crtc != new_plane_state->crtc)
6836 		return true;
6837 
6838 	/* old crtc == new_crtc == NULL, plane not in context. */
6839 	if (!new_plane_state->crtc)
6840 		return false;
6841 
6842 	new_crtc_state =
6843 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
6844 
6845 	if (!new_crtc_state)
6846 		return true;
6847 
6848 	/* CRTC Degamma changes currently require us to recreate planes. */
6849 	if (new_crtc_state->color_mgmt_changed)
6850 		return true;
6851 
6852 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
6853 		return true;
6854 
6855 	/*
6856 	 * If there are any new primary or overlay planes being added or
6857 	 * removed then the z-order can potentially change. To ensure
6858 	 * correct z-order and pipe acquisition the current DC architecture
6859 	 * requires us to remove and recreate all existing planes.
6860 	 *
6861 	 * TODO: Come up with a more elegant solution for this.
6862 	 */
6863 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6864 		if (other->type == DRM_PLANE_TYPE_CURSOR)
6865 			continue;
6866 
6867 		if (old_other_state->crtc != new_plane_state->crtc &&
6868 		    new_other_state->crtc != new_plane_state->crtc)
6869 			continue;
6870 
6871 		if (old_other_state->crtc != new_other_state->crtc)
6872 			return true;
6873 
6874 		/* TODO: Remove this once we can handle fast format changes. */
6875 		if (old_other_state->fb && new_other_state->fb &&
6876 		    old_other_state->fb->format != new_other_state->fb->format)
6877 			return true;
6878 	}
6879 
6880 	return false;
6881 }
6882 
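/*
 * Remove changed/disabled planes from, or add updated/enabled planes to,
 * the DC context, depending on the enable flag. Cursor planes are skipped
 * here since they are not handled through DC plane states.
 */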
6883 static int dm_update_plane_state(struct dc *dc,
6884 				 struct drm_atomic_state *state,
6885 				 struct drm_plane *plane,
6886 				 struct drm_plane_state *old_plane_state,
6887 				 struct drm_plane_state *new_plane_state,
6888 				 bool enable,
6889 				 bool *lock_and_validation_needed)
6890 {
6892 	struct dm_atomic_state *dm_state = NULL;
6893 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
6894 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6895 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
6896 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
6897 	bool needs_reset;
6898 	int ret = 0;
6899 
6901 	new_plane_crtc = new_plane_state->crtc;
6902 	old_plane_crtc = old_plane_state->crtc;
6903 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
6904 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
6905 
6906 	/*TODO Implement atomic check for cursor plane */
6907 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
6908 		return 0;
6909 
6910 	needs_reset = should_reset_plane(state, plane, old_plane_state,
6911 					 new_plane_state);
6912 
6913 	/* Remove any changed/removed planes */
6914 	if (!enable) {
6915 		if (!needs_reset)
6916 			return 0;
6917 
6918 		if (!old_plane_crtc)
6919 			return 0;
6920 
6921 		old_crtc_state = drm_atomic_get_old_crtc_state(
6922 				state, old_plane_crtc);
6923 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6924 
6925 		if (!dm_old_crtc_state->stream)
6926 			return 0;
6927 
6928 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
6929 				plane->base.id, old_plane_crtc->base.id);
6930 
6931 		ret = dm_atomic_get_state(state, &dm_state);
6932 		if (ret)
6933 			return ret;
6934 
		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			ret = -EINVAL;
			return ret;
		}
6946 		dc_plane_state_release(dm_old_plane_state->dc_state);
6947 		dm_new_plane_state->dc_state = NULL;
6948 
6949 		*lock_and_validation_needed = true;
6950 
6951 	} else { /* Add new planes */
6952 		struct dc_plane_state *dc_new_plane_state;
6953 
6954 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
6955 			return 0;
6956 
6957 		if (!new_plane_crtc)
6958 			return 0;
6959 
6960 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
6961 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6962 
6963 		if (!dm_new_crtc_state->stream)
6964 			return 0;
6965 
6966 		if (!needs_reset)
6967 			return 0;
6968 
6969 		WARN_ON(dm_new_plane_state->dc_state);
6970 
6971 		dc_new_plane_state = dc_create_plane_state(dc);
6972 		if (!dc_new_plane_state)
6973 			return -ENOMEM;
6974 
6975 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
6976 				plane->base.id, new_plane_crtc->base.id);
6977 
6978 		ret = fill_dc_plane_attributes(
6979 			new_plane_crtc->dev->dev_private,
6980 			dc_new_plane_state,
6981 			new_plane_state,
6982 			new_crtc_state);
6983 		if (ret) {
6984 			dc_plane_state_release(dc_new_plane_state);
6985 			return ret;
6986 		}
6987 
6988 		ret = dm_atomic_get_state(state, &dm_state);
6989 		if (ret) {
6990 			dc_plane_state_release(dc_new_plane_state);
6991 			return ret;
6992 		}
6993 
6994 		/*
6995 		 * Any atomic check errors that occur after this will
6996 		 * not need a release. The plane state will be attached
6997 		 * to the stream, and therefore part of the atomic
6998 		 * state. It'll be released when the atomic state is
6999 		 * cleaned.
7000 		 */
7001 		if (!dc_add_plane_to_context(
7002 				dc,
7003 				dm_new_crtc_state->stream,
7004 				dc_new_plane_state,
7005 				dm_state->context)) {
7006 
7007 			dc_plane_state_release(dc_new_plane_state);
7008 			return -EINVAL;
7009 		}
7010 
7011 		dm_new_plane_state->dc_state = dc_new_plane_state;
7012 
		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
7016 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7017 
7018 		*lock_and_validation_needed = true;
7019 	}
7022 	return ret;
7023 }
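
/*
 * Note on dc_plane_state lifetime, as assumed above: dc_create_plane_state()
 * returns a state holding one reference. On the add path that reference is
 * handed over to the stream by dc_add_plane_to_context(), so later atomic
 * check failures must not release it again; it is dropped when the atomic
 * state is destroyed. Error paths hit before the add still own the reference
 * and must call dc_plane_state_release() themselves.
 */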
7024 
7025 static int
7026 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
7027 				    struct drm_atomic_state *state,
7028 				    enum surface_update_type *out_type)
7029 {
7030 	struct dc *dc = dm->dc;
7031 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
7032 	int i, j, num_plane, ret = 0;
7033 	struct drm_plane_state *old_plane_state, *new_plane_state;
7034 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
7035 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7036 	struct drm_plane *plane;
7037 
7038 	struct drm_crtc *crtc;
7039 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
7040 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
7041 	struct dc_stream_status *status = NULL;
7042 
7043 	struct dc_surface_update *updates;
7044 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
7045 
7046 	updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
7047 
7048 	if (!updates) {
7049 		DRM_ERROR("Failed to allocate plane updates\n");
		/* Set type to FULL to avoid crashing in DC. */
7051 		update_type = UPDATE_TYPE_FULL;
7052 		goto cleanup;
7053 	}
7054 
7055 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7056 		struct dc_scaling_info scaling_info;
7057 		struct dc_stream_update stream_update;
7058 
7059 		memset(&stream_update, 0, sizeof(stream_update));
7060 
7061 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7062 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
7063 		num_plane = 0;
7064 
7065 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
7066 			update_type = UPDATE_TYPE_FULL;
7067 			goto cleanup;
7068 		}
7069 
7070 		if (!new_dm_crtc_state->stream)
7071 			continue;
7072 
7073 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
7074 			const struct amdgpu_framebuffer *amdgpu_fb =
7075 				to_amdgpu_framebuffer(new_plane_state->fb);
7076 			struct dc_plane_info plane_info;
7077 			struct dc_flip_addrs flip_addr;
7078 			uint64_t tiling_flags;
7079 
7080 			new_plane_crtc = new_plane_state->crtc;
7081 			old_plane_crtc = old_plane_state->crtc;
7082 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
7083 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
7084 
7085 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
7086 				continue;
7087 
7088 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
7089 				update_type = UPDATE_TYPE_FULL;
7090 				goto cleanup;
7091 			}
7092 
7093 			if (crtc != new_plane_crtc)
7094 				continue;
7095 
7096 			updates[num_plane].surface = new_dm_plane_state->dc_state;
7097 
7098 			if (new_crtc_state->mode_changed) {
7099 				stream_update.dst = new_dm_crtc_state->stream->dst;
7100 				stream_update.src = new_dm_crtc_state->stream->src;
7101 			}
7102 
7103 			if (new_crtc_state->color_mgmt_changed) {
7104 				updates[num_plane].gamma =
7105 						new_dm_plane_state->dc_state->gamma_correction;
7106 				updates[num_plane].in_transfer_func =
7107 						new_dm_plane_state->dc_state->in_transfer_func;
7108 				stream_update.gamut_remap =
7109 						&new_dm_crtc_state->stream->gamut_remap_matrix;
7110 				stream_update.output_csc_transform =
7111 						&new_dm_crtc_state->stream->csc_color_matrix;
7112 				stream_update.out_transfer_func =
7113 						new_dm_crtc_state->stream->out_transfer_func;
7114 			}
7115 
7116 			ret = fill_dc_scaling_info(new_plane_state,
7117 						   &scaling_info);
7118 			if (ret)
7119 				goto cleanup;
7120 
7121 			updates[num_plane].scaling_info = &scaling_info;
7122 
7123 			if (amdgpu_fb) {
7124 				ret = get_fb_info(amdgpu_fb, &tiling_flags);
7125 				if (ret)
7126 					goto cleanup;
7127 
7128 				memset(&flip_addr, 0, sizeof(flip_addr));
7129 
7130 				ret = fill_dc_plane_info_and_addr(
7131 					dm->adev, new_plane_state, tiling_flags,
7132 					&plane_info,
7133 					&flip_addr.address);
7134 				if (ret)
7135 					goto cleanup;
7136 
7137 				updates[num_plane].plane_info = &plane_info;
7138 				updates[num_plane].flip_addr = &flip_addr;
7139 			}
7140 
7141 			num_plane++;
7142 		}
7143 
7144 		if (num_plane == 0)
7145 			continue;
7146 
7147 		ret = dm_atomic_get_state(state, &dm_state);
7148 		if (ret)
7149 			goto cleanup;
7150 
7151 		old_dm_state = dm_atomic_get_old_state(state);
7152 		if (!old_dm_state) {
7153 			ret = -EINVAL;
7154 			goto cleanup;
7155 		}
7156 
7157 		status = dc_stream_get_status_from_state(old_dm_state->context,
7158 							 new_dm_crtc_state->stream);
7159 
7160 		/*
7161 		 * TODO: DC modifies the surface during this call so we need
7162 		 * to lock here - find a way to do this without locking.
7163 		 */
7164 		mutex_lock(&dm->dc_lock);
7165 		update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
7166 								  &stream_update, status);
7167 		mutex_unlock(&dm->dc_lock);
7168 
7169 		if (update_type > UPDATE_TYPE_MED) {
7170 			update_type = UPDATE_TYPE_FULL;
7171 			goto cleanup;
7172 		}
7173 	}
7174 
7175 cleanup:
7176 	kfree(updates);
7177 
7178 	*out_type = update_type;
7179 	return ret;
7180 }
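
/*
 * For reference, a sketch of the ordering dm_determine_update_type_for_commit()
 * relies on: the surface_update_type values defined by DC are assumed to be
 * monotonically ordered, so that integer comparisons such as
 * "update_type > UPDATE_TYPE_MED" are meaningful. The comments below are a
 * paraphrase, not the authoritative DC definitions.
 */
#if 0
enum surface_update_type {
	UPDATE_TYPE_FAST, /* no bandwidth/clock or pipe topology changes */
	UPDATE_TYPE_MED,  /* most programming needed, no resource shuffling */
	UPDATE_TYPE_FULL, /* may need to shuffle resources and revalidate */
};
#endif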
7181 
7182 /**
7183  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
7184  * @dev: The DRM device
7185  * @state: The atomic state to commit
7186  *
7187  * Validate that the given atomic state is programmable by DC into hardware.
7188  * This involves constructing a &struct dc_state reflecting the new hardware
7189  * state we wish to commit, then querying DC to see if it is programmable. It's
7190  * important not to modify the existing DC state. Otherwise, atomic_check
7191  * may unexpectedly commit hardware changes.
7192  *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes, adds, or updates streams on
 * one CRTC while flipping on another, acquiring the global lock guarantees
 * that any such commit will wait for completion of any outstanding flips
 * using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
7199  *
7200  * Note that DM adds the affected connectors for all CRTCs in state, when that
7201  * might not seem necessary. This is because DC stream creation requires the
7202  * DC sink, which is tied to the DRM connector state. Cleaning this up should
7203  * be possible but non-trivial - a possible TODO item.
7204  *
 * Return: 0 on success, or a negative error code on failure.
7206  */
7207 static int amdgpu_dm_atomic_check(struct drm_device *dev,
7208 				  struct drm_atomic_state *state)
7209 {
7210 	struct amdgpu_device *adev = dev->dev_private;
7211 	struct dm_atomic_state *dm_state = NULL;
7212 	struct dc *dc = adev->dm.dc;
7213 	struct drm_connector *connector;
7214 	struct drm_connector_state *old_con_state, *new_con_state;
7215 	struct drm_crtc *crtc;
7216 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7217 	struct drm_plane *plane;
7218 	struct drm_plane_state *old_plane_state, *new_plane_state;
7219 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
7220 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
7221 
7222 	int ret, i;
7223 
7224 	/*
	 * This bool will be set to true for any modeset/reset
	 * or plane update which implies a non-fast surface update.
7227 	 */
7228 	bool lock_and_validation_needed = false;
7229 
7230 	ret = drm_atomic_helper_check_modeset(dev, state);
7231 	if (ret)
7232 		goto fail;
7233 
7234 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7235 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
7236 		    !new_crtc_state->color_mgmt_changed &&
7237 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
7238 			continue;
7239 
7240 		if (!new_crtc_state->enable)
7241 			continue;
7242 
7243 		ret = drm_atomic_add_affected_connectors(state, crtc);
7244 		if (ret)
			goto fail;
7246 
7247 		ret = drm_atomic_add_affected_planes(state, crtc);
7248 		if (ret)
7249 			goto fail;
7250 	}
7251 
7252 	/*
7253 	 * Add all primary and overlay planes on the CRTC to the state
7254 	 * whenever a plane is enabled to maintain correct z-ordering
7255 	 * and to enable fast surface updates.
7256 	 */
7257 	drm_for_each_crtc(crtc, dev) {
7258 		bool modified = false;
7259 
7260 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7261 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
7262 				continue;
7263 
7264 			if (new_plane_state->crtc == crtc ||
7265 			    old_plane_state->crtc == crtc) {
7266 				modified = true;
7267 				break;
7268 			}
7269 		}
7270 
7271 		if (!modified)
7272 			continue;
7273 
7274 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
7275 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
7276 				continue;
7277 
7278 			new_plane_state =
7279 				drm_atomic_get_plane_state(state, plane);
7280 
7281 			if (IS_ERR(new_plane_state)) {
7282 				ret = PTR_ERR(new_plane_state);
7283 				goto fail;
7284 			}
7285 		}
7286 	}
7287 
	/* Remove existing planes if they are modified */
7289 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
7290 		ret = dm_update_plane_state(dc, state, plane,
7291 					    old_plane_state,
7292 					    new_plane_state,
7293 					    false,
7294 					    &lock_and_validation_needed);
7295 		if (ret)
7296 			goto fail;
7297 	}
7298 
	/* Disable all CRTCs which require disabling */
7300 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7301 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
7302 					   old_crtc_state,
7303 					   new_crtc_state,
7304 					   false,
7305 					   &lock_and_validation_needed);
7306 		if (ret)
7307 			goto fail;
7308 	}
7309 
	/* Enable all CRTCs which require enabling */
7311 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7312 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
7313 					   old_crtc_state,
7314 					   new_crtc_state,
7315 					   true,
7316 					   &lock_and_validation_needed);
7317 		if (ret)
7318 			goto fail;
7319 	}
7320 
7321 	/* Add new/modified planes */
7322 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
7323 		ret = dm_update_plane_state(dc, state, plane,
7324 					    old_plane_state,
7325 					    new_plane_state,
7326 					    true,
7327 					    &lock_and_validation_needed);
7328 		if (ret)
7329 			goto fail;
7330 	}
7331 
7332 	/* Run this here since we want to validate the streams we created */
7333 	ret = drm_atomic_helper_check_planes(dev, state);
7334 	if (ret)
7335 		goto fail;
7336 
7337 	if (state->legacy_cursor_update) {
7338 		/*
7339 		 * This is a fast cursor update coming from the plane update
7340 		 * helper, check if it can be done asynchronously for better
7341 		 * performance.
7342 		 */
7343 		state->async_update =
7344 			!drm_atomic_helper_async_check(dev, state);
7345 
7346 		/*
7347 		 * Skip the remaining global validation if this is an async
7348 		 * update. Cursor updates can be done without affecting
7349 		 * state or bandwidth calcs and this avoids the performance
7350 		 * penalty of locking the private state object and
7351 		 * allocating a new dc_state.
7352 		 */
7353 		if (state->async_update)
7354 			return 0;
7355 	}
7356 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling changes validation was removed due to the inability
	 * to commit a new stream into the context without causing a full
	 * reset. Need to decide how to handle this.
	 */
7362 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7363 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7364 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7365 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7366 
7367 		/* Skip any modesets/resets */
7368 		if (!acrtc || drm_atomic_crtc_needs_modeset(
7369 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
7370 			continue;
7371 
		/* Skip anything that is not a scaling or underscan change */
7373 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
7374 			continue;
7375 
7376 		overall_update_type = UPDATE_TYPE_FULL;
7377 		lock_and_validation_needed = true;
7378 	}
7379 
7380 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
7381 	if (ret)
7382 		goto fail;
7383 
7384 	if (overall_update_type < update_type)
7385 		overall_update_type = update_type;
7386 
7387 	/*
	 * lock_and_validation_needed was an old way to determine if we need
	 * to set the global lock. Leaving it in to check if we broke any
	 * corner cases:
	 *   lock_and_validation_needed true  = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 *   lock_and_validation_needed false = UPDATE_TYPE_FAST
7392 	 */
7393 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL\n");
7395 
7396 	if (overall_update_type > UPDATE_TYPE_FAST) {
7397 		ret = dm_atomic_get_state(state, &dm_state);
7398 		if (ret)
7399 			goto fail;
7400 
7401 		ret = do_aquire_global_lock(dev, state);
7402 		if (ret)
7403 			goto fail;
7404 
7405 		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
7406 			ret = -EINVAL;
7407 			goto fail;
7408 		}
7409 	} else {
7410 		/*
7411 		 * The commit is a fast update. Fast updates shouldn't change
7412 		 * the DC context, affect global validation, and can have their
7413 		 * commit work done in parallel with other commits not touching
7414 		 * the same resource. If we have a new DC context as part of
7415 		 * the DM atomic state from validation we need to free it and
7416 		 * retain the existing one instead.
7417 		 */
7418 		struct dm_atomic_state *new_dm_state, *old_dm_state;
7419 
7420 		new_dm_state = dm_atomic_get_new_state(state);
7421 		old_dm_state = dm_atomic_get_old_state(state);
7422 
7423 		if (new_dm_state && old_dm_state) {
7424 			if (new_dm_state->context)
7425 				dc_release_state(new_dm_state->context);
7426 
7427 			new_dm_state->context = old_dm_state->context;
7428 
7429 			if (old_dm_state->context)
7430 				dc_retain_state(old_dm_state->context);
7431 		}
7432 	}
7433 
7434 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7436 		struct dm_crtc_state *dm_new_crtc_state =
7437 			to_dm_crtc_state(new_crtc_state);
7438 
7439 		dm_new_crtc_state->update_type = (int)overall_update_type;
7440 	}
7441 
	/* Must have succeeded */
7443 	WARN_ON(ret);
7444 	return ret;
7445 
7446 fail:
7447 	if (ret == -EDEADLK)
7448 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
7449 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
7450 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
7451 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
7453 
7454 	return ret;
7455 }
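
/*
 * Minimal sketch (illustrative only) of how the check above is wired into
 * DRM: the driver registers it as the atomic_check hook of its
 * drm_mode_config_funcs, next to the matching commit implementation (see
 * amdgpu_dm_mode_funcs elsewhere in this file). The struct name below is
 * hypothetical.
 */
#if 0
static const struct drm_mode_config_funcs example_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};
#endif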
7456 
7457 static bool is_dp_capable_without_timing_msa(struct dc *dc,
7458 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
7459 {
7460 	uint8_t dpcd_data;
7461 	bool capable = false;
7462 
7463 	if (amdgpu_dm_connector->dc_link &&
7464 		dm_helpers_dp_read_dpcd(
7465 				NULL,
7466 				amdgpu_dm_connector->dc_link,
7467 				DP_DOWN_STREAM_PORT_COUNT,
7468 				&dpcd_data,
7469 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
7471 	}
7472 
7473 	return capable;
7474 }
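
/*
 * Background for the check above (sketch, not used by the driver):
 * DP_DOWN_STREAM_PORT_COUNT is DPCD register 0x007, and
 * DP_MSA_TIMING_PAR_IGNORED is bit 6 within it. A sink that sets this bit
 * can regenerate video timing without relying on the MSA parameters, which
 * is what makes variable refresh over DP feasible. An equivalent check using
 * the generic DRM DP helper might look roughly like this, assuming a
 * struct drm_dp_aux *aux for the link:
 */
#if 0
	u8 count;

	if (drm_dp_dpcd_readb(aux, DP_DOWN_STREAM_PORT_COUNT, &count) == 1)
		capable = !!(count & DP_MSA_TIMING_PAR_IGNORED);
#endif
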
7475 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
7476 					struct edid *edid)
7477 {
7478 	int i;
7479 	bool edid_check_required;
7480 	struct detailed_timing *timing;
7481 	struct detailed_non_pixel *data;
7482 	struct detailed_data_monitor_range *range;
7483 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7484 			to_amdgpu_dm_connector(connector);
7485 	struct dm_connector_state *dm_con_state = NULL;
7486 
7487 	struct drm_device *dev = connector->dev;
7488 	struct amdgpu_device *adev = dev->dev_private;
7489 	bool freesync_capable = false;
7490 
7491 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
7493 		goto update;
7494 	}
7495 
7496 	if (!edid) {
7497 		dm_con_state = to_dm_connector_state(connector->state);
7498 
7499 		amdgpu_dm_connector->min_vfreq = 0;
7500 		amdgpu_dm_connector->max_vfreq = 0;
7501 		amdgpu_dm_connector->pixel_clock_mhz = 0;
7502 
7503 		goto update;
7504 	}
7505 
7506 	dm_con_state = to_dm_connector_state(connector->state);
7507 
7508 	edid_check_required = false;
7509 	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink is NULL, could not add the FreeSync module.\n");
7511 		goto update;
7512 	}
7513 	if (!adev->dm.freesync_module)
7514 		goto update;
7515 	/*
	 * If the EDID is non-NULL, restrict FreeSync to DP and eDP sinks.
7517 	 */
7518 	if (edid) {
7519 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
7520 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
7521 			edid_check_required = is_dp_capable_without_timing_msa(
7522 						adev->dm.dc,
7523 						amdgpu_dm_connector);
7524 		}
7525 	}
	if (edid_check_required && (edid->version > 1 ||
7527 	   (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
7530 			timing	= &edid->detailed_timings[i];
7531 			data	= &timing->data.other_data;
7532 			range	= &data->data.range;
7533 			/*
			 * Check if the monitor has a continuous frequency mode.
7535 			 */
7536 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
7537 				continue;
7538 			/*
			 * Check for the range-limits flag only. If flags == 1,
			 * no additional timing information is provided.
			 * Default GTF, GTF secondary curve and CVT are not
			 * supported.
7543 			 */
7544 			if (range->flags != 1)
7545 				continue;
7546 
7547 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
7548 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
7549 			amdgpu_dm_connector->pixel_clock_mhz =
7550 				range->pixel_clock_mhz * 10;
7551 			break;
7552 		}
7553 
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
7559 	}
7560 
7561 update:
7562 	if (dm_con_state)
7563 		dm_con_state->freesync_capable = freesync_capable;
7564 
7565 	if (connector->vrr_capable_property)
7566 		drm_connector_set_vrr_capable_property(connector,
7567 						       freesync_capable);
7568 }
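
/*
 * Usage sketch (illustrative): callers are expected to invoke the function
 * above whenever a connector's EDID changes, e.g. from the detect path, and
 * to pass NULL to clear the cached range when the sink goes away:
 */
#if 0
	/* Sink attached: parse the new EDID for a FreeSync range. */
	amdgpu_dm_update_freesync_caps(connector, aconnector->edid);

	/* Sink removed: reset min/max vfreq and the vrr_capable property. */
	amdgpu_dm_update_freesync_caps(connector, NULL);
#endif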
7569 
7570