/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

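/*
 * Map the dongle type reported in a link's DPCD caps to the DRM
 * subconnector type exposed to userspace.
 */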
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

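/*
 * Refresh the DP subconnector property on a connector. While no sink is
 * attached the property reads back as "unknown".
 */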
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: amdgpu device to query
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: Counter for vertical blanks, or 0 on error.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC on
 *                    which the flip completed
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of the
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

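/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC instance
 *
 * In VRR mode the vblank is handled from here, after the end of the
 * front-porch, since only then are vblank timestamps well defined. Queued
 * pageflip completion events are delivered at the same time.
 */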
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after the end of
		 * front-porch in VRR mode, as vblank timestamping only
		 * gives valid results when done after front-porch. This
		 * also delivers page-flip completion events that were
		 * queued to us if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * The following must happen at the start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

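/*
 * Register the DM audio pins with the DRM audio component framework so
 * the HDA driver can fetch ELDs from DC through the get_eld hook above.
 */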
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

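/*
 * Copy the DMUB firmware and VBIOS into the reserved framebuffer regions,
 * then initialize the DMUB hardware and hook the service into DC. Returns
 * 0 (without doing anything) on ASICs that have no DMUB support.
 */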
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

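/*
 * If debugfs was used to force DSC settings on a connector, mark the
 * attached CRTC state as mode-changed so the overrides take effect on
 * the next commit.
 */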
static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
							   struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct amdgpu_dm_connector *amdgpu_dm_connector;
	struct drm_connector_state *conn_state;
	struct dm_crtc_state *acrtc_state;
	struct drm_crtc_state *crtc_state;
	struct dc_stream_state *stream;
	struct drm_device *dev = adev_to_drm(adev);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {

		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
		conn_state = connector->state;

		if (!(conn_state && conn_state->crtc))
			continue;

		crtc = conn_state->crtc;
		acrtc_state = to_dm_crtc_state(crtc->state);

		if (!(acrtc_state && acrtc_state->stream))
			continue;

		stream = acrtc_state->stream;

		if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
		    amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
			conn_state = drm_atomic_get_connector_state(state, connector);
			crtc_state = drm_atomic_get_crtc_state(state, crtc);

			/* The atomic helpers can return ERR_PTR on allocation failure. */
			if (IS_ERR(conn_state) || IS_ERR(crtc_state))
				continue;

			crtc_state->mode_changed = true;
		}
	}
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter =
			!(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual number of CRTCs in use */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	/* DC may be NULL if amdgpu_dm_init() failed before dc_create(). */
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

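/*
 * Request and validate the DMCU firmware for the ASIC, if it needs one,
 * and register it with the PSP loader. ASICs without a DMCU return 0.
 */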
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	dm_write_reg(adev->dm.dc->ctx, address, value);
}

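/*
 * Load and validate the DMUB firmware, create the DMUB service, and
 * allocate the VRAM regions it needs. ASICs without DMUB support return 0
 * without doing anything.
 */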
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Cache the firmware version before it is logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

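/* Start MST topology management on every connector driving an MST branch. */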
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where ABM is implemented on dmcub,
	 * the dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

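/*
 * Suspend or resume MST topology management around S3. If a topology fails
 * to resume, MST is torn down on it and a hotplug event is scheduled so
 * userspace can re-probe the connector.
 */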
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
				 struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

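/*
 * Commit a context with no streams to DC, detaching all planes and
 * removing all streams from the current context. Used on the GPU reset
 * path before the hardware is reinitialized.
 */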
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}
1734 
1735 static int dm_suspend(void *handle)
1736 {
1737 	struct amdgpu_device *adev = handle;
1738 	struct amdgpu_display_manager *dm = &adev->dm;
1739 	int ret = 0;
1740 
1741 	if (amdgpu_in_reset(adev)) {
1742 		mutex_lock(&dm->dc_lock);
1743 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1744 
1745 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1746 
1747 		amdgpu_dm_commit_zero_streams(dm->dc);
1748 
1749 		amdgpu_dm_irq_suspend(adev);
1750 
1751 		return ret;
1752 	}
1753 
1754 	WARN_ON(adev->dm.cached_state);
1755 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1756 
1757 	s3_handle_mst(adev_to_drm(adev), true);
1758 
1759 	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1763 
1764 	return 0;
1765 }
1766 
1767 static struct amdgpu_dm_connector *
1768 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1769 					     struct drm_crtc *crtc)
1770 {
1771 	uint32_t i;
1772 	struct drm_connector_state *new_con_state;
1773 	struct drm_connector *connector;
1774 	struct drm_crtc *crtc_from_state;
1775 
1776 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1777 		crtc_from_state = new_con_state->crtc;
1778 
1779 		if (crtc_from_state == crtc)
1780 			return to_amdgpu_dm_connector(connector);
1781 	}
1782 
1783 	return NULL;
1784 }
1785 
1786 static void emulated_link_detect(struct dc_link *link)
1787 {
1788 	struct dc_sink_init_data sink_init_data = { 0 };
1789 	struct display_sink_capability sink_caps = { 0 };
1790 	enum dc_edid_status edid_status;
1791 	struct dc_context *dc_ctx = link->ctx;
1792 	struct dc_sink *sink = NULL;
1793 	struct dc_sink *prev_sink = NULL;
1794 
1795 	link->type = dc_connection_none;
1796 	prev_sink = link->local_sink;
1797 
1798 	if (prev_sink != NULL)
1799 		dc_sink_retain(prev_sink);
1800 
1801 	switch (link->connector_signal) {
1802 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1803 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1804 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1805 		break;
1806 	}
1807 
1808 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1809 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1810 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1811 		break;
1812 	}
1813 
1814 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1815 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1816 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1817 		break;
1818 	}
1819 
1820 	case SIGNAL_TYPE_LVDS: {
1821 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1822 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1823 		break;
1824 	}
1825 
1826 	case SIGNAL_TYPE_EDP: {
1827 		sink_caps.transaction_type =
1828 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1829 		sink_caps.signal = SIGNAL_TYPE_EDP;
1830 		break;
1831 	}
1832 
1833 	case SIGNAL_TYPE_DISPLAY_PORT: {
1834 		sink_caps.transaction_type =
1835 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1836 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1837 		break;
1838 	}
1839 
1840 	default:
1841 		DC_ERROR("Invalid connector type! signal:%d\n",
1842 			link->connector_signal);
1843 		return;
1844 	}
1845 
1846 	sink_init_data.link = link;
1847 	sink_init_data.sink_signal = sink_caps.signal;
1848 
1849 	sink = dc_sink_create(&sink_init_data);
1850 	if (!sink) {
1851 		DC_ERROR("Failed to create sink!\n");
1852 		return;
1853 	}
1854 
1855 	/* dc_sink_create returns a new reference */
1856 	link->local_sink = sink;
1857 
1858 	edid_status = dm_helpers_read_local_edid(
1859 			link->ctx,
1860 			link,
1861 			sink);
1862 
1863 	if (edid_status != EDID_OK)
1864 		DC_ERROR("Failed to read EDID");
1865 
1866 }
1867 
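/*
 * Recommit every stream in the given DC state with full surface updates
 * forced, so that the hardware is completely reprogrammed after a GPU reset.
 */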
1868 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1869 				     struct amdgpu_display_manager *dm)
1870 {
1871 	struct {
1872 		struct dc_surface_update surface_updates[MAX_SURFACES];
1873 		struct dc_plane_info plane_infos[MAX_SURFACES];
1874 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1875 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1876 		struct dc_stream_update stream_update;
	} *bundle;
1878 	int k, m;
1879 
1880 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1881 
1882 	if (!bundle) {
1883 		dm_error("Failed to allocate update bundle\n");
1884 		goto cleanup;
1885 	}
1886 
1887 	for (k = 0; k < dc_state->stream_count; k++) {
1888 		bundle->stream_update.stream = dc_state->streams[k];
1889 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
1900 	}
1901 
1902 cleanup:
1903 	kfree(bundle);
1906 }
1907 
1908 static int dm_resume(void *handle)
1909 {
1910 	struct amdgpu_device *adev = handle;
1911 	struct drm_device *ddev = adev_to_drm(adev);
1912 	struct amdgpu_display_manager *dm = &adev->dm;
1913 	struct amdgpu_dm_connector *aconnector;
1914 	struct drm_connector *connector;
1915 	struct drm_connector_list_iter iter;
1916 	struct drm_crtc *crtc;
1917 	struct drm_crtc_state *new_crtc_state;
1918 	struct dm_crtc_state *dm_new_crtc_state;
1919 	struct drm_plane *plane;
1920 	struct drm_plane_state *new_plane_state;
1921 	struct dm_plane_state *dm_new_plane_state;
1922 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1923 	enum dc_connection_type new_connection_type = dc_connection_none;
1924 	struct dc_state *dc_state;
1925 	int i, r, j;
1926 
1927 	if (amdgpu_in_reset(adev)) {
1928 		dc_state = dm->cached_dc_state;
1929 
1930 		r = dm_dmub_hw_init(adev);
1931 		if (r)
1932 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1933 
1934 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1935 		dc_resume(dm->dc);
1936 
1937 		amdgpu_dm_irq_resume_early(adev);
1938 
1939 		for (i = 0; i < dc_state->stream_count; i++) {
1940 			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
1943 					= 0xffffffff;
1944 			}
1945 		}
1946 
1947 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1948 
1949 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1950 
1951 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1952 
1953 		dc_release_state(dm->cached_dc_state);
1954 		dm->cached_dc_state = NULL;
1955 
1956 		amdgpu_dm_irq_resume_late(adev);
1957 
1958 		mutex_unlock(&dm->dc_lock);
1959 
1960 		return 0;
1961 	}
1962 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1963 	dc_release_state(dm_state->context);
1964 	dm_state->context = dc_create_state(dm->dc);
1965 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1966 	dc_resource_state_construct(dm->dc, dm_state->context);
1967 
1968 	/* Before powering on DC we need to re-initialize DMUB. */
1969 	r = dm_dmub_hw_init(adev);
1970 	if (r)
1971 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1972 
1973 	/* power on hardware */
1974 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1975 
1976 	/* program HPD filter */
1977 	dc_resume(dm->dc);
1978 
1979 	/*
1980 	 * early enable HPD Rx IRQ, should be done before set mode as short
1981 	 * pulse interrupts are used for MST
1982 	 */
1983 	amdgpu_dm_irq_resume_early(adev);
1984 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
1986 	s3_handle_mst(ddev, false);
1987 
	/* Do detection */
1989 	drm_connector_list_iter_begin(ddev, &iter);
1990 	drm_for_each_connector_iter(connector, &iter) {
1991 		aconnector = to_amdgpu_dm_connector(connector);
1992 
1993 		/*
1994 		 * this is the case when traversing through already created
1995 		 * MST connectors, should be skipped
1996 		 */
1997 		if (aconnector->mst_port)
1998 			continue;
1999 
2000 		mutex_lock(&aconnector->hpd_lock);
2001 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2002 			DRM_ERROR("KMS: Failed to detect connector\n");
2003 
2004 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2005 			emulated_link_detect(aconnector->dc_link);
2006 		else
2007 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2008 
2009 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2010 			aconnector->fake_enable = false;
2011 
2012 		if (aconnector->dc_sink)
2013 			dc_sink_release(aconnector->dc_sink);
2014 		aconnector->dc_sink = NULL;
2015 		amdgpu_dm_update_connector_after_detect(aconnector);
2016 		mutex_unlock(&aconnector->hpd_lock);
2017 	}
2018 	drm_connector_list_iter_end(&iter);
2019 
2020 	/* Force mode set in atomic commit */
2021 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2022 		new_crtc_state->active_changed = true;
2023 
2024 	/*
2025 	 * atomic_check is expected to create the dc states. We need to release
2026 	 * them here, since they were duplicated as part of the suspend
2027 	 * procedure.
2028 	 */
2029 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2030 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2031 		if (dm_new_crtc_state->stream) {
2032 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2033 			dc_stream_release(dm_new_crtc_state->stream);
2034 			dm_new_crtc_state->stream = NULL;
2035 		}
2036 	}
2037 
2038 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2039 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2040 		if (dm_new_plane_state->dc_state) {
2041 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2042 			dc_plane_state_release(dm_new_plane_state->dc_state);
2043 			dm_new_plane_state->dc_state = NULL;
2044 		}
2045 	}
2046 
2047 	drm_atomic_helper_resume(ddev, dm->cached_state);
2048 
2049 	dm->cached_state = NULL;
2050 
2051 	amdgpu_dm_irq_resume_late(adev);
2052 
2053 	amdgpu_dm_smu_write_watermarks_table(adev);
2054 
2055 	return 0;
2056 }
2057 
2058 /**
2059  * DOC: DM Lifecycle
2060  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2062  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2063  * the base driver's device list to be initialized and torn down accordingly.
2064  *
2065  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2066  */
2067 
2068 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2069 	.name = "dm",
2070 	.early_init = dm_early_init,
2071 	.late_init = dm_late_init,
2072 	.sw_init = dm_sw_init,
2073 	.sw_fini = dm_sw_fini,
2074 	.hw_init = dm_hw_init,
2075 	.hw_fini = dm_hw_fini,
2076 	.suspend = dm_suspend,
2077 	.resume = dm_resume,
2078 	.is_idle = dm_is_idle,
2079 	.wait_for_idle = dm_wait_for_idle,
2080 	.check_soft_reset = dm_check_soft_reset,
2081 	.soft_reset = dm_soft_reset,
2082 	.set_clockgating_state = dm_set_clockgating_state,
2083 	.set_powergating_state = dm_set_powergating_state,
2084 };
2085 
const struct amdgpu_ip_block_version dm_ip_block = {
2088 	.type = AMD_IP_BLOCK_TYPE_DCE,
2089 	.major = 1,
2090 	.minor = 0,
2091 	.rev = 0,
2092 	.funcs = &amdgpu_dm_funcs,
2093 };
2094 
2095 
2096 /**
2097  * DOC: atomic
2098  *
2099  * *WIP*
2100  */
2101 
2102 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2103 	.fb_create = amdgpu_display_user_framebuffer_create,
2104 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2105 	.atomic_check = amdgpu_dm_atomic_check,
2106 	.atomic_commit = amdgpu_dm_atomic_commit,
2107 };
2108 
static const struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2110 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2111 };
2112 
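/*
 * Derive eDP backlight capabilities (AUX vs PWM control and the usable
 * luminance range) from the sink's DPCD ext caps and the connector's HDR
 * metadata.
 */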
2113 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2114 {
2115 	u32 max_cll, min_cll, max, min, q, r;
2116 	struct amdgpu_dm_backlight_caps *caps;
2117 	struct amdgpu_display_manager *dm;
2118 	struct drm_connector *conn_base;
2119 	struct amdgpu_device *adev;
2120 	struct dc_link *link = NULL;
2121 	static const u8 pre_computed_values[] = {
2122 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2123 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2124 
2125 	if (!aconnector || !aconnector->dc_link)
2126 		return;
2127 
2128 	link = aconnector->dc_link;
2129 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2130 		return;
2131 
2132 	conn_base = &aconnector->base;
2133 	adev = drm_to_adev(conn_base->dev);
2134 	dm = &adev->dm;
2135 	caps = &dm->backlight_caps;
2136 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2137 	caps->aux_support = false;
2138 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2139 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2140 
2141 	if (caps->ext_caps->bits.oled == 1 ||
2142 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2143 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2144 		caps->aux_support = true;
2145 
	/*
	 * From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would require floating point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. By Euclid's division algorithm,
	 * CV can be written as CV = 32*q + r. Substituting this into the
	 * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
	 * to pre-compute the values of 50*2**(r/32). They were generated with
	 * the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and can be verified against pre_computed_values above.
	 */
2161 	q = max_cll >> 5;
2162 	r = max_cll % 32;
2163 	max = (1 << q) * pre_computed_values[r];
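	/*
	 * e.g. max_cll = 70 gives q = 2 and r = 6, so
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 nits,
	 * which matches 50*2**(70/32) ~= 227.8 from the formula above.
	 */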
2164 
	// min luminance: maxLum * (CV/255)^2 / 100
	min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2168 
2169 	caps->aux_max_input_signal = max;
2170 	caps->aux_min_input_signal = min;
2171 }
2172 
2173 void amdgpu_dm_update_connector_after_detect(
2174 		struct amdgpu_dm_connector *aconnector)
2175 {
2176 	struct drm_connector *connector = &aconnector->base;
2177 	struct drm_device *dev = connector->dev;
2178 	struct dc_sink *sink;
2179 
2180 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2182 		return;
2183 
2184 	sink = aconnector->dc_link->local_sink;
2185 	if (sink)
2186 		dc_sink_retain(sink);
2187 
2188 	/*
2189 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2190 	 * the connector sink is set to either fake or physical sink depends on link status.
2191 	 * Skip if already done during boot.
2192 	 */
2193 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2194 			&& aconnector->dc_em_sink) {
2195 
2196 		/*
2197 		 * For S3 resume with headless use eml_sink to fake stream
2198 		 * because on resume connector->sink is set to NULL
2199 		 */
2200 		mutex_lock(&dev->mode_config.mutex);
2201 
2202 		if (sink) {
2203 			if (aconnector->dc_sink) {
2204 				amdgpu_dm_update_freesync_caps(connector, NULL);
2205 				/*
2206 				 * retain and release below are used to
2207 				 * bump up refcount for sink because the link doesn't point
2208 				 * to it anymore after disconnect, so on next crtc to connector
2209 				 * reshuffle by UMD we will get into unwanted dc_sink release
2210 				 */
2211 				dc_sink_release(aconnector->dc_sink);
2212 			}
2213 			aconnector->dc_sink = sink;
2214 			dc_sink_retain(aconnector->dc_sink);
2215 			amdgpu_dm_update_freesync_caps(connector,
2216 					aconnector->edid);
2217 		} else {
2218 			amdgpu_dm_update_freesync_caps(connector, NULL);
2219 			if (!aconnector->dc_sink) {
2220 				aconnector->dc_sink = aconnector->dc_em_sink;
2221 				dc_sink_retain(aconnector->dc_sink);
2222 			}
2223 		}
2224 
2225 		mutex_unlock(&dev->mode_config.mutex);
2226 
2227 		if (sink)
2228 			dc_sink_release(sink);
2229 		return;
2230 	}
2231 
2232 	/*
2233 	 * TODO: temporary guard to look for proper fix
2234 	 * if this sink is MST sink, we should not do anything
2235 	 */
2236 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2237 		dc_sink_release(sink);
2238 		return;
2239 	}
2240 
2241 	if (aconnector->dc_sink == sink) {
2242 		/*
2243 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2244 		 * Do nothing!!
2245 		 */
2246 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2247 				aconnector->connector_id);
2248 		if (sink)
2249 			dc_sink_release(sink);
2250 		return;
2251 	}
2252 
2253 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2254 		aconnector->connector_id, aconnector->dc_sink, sink);
2255 
2256 	mutex_lock(&dev->mode_config.mutex);
2257 
2258 	/*
2259 	 * 1. Update status of the drm connector
2260 	 * 2. Send an event and let userspace tell us what to do
2261 	 */
2262 	if (sink) {
2263 		/*
2264 		 * TODO: check if we still need the S3 mode update workaround.
2265 		 * If yes, put it here.
2266 		 */
2267 		if (aconnector->dc_sink)
2268 			amdgpu_dm_update_freesync_caps(connector, NULL);
2269 
2270 		aconnector->dc_sink = sink;
2271 		dc_sink_retain(aconnector->dc_sink);
2272 		if (sink->dc_edid.length == 0) {
2273 			aconnector->edid = NULL;
2274 			if (aconnector->dc_link->aux_mode) {
2275 				drm_dp_cec_unset_edid(
2276 					&aconnector->dm_dp_aux.aux);
2277 			}
2278 		} else {
2279 			aconnector->edid =
2280 				(struct edid *)sink->dc_edid.raw_edid;
2281 
2282 			drm_connector_update_edid_property(connector,
2283 							   aconnector->edid);
2284 			drm_add_edid_modes(connector, aconnector->edid);
2285 
2286 			if (aconnector->dc_link->aux_mode)
2287 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2288 						    aconnector->edid);
2289 		}
2290 
2291 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2292 		update_connector_ext_caps(aconnector);
2293 	} else {
2294 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2295 		amdgpu_dm_update_freesync_caps(connector, NULL);
2296 		drm_connector_update_edid_property(connector, NULL);
2297 		aconnector->num_modes = 0;
2298 		dc_sink_release(aconnector->dc_sink);
2299 		aconnector->dc_sink = NULL;
2300 		aconnector->edid = NULL;
2301 #ifdef CONFIG_DRM_AMD_DC_HDCP
2302 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2303 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2304 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2305 #endif
2306 	}
2307 
2308 	mutex_unlock(&dev->mode_config.mutex);
2309 
2310 	update_subconnector_property(aconnector);
2311 
2312 	if (sink)
2313 		dc_sink_release(sink);
2314 }
2315 
2316 static void handle_hpd_irq(void *param)
2317 {
2318 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2319 	struct drm_connector *connector = &aconnector->base;
2320 	struct drm_device *dev = connector->dev;
2321 	enum dc_connection_type new_connection_type = dc_connection_none;
2322 #ifdef CONFIG_DRM_AMD_DC_HDCP
2323 	struct amdgpu_device *adev = drm_to_adev(dev);
2324 #endif
2325 
2326 	/*
2327 	 * In case of failure or MST no need to update connector status or notify the OS
2328 	 * since (for MST case) MST does this in its own context.
2329 	 */
2330 	mutex_lock(&aconnector->hpd_lock);
2331 
2332 #ifdef CONFIG_DRM_AMD_DC_HDCP
2333 	if (adev->dm.hdcp_workqueue)
2334 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2335 #endif
2336 	if (aconnector->fake_enable)
2337 		aconnector->fake_enable = false;
2338 
2339 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2340 		DRM_ERROR("KMS: Failed to detect connector\n");
2341 
2342 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2343 		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
2347 		dm_restore_drm_connector_state(dev, connector);
2348 		drm_modeset_unlock_all(dev);
2349 
2350 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2351 			drm_kms_helper_hotplug_event(dev);
2352 
2353 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2354 		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
2358 		dm_restore_drm_connector_state(dev, connector);
2359 		drm_modeset_unlock_all(dev);
2360 
2361 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2362 			drm_kms_helper_hotplug_event(dev);
2363 	}
2364 	mutex_unlock(&aconnector->hpd_lock);
}
2367 
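/*
 * Service MST downstream IRQs: read the ESI (event status indicator)
 * registers, let the MST manager handle them, ACK them back to the sink,
 * and repeat until no new IRQ is reported or the retry budget runs out.
 */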
2368 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2369 {
2370 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2371 	uint8_t dret;
2372 	bool new_irq_handled = false;
2373 	int dpcd_addr;
2374 	int dpcd_bytes_to_read;
2375 
2376 	const int max_process_count = 30;
2377 	int process_count = 0;
2378 
2379 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2380 
2381 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2382 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2383 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2384 		dpcd_addr = DP_SINK_COUNT;
2385 	} else {
2386 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2387 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2388 		dpcd_addr = DP_SINK_COUNT_ESI;
2389 	}
2390 
2391 	dret = drm_dp_dpcd_read(
2392 		&aconnector->dm_dp_aux.aux,
2393 		dpcd_addr,
2394 		esi,
2395 		dpcd_bytes_to_read);
2396 
2397 	while (dret == dpcd_bytes_to_read &&
2398 		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
2401 
2402 		process_count++;
2403 
2404 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2405 		/* handle HPD short pulse irq */
2406 		if (aconnector->mst_mgr.mst_state)
2407 			drm_dp_mst_hpd_irq(
2408 				&aconnector->mst_mgr,
2409 				esi,
2410 				&new_irq_handled);
2411 
2412 		if (new_irq_handled) {
			/* ACK at DPCD to notify the downstream device */
2414 			const int ack_dpcd_bytes_to_write =
2415 				dpcd_bytes_to_read - 1;
2416 
2417 			for (retry = 0; retry < 3; retry++) {
2418 				uint8_t wret;
2419 
2420 				wret = drm_dp_dpcd_write(
2421 					&aconnector->dm_dp_aux.aux,
2422 					dpcd_addr + 1,
2423 					&esi[1],
2424 					ack_dpcd_bytes_to_write);
2425 				if (wret == ack_dpcd_bytes_to_write)
2426 					break;
2427 			}
2428 
2429 			/* check if there is new irq to be handled */
2430 			dret = drm_dp_dpcd_read(
2431 				&aconnector->dm_dp_aux.aux,
2432 				dpcd_addr,
2433 				esi,
2434 				dpcd_bytes_to_read);
2435 
2436 			new_irq_handled = false;
2437 		} else {
2438 			break;
2439 		}
2440 	}
2441 
2442 	if (process_count == max_process_count)
2443 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2444 }
2445 
2446 static void handle_hpd_rx_irq(void *param)
2447 {
2448 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2449 	struct drm_connector *connector = &aconnector->base;
2450 	struct drm_device *dev = connector->dev;
2451 	struct dc_link *dc_link = aconnector->dc_link;
2452 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2453 	enum dc_connection_type new_connection_type = dc_connection_none;
2454 #ifdef CONFIG_DRM_AMD_DC_HDCP
2455 	union hpd_irq_data hpd_irq_data;
2456 	struct amdgpu_device *adev = drm_to_adev(dev);
2457 
2458 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2459 #endif
2460 
2461 	/*
2462 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2463 	 * conflict, after implement i2c helper, this mutex should be
2464 	 * retired.
2465 	 */
2466 	if (dc_link->type != dc_connection_mst_branch)
2467 		mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
2471 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2472 #else
2473 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2474 #endif
2475 			!is_mst_root_connector) {
2476 		/* Downstream Port status changed. */
2477 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2478 			DRM_ERROR("KMS: Failed to detect connector\n");
2479 
2480 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2481 			emulated_link_detect(dc_link);
2482 
2483 			if (aconnector->fake_enable)
2484 				aconnector->fake_enable = false;
2485 
2486 			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
2490 			dm_restore_drm_connector_state(dev, connector);
2491 			drm_modeset_unlock_all(dev);
2492 
2493 			drm_kms_helper_hotplug_event(dev);
2494 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2495 
2496 			if (aconnector->fake_enable)
2497 				aconnector->fake_enable = false;
2498 
2499 			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
2503 			dm_restore_drm_connector_state(dev, connector);
2504 			drm_modeset_unlock_all(dev);
2505 
2506 			drm_kms_helper_hotplug_event(dev);
2507 		}
2508 	}
2509 #ifdef CONFIG_DRM_AMD_DC_HDCP
2510 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2511 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2513 	}
2514 #endif
2515 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2516 	    (dc_link->type == dc_connection_mst_branch))
2517 		dm_handle_hpd_rx_irq(aconnector);
2518 
2519 	if (dc_link->type != dc_connection_mst_branch) {
2520 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2521 		mutex_unlock(&aconnector->hpd_lock);
2522 	}
2523 }
2524 
2525 static void register_hpd_handlers(struct amdgpu_device *adev)
2526 {
2527 	struct drm_device *dev = adev_to_drm(adev);
2528 	struct drm_connector *connector;
2529 	struct amdgpu_dm_connector *aconnector;
2530 	const struct dc_link *dc_link;
2531 	struct dc_interrupt_params int_params = {0};
2532 
2533 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2534 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2535 
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
2538 
2539 		aconnector = to_amdgpu_dm_connector(connector);
2540 		dc_link = aconnector->dc_link;
2541 
2542 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2543 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2544 			int_params.irq_source = dc_link->irq_source_hpd;
2545 
2546 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2547 					handle_hpd_irq,
2548 					(void *) aconnector);
2549 		}
2550 
2551 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2552 
2553 			/* Also register for DP short pulse (hpd_rx). */
2554 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
2556 
2557 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2558 					handle_hpd_rx_irq,
2559 					(void *) aconnector);
2560 		}
2561 	}
2562 }
2563 
2564 #if defined(CONFIG_DRM_AMD_DC_SI)
2565 /* Register IRQ sources and initialize IRQ callbacks */
2566 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2567 {
2568 	struct dc *dc = adev->dm.dc;
2569 	struct common_irq_params *c_irq_params;
2570 	struct dc_interrupt_params int_params = {0};
2571 	int r;
2572 	int i;
2573 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2574 
2575 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2576 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2577 
2578 	/*
2579 	 * Actions of amdgpu_irq_add_id():
2580 	 * 1. Register a set() function with base driver.
2581 	 *    Base driver will call set() function to enable/disable an
2582 	 *    interrupt in DC hardware.
2583 	 * 2. Register amdgpu_dm_irq_handler().
2584 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2585 	 *    coming from DC hardware.
2586 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2587 	 *    for acknowledging and handling. */
2588 
2589 	/* Use VBLANK interrupt */
2590 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2592 		if (r) {
2593 			DRM_ERROR("Failed to add crtc irq id!\n");
2594 			return r;
2595 		}
2596 
2597 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2598 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
2600 
2601 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2602 
2603 		c_irq_params->adev = adev;
2604 		c_irq_params->irq_src = int_params.irq_source;
2605 
2606 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2607 				dm_crtc_high_irq, c_irq_params);
2608 	}
2609 
2610 	/* Use GRPH_PFLIP interrupt */
2611 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2612 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2613 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2614 		if (r) {
2615 			DRM_ERROR("Failed to add page flip irq id!\n");
2616 			return r;
2617 		}
2618 
2619 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2620 		int_params.irq_source =
2621 			dc_interrupt_to_irq_source(dc, i, 0);
2622 
2623 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2624 
2625 		c_irq_params->adev = adev;
2626 		c_irq_params->irq_src = int_params.irq_source;
2627 
2628 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2629 				dm_pflip_high_irq, c_irq_params);
	}
2632 
2633 	/* HPD */
2634 	r = amdgpu_irq_add_id(adev, client_id,
2635 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2636 	if (r) {
2637 		DRM_ERROR("Failed to add hpd irq id!\n");
2638 		return r;
2639 	}
2640 
2641 	register_hpd_handlers(adev);
2642 
2643 	return 0;
2644 }
2645 #endif
2646 
2647 /* Register IRQ sources and initialize IRQ callbacks */
2648 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2649 {
2650 	struct dc *dc = adev->dm.dc;
2651 	struct common_irq_params *c_irq_params;
2652 	struct dc_interrupt_params int_params = {0};
2653 	int r;
2654 	int i;
2655 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2656 
2657 	if (adev->asic_type >= CHIP_VEGA10)
2658 		client_id = SOC15_IH_CLIENTID_DCE;
2659 
2660 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2661 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2662 
2663 	/*
2664 	 * Actions of amdgpu_irq_add_id():
2665 	 * 1. Register a set() function with base driver.
2666 	 *    Base driver will call set() function to enable/disable an
2667 	 *    interrupt in DC hardware.
2668 	 * 2. Register amdgpu_dm_irq_handler().
2669 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2670 	 *    coming from DC hardware.
2671 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2672 	 *    for acknowledging and handling. */
2673 
2674 	/* Use VBLANK interrupt */
2675 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2676 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2677 		if (r) {
2678 			DRM_ERROR("Failed to add crtc irq id!\n");
2679 			return r;
2680 		}
2681 
2682 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2683 		int_params.irq_source =
2684 			dc_interrupt_to_irq_source(dc, i, 0);
2685 
2686 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2687 
2688 		c_irq_params->adev = adev;
2689 		c_irq_params->irq_src = int_params.irq_source;
2690 
2691 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2692 				dm_crtc_high_irq, c_irq_params);
2693 	}
2694 
2695 	/* Use VUPDATE interrupt */
2696 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2697 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2698 		if (r) {
2699 			DRM_ERROR("Failed to add vupdate irq id!\n");
2700 			return r;
2701 		}
2702 
2703 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2704 		int_params.irq_source =
2705 			dc_interrupt_to_irq_source(dc, i, 0);
2706 
2707 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2708 
2709 		c_irq_params->adev = adev;
2710 		c_irq_params->irq_src = int_params.irq_source;
2711 
2712 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2713 				dm_vupdate_high_irq, c_irq_params);
2714 	}
2715 
2716 	/* Use GRPH_PFLIP interrupt */
2717 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2718 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2719 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2720 		if (r) {
2721 			DRM_ERROR("Failed to add page flip irq id!\n");
2722 			return r;
2723 		}
2724 
2725 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2726 		int_params.irq_source =
2727 			dc_interrupt_to_irq_source(dc, i, 0);
2728 
2729 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2730 
2731 		c_irq_params->adev = adev;
2732 		c_irq_params->irq_src = int_params.irq_source;
2733 
2734 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2735 				dm_pflip_high_irq, c_irq_params);
	}
2738 
2739 	/* HPD */
2740 	r = amdgpu_irq_add_id(adev, client_id,
2741 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2742 	if (r) {
2743 		DRM_ERROR("Failed to add hpd irq id!\n");
2744 		return r;
2745 	}
2746 
2747 	register_hpd_handlers(adev);
2748 
2749 	return 0;
2750 }
2751 
2752 #if defined(CONFIG_DRM_AMD_DC_DCN)
2753 /* Register IRQ sources and initialize IRQ callbacks */
2754 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2755 {
2756 	struct dc *dc = adev->dm.dc;
2757 	struct common_irq_params *c_irq_params;
2758 	struct dc_interrupt_params int_params = {0};
2759 	int r;
2760 	int i;
2761 
2762 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2763 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2764 
2765 	/*
2766 	 * Actions of amdgpu_irq_add_id():
2767 	 * 1. Register a set() function with base driver.
2768 	 *    Base driver will call set() function to enable/disable an
2769 	 *    interrupt in DC hardware.
2770 	 * 2. Register amdgpu_dm_irq_handler().
2771 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2772 	 *    coming from DC hardware.
2773 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2774 	 *    for acknowledging and handling.
2775 	 */
2776 
2777 	/* Use VSTARTUP interrupt */
2778 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2779 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2780 			i++) {
2781 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2782 
2783 		if (r) {
2784 			DRM_ERROR("Failed to add crtc irq id!\n");
2785 			return r;
2786 		}
2787 
2788 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2789 		int_params.irq_source =
2790 			dc_interrupt_to_irq_source(dc, i, 0);
2791 
2792 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2793 
2794 		c_irq_params->adev = adev;
2795 		c_irq_params->irq_src = int_params.irq_source;
2796 
2797 		amdgpu_dm_irq_register_interrupt(
2798 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2799 	}
2800 
2801 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2802 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2803 	 * to trigger at end of each vblank, regardless of state of the lock,
2804 	 * matching DCE behaviour.
2805 	 */
2806 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2807 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2808 	     i++) {
2809 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2810 
2811 		if (r) {
2812 			DRM_ERROR("Failed to add vupdate irq id!\n");
2813 			return r;
2814 		}
2815 
2816 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2817 		int_params.irq_source =
2818 			dc_interrupt_to_irq_source(dc, i, 0);
2819 
2820 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2821 
2822 		c_irq_params->adev = adev;
2823 		c_irq_params->irq_src = int_params.irq_source;
2824 
2825 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2826 				dm_vupdate_high_irq, c_irq_params);
2827 	}
2828 
2829 	/* Use GRPH_PFLIP interrupt */
2830 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2831 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2832 			i++) {
2833 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2834 		if (r) {
2835 			DRM_ERROR("Failed to add page flip irq id!\n");
2836 			return r;
2837 		}
2838 
2839 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2840 		int_params.irq_source =
2841 			dc_interrupt_to_irq_source(dc, i, 0);
2842 
2843 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2844 
2845 		c_irq_params->adev = adev;
2846 		c_irq_params->irq_src = int_params.irq_source;
2847 
2848 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2849 				dm_pflip_high_irq, c_irq_params);
	}
2852 
2853 	/* HPD */
2854 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2855 			&adev->hpd_irq);
2856 	if (r) {
2857 		DRM_ERROR("Failed to add hpd irq id!\n");
2858 		return r;
2859 	}
2860 
2861 	register_hpd_handlers(adev);
2862 
2863 	return 0;
2864 }
2865 #endif
2866 
2867 /*
2868  * Acquires the lock for the atomic state object and returns
2869  * the new atomic state.
2870  *
2871  * This should only be called during atomic check.
2872  */
2873 static int dm_atomic_get_state(struct drm_atomic_state *state,
2874 			       struct dm_atomic_state **dm_state)
2875 {
2876 	struct drm_device *dev = state->dev;
2877 	struct amdgpu_device *adev = drm_to_adev(dev);
2878 	struct amdgpu_display_manager *dm = &adev->dm;
2879 	struct drm_private_state *priv_state;
2880 
2881 	if (*dm_state)
2882 		return 0;
2883 
2884 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2885 	if (IS_ERR(priv_state))
2886 		return PTR_ERR(priv_state);
2887 
2888 	*dm_state = to_dm_atomic_state(priv_state);
2889 
2890 	return 0;
2891 }
2892 
2893 static struct dm_atomic_state *
2894 dm_atomic_get_new_state(struct drm_atomic_state *state)
2895 {
2896 	struct drm_device *dev = state->dev;
2897 	struct amdgpu_device *adev = drm_to_adev(dev);
2898 	struct amdgpu_display_manager *dm = &adev->dm;
2899 	struct drm_private_obj *obj;
2900 	struct drm_private_state *new_obj_state;
2901 	int i;
2902 
2903 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2904 		if (obj->funcs == dm->atomic_obj.funcs)
2905 			return to_dm_atomic_state(new_obj_state);
2906 	}
2907 
2908 	return NULL;
2909 }
2910 
2911 static struct drm_private_state *
2912 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2913 {
2914 	struct dm_atomic_state *old_state, *new_state;
2915 
2916 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2917 	if (!new_state)
2918 		return NULL;
2919 
2920 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2921 
2922 	old_state = to_dm_atomic_state(obj->state);
2923 
2924 	if (old_state && old_state->context)
2925 		new_state->context = dc_copy_state(old_state->context);
2926 
2927 	if (!new_state->context) {
2928 		kfree(new_state);
2929 		return NULL;
2930 	}
2931 
2932 	return &new_state->base;
2933 }
2934 
2935 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2936 				    struct drm_private_state *state)
2937 {
2938 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2939 
2940 	if (dm_state && dm_state->context)
2941 		dc_release_state(dm_state->context);
2942 
2943 	kfree(dm_state);
2944 }
2945 
static const struct drm_private_state_funcs dm_atomic_state_funcs = {
2947 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2948 	.atomic_destroy_state = dm_atomic_destroy_state,
2949 };
2950 
2951 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2952 {
2953 	struct dm_atomic_state *state;
2954 	int r;
2955 
2956 	adev->mode_info.mode_config_initialized = true;
2957 
2958 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2959 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2960 
2961 	adev_to_drm(adev)->mode_config.max_width = 16384;
2962 	adev_to_drm(adev)->mode_config.max_height = 16384;
2963 
2964 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2965 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2966 	/* indicates support for immediate flip */
2967 	adev_to_drm(adev)->mode_config.async_page_flip = true;
2968 
2969 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2970 
2971 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2972 	if (!state)
2973 		return -ENOMEM;
2974 
2975 	state->context = dc_create_state(adev->dm.dc);
2976 	if (!state->context) {
2977 		kfree(state);
2978 		return -ENOMEM;
2979 	}
2980 
2981 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2982 
2983 	drm_atomic_private_obj_init(adev_to_drm(adev),
2984 				    &adev->dm.atomic_obj,
2985 				    &state->base,
2986 				    &dm_atomic_state_funcs);
2987 
2988 	r = amdgpu_display_modeset_create_props(adev);
2989 	if (r) {
2990 		dc_release_state(state->context);
2991 		kfree(state);
2992 		return r;
2993 	}
2994 
2995 	r = amdgpu_dm_audio_init(adev);
2996 	if (r) {
2997 		dc_release_state(state->context);
2998 		kfree(state);
2999 		return r;
3000 	}
3001 
3002 	return 0;
3003 }
3004 
3005 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3006 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3007 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3008 
3009 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3010 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3011 
3012 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3013 {
3014 #if defined(CONFIG_ACPI)
3015 	struct amdgpu_dm_backlight_caps caps;
3016 
3017 	memset(&caps, 0, sizeof(caps));
3018 
3019 	if (dm->backlight_caps.caps_valid)
3020 		return;
3021 
3022 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3023 	if (caps.caps_valid) {
3024 		dm->backlight_caps.caps_valid = true;
3025 		if (caps.aux_support)
3026 			return;
3027 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3028 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3029 	} else {
3030 		dm->backlight_caps.min_input_signal =
3031 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3032 		dm->backlight_caps.max_input_signal =
3033 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3034 	}
3035 #else
3036 	if (dm->backlight_caps.aux_support)
3037 		return;
3038 
3039 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3040 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3041 #endif
3042 }
3043 
3044 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3045 {
3046 	bool rc;
3047 
3048 	if (!link)
3049 		return 1;
3050 
3051 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3052 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3053 
3054 	return rc ? 0 : 1;
3055 }
3056 
3057 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3058 				unsigned *min, unsigned *max)
3059 {
3060 	if (!caps)
3061 		return 0;
3062 
3063 	if (caps->aux_support) {
3064 		// Firmware limits are in nits, DC API wants millinits.
3065 		*max = 1000 * caps->aux_max_input_signal;
3066 		*min = 1000 * caps->aux_min_input_signal;
3067 	} else {
3068 		// Firmware limits are 8-bit, PWM control is 16-bit.
3069 		*max = 0x101 * caps->max_input_signal;
3070 		*min = 0x101 * caps->min_input_signal;
3071 	}
3072 	return 1;
3073 }
3074 
3075 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3076 					uint32_t brightness)
3077 {
3078 	unsigned min, max;
3079 
3080 	if (!get_brightness_range(caps, &min, &max))
3081 		return brightness;
3082 
3083 	// Rescale 0..255 to min..max
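	// e.g. on the PWM path with min_input_signal = 12 and
	// max_input_signal = 255: min = 3084, max = 65535, so a user
	// brightness of 128 maps to 3084 + round(62451 * 128 / 255) = 34433.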
3084 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3085 				       AMDGPU_MAX_BL_LEVEL);
3086 }
3087 
3088 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3089 				      uint32_t brightness)
3090 {
3091 	unsigned min, max;
3092 
3093 	if (!get_brightness_range(caps, &min, &max))
3094 		return brightness;
3095 
3096 	if (brightness < min)
3097 		return 0;
3098 	// Rescale min..max to 0..255
3099 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3100 				 max - min);
3101 }
3102 
3103 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3104 {
3105 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3106 	struct amdgpu_dm_backlight_caps caps;
3107 	struct dc_link *link = NULL;
3108 	u32 brightness;
3109 	bool rc;
3110 
3111 	amdgpu_dm_update_backlight_caps(dm);
3112 	caps = dm->backlight_caps;
3113 
3114 	link = (struct dc_link *)dm->backlight_link;
3115 
3116 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3117 	// Change brightness based on AUX property
3118 	if (caps.aux_support)
3119 		return set_backlight_via_aux(link, brightness);
3120 
3121 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3122 
3123 	return rc ? 0 : 1;
3124 }
3125 
3126 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3127 {
3128 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3129 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3130 
3131 	if (ret == DC_ERROR_UNEXPECTED)
3132 		return bd->props.brightness;
3133 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3134 }
3135 
3136 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3137 	.options = BL_CORE_SUSPENDRESUME,
3138 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3139 	.update_status	= amdgpu_dm_backlight_update_status,
3140 };
3141 
3142 static void
3143 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3144 {
3145 	char bl_name[16];
3146 	struct backlight_properties props = { 0 };
3147 
3148 	amdgpu_dm_update_backlight_caps(dm);
3149 
3150 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3151 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3152 	props.type = BACKLIGHT_RAW;
3153 
3154 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3155 		 adev_to_drm(dm->adev)->primary->index);
3156 
3157 	dm->backlight_dev = backlight_device_register(bl_name,
3158 						      adev_to_drm(dm->adev)->dev,
3159 						      dm,
3160 						      &amdgpu_dm_backlight_ops,
3161 						      &props);
3162 
3163 	if (IS_ERR(dm->backlight_dev))
3164 		DRM_ERROR("DM: Backlight registration failed!\n");
3165 	else
3166 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3167 }
3168 
3169 #endif
3170 
3171 static int initialize_plane(struct amdgpu_display_manager *dm,
3172 			    struct amdgpu_mode_info *mode_info, int plane_id,
3173 			    enum drm_plane_type plane_type,
3174 			    const struct dc_plane_cap *plane_cap)
3175 {
3176 	struct drm_plane *plane;
3177 	unsigned long possible_crtcs;
3178 	int ret = 0;
3179 
3180 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3181 	if (!plane) {
3182 		DRM_ERROR("KMS: Failed to allocate plane\n");
3183 		return -ENOMEM;
3184 	}
3185 	plane->type = plane_type;
3186 
3187 	/*
3188 	 * HACK: IGT tests expect that the primary plane for a CRTC
3189 	 * can only have one possible CRTC. Only expose support for
3190 	 * any CRTC if they're not going to be used as a primary plane
3191 	 * for a CRTC - like overlay or underlay planes.
3192 	 */
3193 	possible_crtcs = 1 << plane_id;
3194 	if (plane_id >= dm->dc->caps.max_streams)
3195 		possible_crtcs = 0xff;
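	/* e.g. plane_id 0 -> possible_crtcs = 0x1 (CRTC 0 only); overlays get 0xff */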
3196 
3197 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3198 
3199 	if (ret) {
3200 		DRM_ERROR("KMS: Failed to initialize plane\n");
3201 		kfree(plane);
3202 		return ret;
3203 	}
3204 
3205 	if (mode_info)
3206 		mode_info->planes[plane_id] = plane;
3207 
3208 	return ret;
3209 }
3210 
3211 
3212 static void register_backlight_device(struct amdgpu_display_manager *dm,
3213 				      struct dc_link *link)
3214 {
3215 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3216 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3217 
3218 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3219 	    link->type != dc_connection_none) {
3220 		/*
3221 		 * Event if registration failed, we should continue with
3222 		 * DM initialization because not having a backlight control
3223 		 * is better then a black screen.
3224 		 */
3225 		amdgpu_dm_register_backlight_device(dm);
3226 
3227 		if (dm->backlight_dev)
3228 			dm->backlight_link = link;
3229 	}
3230 #endif
3231 }
3232 
3233 
3234 /*
3235  * In this architecture, the association
3236  * connector -> encoder -> crtc
3237  * id not really requried. The crtc and connector will hold the
3238  * display_index as an abstraction to use with DAL component
3239  *
3240  * Returns 0 on success
3241  */
3242 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3243 {
3244 	struct amdgpu_display_manager *dm = &adev->dm;
3245 	int32_t i;
3246 	struct amdgpu_dm_connector *aconnector = NULL;
3247 	struct amdgpu_encoder *aencoder = NULL;
3248 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3249 	uint32_t link_cnt;
3250 	int32_t primary_planes;
3251 	enum dc_connection_type new_connection_type = dc_connection_none;
3252 	const struct dc_plane_cap *plane;
3253 
3254 	link_cnt = dm->dc->caps.max_links;
3255 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3256 		DRM_ERROR("DM: Failed to initialize mode config\n");
3257 		return -EINVAL;
3258 	}
3259 
3260 	/* There is one primary plane per CRTC */
3261 	primary_planes = dm->dc->caps.max_streams;
3262 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3263 
3264 	/*
3265 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3266 	 * Order is reversed to match iteration order in atomic check.
3267 	 */
3268 	for (i = (primary_planes - 1); i >= 0; i--) {
3269 		plane = &dm->dc->caps.planes[i];
3270 
3271 		if (initialize_plane(dm, mode_info, i,
3272 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3273 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3274 			goto fail;
3275 		}
3276 	}
3277 
3278 	/*
3279 	 * Initialize overlay planes, index starting after primary planes.
3280 	 * These planes have a higher DRM index than the primary planes since
3281 	 * they should be considered as having a higher z-order.
3282 	 * Order is reversed to match iteration order in atomic check.
3283 	 *
3284 	 * Only support DCN for now, and only expose one so we don't encourage
3285 	 * userspace to use up all the pipes.
3286 	 */
3287 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3288 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3289 
3290 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3291 			continue;
3292 
3293 		if (!plane->blends_with_above || !plane->blends_with_below)
3294 			continue;
3295 
3296 		if (!plane->pixel_format_support.argb8888)
3297 			continue;
3298 
3299 		if (initialize_plane(dm, NULL, primary_planes + i,
3300 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3301 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3302 			goto fail;
3303 		}
3304 
3305 		/* Only create one overlay plane. */
3306 		break;
3307 	}
3308 
	for (i = 0; i < dm->dc->caps.max_streams; i++) {
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}
	}
3314 
3315 	dm->display_indexes_num = dm->dc->caps.max_streams;
3316 
3317 	/* loops over all connectors on the board */
3318 	for (i = 0; i < link_cnt; i++) {
3319 		struct dc_link *link = NULL;
3320 
3321 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3322 			DRM_ERROR(
3323 				"KMS: Cannot support more than %d display indexes\n",
3324 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3325 			continue;
3326 		}
3327 
3328 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3329 		if (!aconnector)
3330 			goto fail;
3331 
3332 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3333 		if (!aencoder)
3334 			goto fail;
3335 
3336 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3337 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3338 			goto fail;
3339 		}
3340 
3341 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3342 			DRM_ERROR("KMS: Failed to initialize connector\n");
3343 			goto fail;
3344 		}
3345 
3346 		link = dc_get_link_at_index(dm->dc, i);
3347 
3348 		if (!dc_link_detect_sink(link, &new_connection_type))
3349 			DRM_ERROR("KMS: Failed to detect connector\n");
3350 
3351 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3352 			emulated_link_detect(link);
3353 			amdgpu_dm_update_connector_after_detect(aconnector);
3354 
3355 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3356 			amdgpu_dm_update_connector_after_detect(aconnector);
3357 			register_backlight_device(dm, link);
3358 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3359 				amdgpu_dm_set_psr_caps(link);
3360 		}
	}
3364 
3365 	/* Software is initialized. Now we can register interrupt handlers. */
3366 	switch (adev->asic_type) {
3367 #if defined(CONFIG_DRM_AMD_DC_SI)
3368 	case CHIP_TAHITI:
3369 	case CHIP_PITCAIRN:
3370 	case CHIP_VERDE:
3371 	case CHIP_OLAND:
3372 		if (dce60_register_irq_handlers(dm->adev)) {
3373 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3374 			goto fail;
3375 		}
3376 		break;
3377 #endif
3378 	case CHIP_BONAIRE:
3379 	case CHIP_HAWAII:
3380 	case CHIP_KAVERI:
3381 	case CHIP_KABINI:
3382 	case CHIP_MULLINS:
3383 	case CHIP_TONGA:
3384 	case CHIP_FIJI:
3385 	case CHIP_CARRIZO:
3386 	case CHIP_STONEY:
3387 	case CHIP_POLARIS11:
3388 	case CHIP_POLARIS10:
3389 	case CHIP_POLARIS12:
3390 	case CHIP_VEGAM:
3391 	case CHIP_VEGA10:
3392 	case CHIP_VEGA12:
3393 	case CHIP_VEGA20:
3394 		if (dce110_register_irq_handlers(dm->adev)) {
3395 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3396 			goto fail;
3397 		}
3398 		break;
3399 #if defined(CONFIG_DRM_AMD_DC_DCN)
3400 	case CHIP_RAVEN:
3401 	case CHIP_NAVI12:
3402 	case CHIP_NAVI10:
3403 	case CHIP_NAVI14:
3404 	case CHIP_RENOIR:
3405 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3406 	case CHIP_SIENNA_CICHLID:
3407 	case CHIP_NAVY_FLOUNDER:
3408 #endif
3409 		if (dcn10_register_irq_handlers(dm->adev)) {
3410 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3411 			goto fail;
3412 		}
3413 		break;
3414 #endif
3415 	default:
3416 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3417 		goto fail;
3418 	}
3419 
3420 	return 0;
3421 fail:
3422 	kfree(aencoder);
3423 	kfree(aconnector);
3424 
3425 	return -EINVAL;
3426 }
3427 
3428 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3429 {
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
3433 }
3434 
3435 /******************************************************************************
3436  * amdgpu_display_funcs functions
3437  *****************************************************************************/
3438 
3439 /*
3440  * dm_bandwidth_update - program display watermarks
3441  *
3442  * @adev: amdgpu_device pointer
3443  *
3444  * Calculate and program the display watermarks and line buffer allocation.
3445  */
3446 static void dm_bandwidth_update(struct amdgpu_device *adev)
3447 {
3448 	/* TODO: implement later */
3449 }
3450 
3451 static const struct amdgpu_display_funcs dm_display_funcs = {
3452 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3453 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3454 	.backlight_set_level = NULL, /* never called for DC */
3455 	.backlight_get_level = NULL, /* never called for DC */
3456 	.hpd_sense = NULL,/* called unconditionally */
3457 	.hpd_set_polarity = NULL, /* called unconditionally */
3458 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3459 	.page_flip_get_scanoutpos =
3460 		dm_crtc_get_scanoutpos,/* called unconditionally */
3461 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3462 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3463 };
3464 
3465 #if defined(CONFIG_DEBUG_KERNEL_DC)
3466 
3467 static ssize_t s3_debug_store(struct device *device,
3468 			      struct device_attribute *attr,
3469 			      const char *buf,
3470 			      size_t count)
3471 {
3472 	int ret;
3473 	int s3_state;
3474 	struct drm_device *drm_dev = dev_get_drvdata(device);
3475 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3476 
3477 	ret = kstrtoint(buf, 0, &s3_state);
3478 
3479 	if (ret == 0) {
3480 		if (s3_state) {
3481 			dm_resume(adev);
3482 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3483 		} else
3484 			dm_suspend(adev);
3485 	}
3486 
	return ret == 0 ? count : ret;
3488 }
3489 
3490 DEVICE_ATTR_WO(s3_debug);
3491 
3492 #endif
3493 
3494 static int dm_early_init(void *handle)
3495 {
3496 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3497 
3498 	switch (adev->asic_type) {
3499 #if defined(CONFIG_DRM_AMD_DC_SI)
3500 	case CHIP_TAHITI:
3501 	case CHIP_PITCAIRN:
3502 	case CHIP_VERDE:
3503 		adev->mode_info.num_crtc = 6;
3504 		adev->mode_info.num_hpd = 6;
3505 		adev->mode_info.num_dig = 6;
3506 		break;
3507 	case CHIP_OLAND:
3508 		adev->mode_info.num_crtc = 2;
3509 		adev->mode_info.num_hpd = 2;
3510 		adev->mode_info.num_dig = 2;
3511 		break;
3512 #endif
3513 	case CHIP_BONAIRE:
3514 	case CHIP_HAWAII:
3515 		adev->mode_info.num_crtc = 6;
3516 		adev->mode_info.num_hpd = 6;
3517 		adev->mode_info.num_dig = 6;
3518 		break;
3519 	case CHIP_KAVERI:
3520 		adev->mode_info.num_crtc = 4;
3521 		adev->mode_info.num_hpd = 6;
3522 		adev->mode_info.num_dig = 7;
3523 		break;
3524 	case CHIP_KABINI:
3525 	case CHIP_MULLINS:
3526 		adev->mode_info.num_crtc = 2;
3527 		adev->mode_info.num_hpd = 6;
3528 		adev->mode_info.num_dig = 6;
3529 		break;
3530 	case CHIP_FIJI:
3531 	case CHIP_TONGA:
3532 		adev->mode_info.num_crtc = 6;
3533 		adev->mode_info.num_hpd = 6;
3534 		adev->mode_info.num_dig = 7;
3535 		break;
3536 	case CHIP_CARRIZO:
3537 		adev->mode_info.num_crtc = 3;
3538 		adev->mode_info.num_hpd = 6;
3539 		adev->mode_info.num_dig = 9;
3540 		break;
3541 	case CHIP_STONEY:
3542 		adev->mode_info.num_crtc = 2;
3543 		adev->mode_info.num_hpd = 6;
3544 		adev->mode_info.num_dig = 9;
3545 		break;
3546 	case CHIP_POLARIS11:
3547 	case CHIP_POLARIS12:
3548 		adev->mode_info.num_crtc = 5;
3549 		adev->mode_info.num_hpd = 5;
3550 		adev->mode_info.num_dig = 5;
3551 		break;
3552 	case CHIP_POLARIS10:
3553 	case CHIP_VEGAM:
3554 		adev->mode_info.num_crtc = 6;
3555 		adev->mode_info.num_hpd = 6;
3556 		adev->mode_info.num_dig = 6;
3557 		break;
3558 	case CHIP_VEGA10:
3559 	case CHIP_VEGA12:
3560 	case CHIP_VEGA20:
3561 		adev->mode_info.num_crtc = 6;
3562 		adev->mode_info.num_hpd = 6;
3563 		adev->mode_info.num_dig = 6;
3564 		break;
3565 #if defined(CONFIG_DRM_AMD_DC_DCN)
3566 	case CHIP_RAVEN:
3567 		adev->mode_info.num_crtc = 4;
3568 		adev->mode_info.num_hpd = 4;
3569 		adev->mode_info.num_dig = 4;
3570 		break;
3571 #endif
3572 	case CHIP_NAVI10:
3573 	case CHIP_NAVI12:
3574 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3575 	case CHIP_SIENNA_CICHLID:
3576 	case CHIP_NAVY_FLOUNDER:
3577 #endif
3578 		adev->mode_info.num_crtc = 6;
3579 		adev->mode_info.num_hpd = 6;
3580 		adev->mode_info.num_dig = 6;
3581 		break;
3582 	case CHIP_NAVI14:
3583 		adev->mode_info.num_crtc = 5;
3584 		adev->mode_info.num_hpd = 5;
3585 		adev->mode_info.num_dig = 5;
3586 		break;
3587 	case CHIP_RENOIR:
3588 		adev->mode_info.num_crtc = 4;
3589 		adev->mode_info.num_hpd = 4;
3590 		adev->mode_info.num_dig = 4;
3591 		break;
3592 	default:
3593 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3594 		return -EINVAL;
3595 	}
3596 
3597 	amdgpu_dm_set_irq_funcs(adev);
3598 
3599 	if (adev->mode_info.funcs == NULL)
3600 		adev->mode_info.funcs = &dm_display_funcs;
3601 
3602 	/*
3603 	 * Note: Do NOT change adev->audio_endpt_rreg and
3604 	 * adev->audio_endpt_wreg because they are initialised in
3605 	 * amdgpu_device_init()
3606 	 */
3607 #if defined(CONFIG_DEBUG_KERNEL_DC)
3608 	device_create_file(
3609 		adev_to_drm(adev)->dev,
3610 		&dev_attr_s3_debug);
3611 #endif
3612 
3613 	return 0;
3614 }
3615 
3616 static bool modeset_required(struct drm_crtc_state *crtc_state,
3617 			     struct dc_stream_state *new_stream,
3618 			     struct dc_stream_state *old_stream)
3619 {
3620 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3621 }
3622 
3623 static bool modereset_required(struct drm_crtc_state *crtc_state)
3624 {
3625 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3626 }
3627 
3628 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3629 {
3630 	drm_encoder_cleanup(encoder);
3631 	kfree(encoder);
3632 }
3633 
3634 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3635 	.destroy = amdgpu_dm_encoder_destroy,
3636 };
3637 
3638 
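/*
 * fill_dc_scaling_info - fill a dc_scaling_info struct from DRM plane state
 *
 * @state: DRM plane state holding the src/dst rectangles
 * @scaling_info: DC scaling info to fill in
 *
 * The DRM source rectangle is in 16.16 fixed point; only the integer part
 * is used here. The derived scale factors are expressed in thousandths, so
 * the 250..16000 bounds below allow scaling between 0.25x and 16x.
 * Returns 0 on success, -EINVAL for degenerate or out-of-range rectangles.
 */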
3639 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3640 				struct dc_scaling_info *scaling_info)
3641 {
3642 	int scale_w, scale_h;
3643 
3644 	memset(scaling_info, 0, sizeof(*scaling_info));
3645 
	/* Source is fixed 16.16 but we ignore the fractional part for now. */
3647 	scaling_info->src_rect.x = state->src_x >> 16;
3648 	scaling_info->src_rect.y = state->src_y >> 16;
3649 
3650 	scaling_info->src_rect.width = state->src_w >> 16;
3651 	if (scaling_info->src_rect.width == 0)
3652 		return -EINVAL;
3653 
3654 	scaling_info->src_rect.height = state->src_h >> 16;
3655 	if (scaling_info->src_rect.height == 0)
3656 		return -EINVAL;
3657 
3658 	scaling_info->dst_rect.x = state->crtc_x;
3659 	scaling_info->dst_rect.y = state->crtc_y;
3660 
3661 	if (state->crtc_w == 0)
3662 		return -EINVAL;
3663 
3664 	scaling_info->dst_rect.width = state->crtc_w;
3665 
3666 	if (state->crtc_h == 0)
3667 		return -EINVAL;
3668 
3669 	scaling_info->dst_rect.height = state->crtc_h;
3670 
3671 	/* DRM doesn't specify clipping on destination output. */
3672 	scaling_info->clip_rect = scaling_info->dst_rect;
3673 
3674 	/* TODO: Validate scaling per-format with DC plane caps */
3675 	scale_w = scaling_info->dst_rect.width * 1000 /
3676 		  scaling_info->src_rect.width;
3677 
3678 	if (scale_w < 250 || scale_w > 16000)
3679 		return -EINVAL;
3680 
3681 	scale_h = scaling_info->dst_rect.height * 1000 /
3682 		  scaling_info->src_rect.height;
3683 
3684 	if (scale_h < 250 || scale_h > 16000)
3685 		return -EINVAL;
3686 
	/*
	 * The "scaling_quality" can be ignored for now; quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */
3691 
3692 	return 0;
3693 }
3694 
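/*
 * Look up the tiling flags and TMZ (encryption) state of a framebuffer's
 * backing buffer object, reserving the BO only for the duration of the
 * query. A NULL framebuffer yields zeroed defaults.
 */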
3695 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3696 		       uint64_t *tiling_flags, bool *tmz_surface)
3697 {
3698 	struct amdgpu_bo *rbo;
3699 	int r;
3700 
3701 	if (!amdgpu_fb) {
3702 		*tiling_flags = 0;
3703 		*tmz_surface = false;
3704 		return 0;
3705 	}
3706 
3707 	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3708 	r = amdgpu_bo_reserve(rbo, false);
3709 
3710 	if (unlikely(r)) {
3711 		/* Don't show error message when returning -ERESTARTSYS */
3712 		if (r != -ERESTARTSYS)
3713 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3714 		return r;
3715 	}
3716 
3717 	if (tiling_flags)
3718 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3719 
3720 	if (tmz_surface)
3721 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3722 
3723 	amdgpu_bo_unreserve(rbo);
3724 
3725 	return r;
3726 }
3727 
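/*
 * The DCC metadata offset is encoded in the tiling flags in units of 256
 * bytes (e.g. an encoded offset of 4 puts the metadata 1024 bytes past the
 * surface address). An offset of zero means the surface has no DCC.
 */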
3728 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3729 {
3730 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3731 
3732 	return offset ? (address + offset * 256) : 0;
3733 }
3734 
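/*
 * Enable DCC for a plane when the surface supports it: query DC for the
 * compression capability of the given format/size/swizzle and, if the
 * tiling flags carry a DCC offset, program the dcc params and the DCC
 * metadata address. Returns 0 with DCC left disabled when compression
 * simply does not apply, or -EINVAL when the capability check fails.
 */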
3735 static int
3736 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3737 			  const struct amdgpu_framebuffer *afb,
3738 			  const enum surface_pixel_format format,
3739 			  const enum dc_rotation_angle rotation,
3740 			  const struct plane_size *plane_size,
3741 			  const union dc_tiling_info *tiling_info,
3742 			  const uint64_t info,
3743 			  struct dc_plane_dcc_param *dcc,
3744 			  struct dc_plane_address *address,
3745 			  bool force_disable_dcc)
3746 {
3747 	struct dc *dc = adev->dm.dc;
3748 	struct dc_dcc_surface_param input;
3749 	struct dc_surface_dcc_cap output;
3750 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3751 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3752 	uint64_t dcc_address;
3753 
3754 	memset(&input, 0, sizeof(input));
3755 	memset(&output, 0, sizeof(output));
3756 
3757 	if (force_disable_dcc)
3758 		return 0;
3759 
3760 	if (!offset)
3761 		return 0;
3762 
3763 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3764 		return 0;
3765 
3766 	if (!dc->cap_funcs.get_dcc_compression_cap)
3767 		return -EINVAL;
3768 
3769 	input.format = format;
3770 	input.surface_size.width = plane_size->surface_size.width;
3771 	input.surface_size.height = plane_size->surface_size.height;
3772 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3773 
3774 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3775 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3776 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3777 		input.scan = SCAN_DIRECTION_VERTICAL;
3778 
3779 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3780 		return -EINVAL;
3781 
3782 	if (!output.capable)
3783 		return -EINVAL;
3784 
3785 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3786 		return -EINVAL;
3787 
3788 	dcc->enable = 1;
3789 	dcc->meta_pitch =
3790 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3791 	dcc->independent_64b_blks = i64b;
3792 
3793 	dcc_address = get_dcc_address(afb->address, info);
3794 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3795 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3796 
3797 	return 0;
3798 }
3799 
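/*
 * Translate a framebuffer's memory layout into DC terms: plane size and
 * pitch, GFX8 macro-tiling parameters or GFX9+ swizzle info depending on
 * the ASIC, the plane address, and (on GFX9+) DCC attributes. Video
 * formats additionally describe a chroma plane taken from fb->offsets[1].
 */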
3800 static int
3801 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3802 			     const struct amdgpu_framebuffer *afb,
3803 			     const enum surface_pixel_format format,
3804 			     const enum dc_rotation_angle rotation,
3805 			     const uint64_t tiling_flags,
3806 			     union dc_tiling_info *tiling_info,
3807 			     struct plane_size *plane_size,
3808 			     struct dc_plane_dcc_param *dcc,
3809 			     struct dc_plane_address *address,
3810 			     bool tmz_surface,
3811 			     bool force_disable_dcc)
3812 {
3813 	const struct drm_framebuffer *fb = &afb->base;
3814 	int ret;
3815 
3816 	memset(tiling_info, 0, sizeof(*tiling_info));
3817 	memset(plane_size, 0, sizeof(*plane_size));
3818 	memset(dcc, 0, sizeof(*dcc));
3819 	memset(address, 0, sizeof(*address));
3820 
3821 	address->tmz_surface = tmz_surface;
3822 
3823 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3824 		plane_size->surface_size.x = 0;
3825 		plane_size->surface_size.y = 0;
3826 		plane_size->surface_size.width = fb->width;
3827 		plane_size->surface_size.height = fb->height;
3828 		plane_size->surface_pitch =
3829 			fb->pitches[0] / fb->format->cpp[0];
3830 
3831 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3832 		address->grph.addr.low_part = lower_32_bits(afb->address);
3833 		address->grph.addr.high_part = upper_32_bits(afb->address);
3834 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3835 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3836 
3837 		plane_size->surface_size.x = 0;
3838 		plane_size->surface_size.y = 0;
3839 		plane_size->surface_size.width = fb->width;
3840 		plane_size->surface_size.height = fb->height;
3841 		plane_size->surface_pitch =
3842 			fb->pitches[0] / fb->format->cpp[0];
3843 
3844 		plane_size->chroma_size.x = 0;
3845 		plane_size->chroma_size.y = 0;
3846 		/* TODO: set these based on surface format */
3847 		plane_size->chroma_size.width = fb->width / 2;
3848 		plane_size->chroma_size.height = fb->height / 2;
3849 
3850 		plane_size->chroma_pitch =
3851 			fb->pitches[1] / fb->format->cpp[1];
3852 
3853 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3854 		address->video_progressive.luma_addr.low_part =
3855 			lower_32_bits(afb->address);
3856 		address->video_progressive.luma_addr.high_part =
3857 			upper_32_bits(afb->address);
3858 		address->video_progressive.chroma_addr.low_part =
3859 			lower_32_bits(chroma_addr);
3860 		address->video_progressive.chroma_addr.high_part =
3861 			upper_32_bits(chroma_addr);
3862 	}
3863 
3864 	/* Fill GFX8 params */
3865 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3866 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3867 
3868 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3869 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3870 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3871 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3872 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3873 
3874 		/* XXX fix me for VI */
3875 		tiling_info->gfx8.num_banks = num_banks;
3876 		tiling_info->gfx8.array_mode =
3877 				DC_ARRAY_2D_TILED_THIN1;
3878 		tiling_info->gfx8.tile_split = tile_split;
3879 		tiling_info->gfx8.bank_width = bankw;
3880 		tiling_info->gfx8.bank_height = bankh;
3881 		tiling_info->gfx8.tile_aspect = mtaspect;
3882 		tiling_info->gfx8.tile_mode =
3883 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3884 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3885 			== DC_ARRAY_1D_TILED_THIN1) {
3886 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3887 	}
3888 
3889 	tiling_info->gfx8.pipe_config =
3890 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3891 
3892 	if (adev->asic_type == CHIP_VEGA10 ||
3893 	    adev->asic_type == CHIP_VEGA12 ||
3894 	    adev->asic_type == CHIP_VEGA20 ||
3895 	    adev->asic_type == CHIP_NAVI10 ||
3896 	    adev->asic_type == CHIP_NAVI14 ||
3897 	    adev->asic_type == CHIP_NAVI12 ||
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	    adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
#endif
3902 	    adev->asic_type == CHIP_RENOIR ||
3903 	    adev->asic_type == CHIP_RAVEN) {
3904 		/* Fill GFX9 params */
3905 		tiling_info->gfx9.num_pipes =
3906 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3907 		tiling_info->gfx9.num_banks =
3908 			adev->gfx.config.gb_addr_config_fields.num_banks;
3909 		tiling_info->gfx9.pipe_interleave =
3910 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3911 		tiling_info->gfx9.num_shader_engines =
3912 			adev->gfx.config.gb_addr_config_fields.num_se;
3913 		tiling_info->gfx9.max_compressed_frags =
3914 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3915 		tiling_info->gfx9.num_rb_per_se =
3916 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3917 		tiling_info->gfx9.swizzle =
3918 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3919 		tiling_info->gfx9.shaderEnable = 1;
3920 
3921 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3922 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3923 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
3924 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3925 #endif
3926 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3927 						plane_size, tiling_info,
3928 						tiling_flags, dcc, address,
3929 						force_disable_dcc);
3930 		if (ret)
3931 			return ret;
3932 	}
3933 
3934 	return 0;
3935 }
3936 
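/*
 * Derive alpha blending settings from DRM plane state. Only overlay
 * planes blend: per-pixel alpha requires a premultiplied blend mode and
 * an alpha-capable format, while a plane alpha below 0xffff enables
 * global alpha, scaled from DRM's 16-bit value to the 8-bit value DC
 * expects.
 */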
3937 static void
3938 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3939 			       bool *per_pixel_alpha, bool *global_alpha,
3940 			       int *global_alpha_value)
3941 {
3942 	*per_pixel_alpha = false;
3943 	*global_alpha = false;
3944 	*global_alpha_value = 0xff;
3945 
3946 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3947 		return;
3948 
3949 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3950 		static const uint32_t alpha_formats[] = {
3951 			DRM_FORMAT_ARGB8888,
3952 			DRM_FORMAT_RGBA8888,
3953 			DRM_FORMAT_ABGR8888,
3954 		};
3955 		uint32_t format = plane_state->fb->format->format;
3956 		unsigned int i;
3957 
3958 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3959 			if (format == alpha_formats[i]) {
3960 				*per_pixel_alpha = true;
3961 				break;
3962 			}
3963 		}
3964 	}
3965 
3966 	if (plane_state->alpha < 0xffff) {
3967 		*global_alpha = true;
3968 		*global_alpha_value = plane_state->alpha >> 8;
3969 	}
3970 }
3971 
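/*
 * Map the DRM color encoding/range plane properties to a DC color space.
 * RGB formats always use sRGB; YCbCr formats honor BT.601 and BT.709 in
 * full or limited range, while BT.2020 is only accepted in full range.
 */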
3972 static int
3973 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3974 			    const enum surface_pixel_format format,
3975 			    enum dc_color_space *color_space)
3976 {
3977 	bool full_range;
3978 
3979 	*color_space = COLOR_SPACE_SRGB;
3980 
3981 	/* DRM color properties only affect non-RGB formats. */
3982 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3983 		return 0;
3984 
3985 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3986 
3987 	switch (plane_state->color_encoding) {
3988 	case DRM_COLOR_YCBCR_BT601:
3989 		if (full_range)
3990 			*color_space = COLOR_SPACE_YCBCR601;
3991 		else
3992 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3993 		break;
3994 
3995 	case DRM_COLOR_YCBCR_BT709:
3996 		if (full_range)
3997 			*color_space = COLOR_SPACE_YCBCR709;
3998 		else
3999 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4000 		break;
4001 
4002 	case DRM_COLOR_YCBCR_BT2020:
4003 		if (full_range)
4004 			*color_space = COLOR_SPACE_2020_YCBCR;
4005 		else
4006 			return -EINVAL;
4007 		break;
4008 
4009 	default:
4010 		return -EINVAL;
4011 	}
4012 
4013 	return 0;
4014 }
4015 
4016 static int
4017 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4018 			    const struct drm_plane_state *plane_state,
4019 			    const uint64_t tiling_flags,
4020 			    struct dc_plane_info *plane_info,
4021 			    struct dc_plane_address *address,
4022 			    bool tmz_surface,
4023 			    bool force_disable_dcc)
4024 {
4025 	const struct drm_framebuffer *fb = plane_state->fb;
4026 	const struct amdgpu_framebuffer *afb =
4027 		to_amdgpu_framebuffer(plane_state->fb);
4028 	struct drm_format_name_buf format_name;
4029 	int ret;
4030 
4031 	memset(plane_info, 0, sizeof(*plane_info));
4032 
4033 	switch (fb->format->format) {
4034 	case DRM_FORMAT_C8:
4035 		plane_info->format =
4036 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4037 		break;
4038 	case DRM_FORMAT_RGB565:
4039 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4040 		break;
4041 	case DRM_FORMAT_XRGB8888:
4042 	case DRM_FORMAT_ARGB8888:
4043 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4044 		break;
4045 	case DRM_FORMAT_XRGB2101010:
4046 	case DRM_FORMAT_ARGB2101010:
4047 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4048 		break;
4049 	case DRM_FORMAT_XBGR2101010:
4050 	case DRM_FORMAT_ABGR2101010:
4051 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4052 		break;
4053 	case DRM_FORMAT_XBGR8888:
4054 	case DRM_FORMAT_ABGR8888:
4055 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4056 		break;
4057 	case DRM_FORMAT_NV21:
4058 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4059 		break;
4060 	case DRM_FORMAT_NV12:
4061 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4062 		break;
4063 	case DRM_FORMAT_P010:
4064 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4065 		break;
4066 	case DRM_FORMAT_XRGB16161616F:
4067 	case DRM_FORMAT_ARGB16161616F:
4068 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4069 		break;
4070 	case DRM_FORMAT_XBGR16161616F:
4071 	case DRM_FORMAT_ABGR16161616F:
4072 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4073 		break;
4074 	default:
4075 		DRM_ERROR(
4076 			"Unsupported screen format %s\n",
4077 			drm_get_format_name(fb->format->format, &format_name));
4078 		return -EINVAL;
4079 	}
4080 
4081 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4082 	case DRM_MODE_ROTATE_0:
4083 		plane_info->rotation = ROTATION_ANGLE_0;
4084 		break;
4085 	case DRM_MODE_ROTATE_90:
4086 		plane_info->rotation = ROTATION_ANGLE_90;
4087 		break;
4088 	case DRM_MODE_ROTATE_180:
4089 		plane_info->rotation = ROTATION_ANGLE_180;
4090 		break;
4091 	case DRM_MODE_ROTATE_270:
4092 		plane_info->rotation = ROTATION_ANGLE_270;
4093 		break;
4094 	default:
4095 		plane_info->rotation = ROTATION_ANGLE_0;
4096 		break;
4097 	}
4098 
4099 	plane_info->visible = true;
4100 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4101 
4102 	plane_info->layer_index = 0;
4103 
4104 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4105 					  &plane_info->color_space);
4106 	if (ret)
4107 		return ret;
4108 
4109 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4110 					   plane_info->rotation, tiling_flags,
4111 					   &plane_info->tiling_info,
4112 					   &plane_info->plane_size,
4113 					   &plane_info->dcc, address, tmz_surface,
4114 					   force_disable_dcc);
4115 	if (ret)
4116 		return ret;
4117 
4118 	fill_blending_from_plane_state(
4119 		plane_state, &plane_info->per_pixel_alpha,
4120 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4121 
4122 	return 0;
4123 }
4124 
4125 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4126 				    struct dc_plane_state *dc_plane_state,
4127 				    struct drm_plane_state *plane_state,
4128 				    struct drm_crtc_state *crtc_state)
4129 {
4130 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4131 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4132 	struct dc_scaling_info scaling_info;
4133 	struct dc_plane_info plane_info;
4134 	int ret;
4135 	bool force_disable_dcc = false;
4136 
4137 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4138 	if (ret)
4139 		return ret;
4140 
4141 	dc_plane_state->src_rect = scaling_info.src_rect;
4142 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4143 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4144 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4145 
4146 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4147 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4148 					  dm_plane_state->tiling_flags,
4149 					  &plane_info,
4150 					  &dc_plane_state->address,
4151 					  dm_plane_state->tmz_surface,
4152 					  force_disable_dcc);
4153 	if (ret)
4154 		return ret;
4155 
	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
4159 	dc_plane_state->plane_size = plane_info.plane_size;
4160 	dc_plane_state->rotation = plane_info.rotation;
4161 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4162 	dc_plane_state->stereo_format = plane_info.stereo_format;
4163 	dc_plane_state->tiling_info = plane_info.tiling_info;
4164 	dc_plane_state->visible = plane_info.visible;
4165 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4166 	dc_plane_state->global_alpha = plane_info.global_alpha;
4167 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4168 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4170 
4171 	/*
4172 	 * Always set input transfer function, since plane state is refreshed
4173 	 * every time.
4174 	 */
4175 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4176 	if (ret)
4177 		return ret;
4178 
4179 	return 0;
4180 }
4181 
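/*
 * Compute the stream's source (viewport) and destination rectangles from
 * the requested mode and the connector's scaling and underscan state:
 * RMX_ASPECT and RMX_OFF aspect-fit the source into the addressable area,
 * RMX_CENTER displays it unscaled and centered, and underscan borders
 * shrink the destination further.
 */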
4182 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4183 					   const struct dm_connector_state *dm_state,
4184 					   struct dc_stream_state *stream)
4185 {
4186 	enum amdgpu_rmx_type rmx_type;
4187 
	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* No mode, nothing to be done */
4192 	if (!mode)
4193 		return;
4194 
4195 	/* Full screen scaling by default */
4196 	src.width = mode->hdisplay;
4197 	src.height = mode->vdisplay;
4198 	dst.width = stream->timing.h_addressable;
4199 	dst.height = stream->timing.v_addressable;
4200 
4201 	if (dm_state) {
4202 		rmx_type = dm_state->scaling;
4203 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4204 			if (src.width * dst.height <
4205 					src.height * dst.width) {
4206 				/* height needs less upscaling/more downscaling */
4207 				dst.width = src.width *
4208 						dst.height / src.height;
4209 			} else {
4210 				/* width needs less upscaling/more downscaling */
4211 				dst.height = src.height *
4212 						dst.width / src.width;
4213 			}
4214 		} else if (rmx_type == RMX_CENTER) {
4215 			dst = src;
4216 		}
4217 
4218 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4219 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4220 
4221 		if (dm_state->underscan_enable) {
4222 			dst.x += dm_state->underscan_hborder / 2;
4223 			dst.y += dm_state->underscan_vborder / 2;
4224 			dst.width -= dm_state->underscan_hborder;
4225 			dst.height -= dm_state->underscan_vborder;
4226 		}
4227 	}
4228 
4229 	stream->src = src;
4230 	stream->dst = dst;
4231 
	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
			 dst.x, dst.y, dst.width, dst.height);
}
4236 
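/*
 * Choose a color depth from what the EDID reports, optionally capped by a
 * user-requested maximum bpc. For YCbCr 4:2:0 the HDMI HF-VSDB deep color
 * bits decide; otherwise the EDID bpc is used, assuming 8 bpc when the
 * EDID does not specify one.
 */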
4237 static enum dc_color_depth
4238 convert_color_depth_from_display_info(const struct drm_connector *connector,
4239 				      bool is_y420, int requested_bpc)
4240 {
4241 	uint8_t bpc;
4242 
4243 	if (is_y420) {
4244 		bpc = 8;
4245 
4246 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4247 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4248 			bpc = 16;
4249 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4250 			bpc = 12;
4251 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4252 			bpc = 10;
4253 	} else {
4254 		bpc = (uint8_t)connector->display_info.bpc;
4255 		/* Assume 8 bpc by default if no bpc is specified. */
4256 		bpc = bpc ? bpc : 8;
4257 	}
4258 
4259 	if (requested_bpc > 0) {
4260 		/*
4261 		 * Cap display bpc based on the user requested value.
4262 		 *
		 * The value for state->max_bpc may not be correctly updated
4264 		 * depending on when the connector gets added to the state
4265 		 * or if this was called outside of atomic check, so it
4266 		 * can't be used directly.
4267 		 */
4268 		bpc = min_t(u8, bpc, requested_bpc);
4269 
4270 		/* Round down to the nearest even number. */
4271 		bpc = bpc - (bpc & 1);
4272 	}
4273 
4274 	switch (bpc) {
4275 	case 0:
4276 		/*
4277 		 * Temporary Work around, DRM doesn't parse color depth for
4278 		 * EDID revision before 1.4
4279 		 * TODO: Fix edid parsing
4280 		 */
4281 		return COLOR_DEPTH_888;
4282 	case 6:
4283 		return COLOR_DEPTH_666;
4284 	case 8:
4285 		return COLOR_DEPTH_888;
4286 	case 10:
4287 		return COLOR_DEPTH_101010;
4288 	case 12:
4289 		return COLOR_DEPTH_121212;
4290 	case 14:
4291 		return COLOR_DEPTH_141414;
4292 	case 16:
4293 		return COLOR_DEPTH_161616;
4294 	default:
4295 		return COLOR_DEPTH_UNDEFINED;
4296 	}
4297 }
4298 
4299 static enum dc_aspect_ratio
4300 get_aspect_ratio(const struct drm_display_mode *mode_in)
4301 {
4302 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4303 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4304 }
4305 
4306 static enum dc_color_space
4307 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4308 {
4309 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4310 
4311 	switch (dc_crtc_timing->pixel_encoding)	{
4312 	case PIXEL_ENCODING_YCBCR422:
4313 	case PIXEL_ENCODING_YCBCR444:
4314 	case PIXEL_ENCODING_YCBCR420:
4315 	{
		/*
		 * Per the HDMI spec, 27030 kHz is the separation point
		 * between HDTV and SDTV, so use YCbCr709 above it and
		 * YCbCr601 below it.
		 */
4321 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4322 			if (dc_crtc_timing->flags.Y_ONLY)
4323 				color_space =
4324 					COLOR_SPACE_YCBCR709_LIMITED;
4325 			else
4326 				color_space = COLOR_SPACE_YCBCR709;
4327 		} else {
4328 			if (dc_crtc_timing->flags.Y_ONLY)
4329 				color_space =
4330 					COLOR_SPACE_YCBCR601_LIMITED;
4331 			else
4332 				color_space = COLOR_SPACE_YCBCR601;
4333 		}
4334 
4335 	}
4336 	break;
4337 	case PIXEL_ENCODING_RGB:
4338 		color_space = COLOR_SPACE_SRGB;
4339 		break;
4340 
4341 	default:
4342 		WARN_ON(1);
4343 		break;
4344 	}
4345 
4346 	return color_space;
4347 }
4348 
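/*
 * Step the color depth down until the resulting TMDS clock fits the
 * sink's max_tmds_clock: deep color scales the pixel clock by bpc/24
 * (e.g. 10 bpc multiplies it by 30/24) and YCbCr 4:2:0 halves it.
 * Returns false if not even COLOR_DEPTH_888 fits.
 */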
4349 static bool adjust_colour_depth_from_display_info(
4350 	struct dc_crtc_timing *timing_out,
4351 	const struct drm_display_info *info)
4352 {
4353 	enum dc_color_depth depth = timing_out->display_color_depth;
4354 	int normalized_clk;
4355 	do {
4356 		normalized_clk = timing_out->pix_clk_100hz / 10;
4357 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4358 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4359 			normalized_clk /= 2;
4360 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4361 		switch (depth) {
4362 		case COLOR_DEPTH_888:
4363 			break;
4364 		case COLOR_DEPTH_101010:
4365 			normalized_clk = (normalized_clk * 30) / 24;
4366 			break;
4367 		case COLOR_DEPTH_121212:
4368 			normalized_clk = (normalized_clk * 36) / 24;
4369 			break;
4370 		case COLOR_DEPTH_161616:
4371 			normalized_clk = (normalized_clk * 48) / 24;
4372 			break;
4373 		default:
4374 			/* The above depths are the only ones valid for HDMI. */
4375 			return false;
4376 		}
4377 		if (normalized_clk <= info->max_tmds_clock) {
4378 			timing_out->display_color_depth = depth;
4379 			return true;
4380 		}
4381 	} while (--depth > COLOR_DEPTH_666);
4382 	return false;
4383 }
4384 
4385 static void fill_stream_properties_from_drm_display_mode(
4386 	struct dc_stream_state *stream,
4387 	const struct drm_display_mode *mode_in,
4388 	const struct drm_connector *connector,
4389 	const struct drm_connector_state *connector_state,
4390 	const struct dc_stream_state *old_stream,
4391 	int requested_bpc)
4392 {
4393 	struct dc_crtc_timing *timing_out = &stream->timing;
4394 	const struct drm_display_info *info = &connector->display_info;
4395 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4396 	struct hdmi_vendor_infoframe hv_frame;
4397 	struct hdmi_avi_infoframe avi_frame;
4398 
4399 	memset(&hv_frame, 0, sizeof(hv_frame));
4400 	memset(&avi_frame, 0, sizeof(avi_frame));
4401 
4402 	timing_out->h_border_left = 0;
4403 	timing_out->h_border_right = 0;
4404 	timing_out->v_border_top = 0;
4405 	timing_out->v_border_bottom = 0;
4406 	/* TODO: un-hardcode */
4407 	if (drm_mode_is_420_only(info, mode_in)
4408 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4409 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4410 	else if (drm_mode_is_420_also(info, mode_in)
4411 			&& aconnector->force_yuv420_output)
4412 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4413 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4414 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4415 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4416 	else
4417 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4418 
4419 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4420 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4421 		connector,
4422 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4423 		requested_bpc);
4424 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4425 	timing_out->hdmi_vic = 0;
4426 
	if (old_stream) {
4428 		timing_out->vic = old_stream->timing.vic;
4429 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4430 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4431 	} else {
4432 		timing_out->vic = drm_match_cea_mode(mode_in);
4433 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4434 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4435 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4436 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4437 	}
4438 
4439 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4440 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4441 		timing_out->vic = avi_frame.video_code;
4442 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4443 		timing_out->hdmi_vic = hv_frame.vic;
4444 	}
4445 
4446 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4447 	timing_out->h_total = mode_in->crtc_htotal;
4448 	timing_out->h_sync_width =
4449 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4450 	timing_out->h_front_porch =
4451 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4452 	timing_out->v_total = mode_in->crtc_vtotal;
4453 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4454 	timing_out->v_front_porch =
4455 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4456 	timing_out->v_sync_width =
4457 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4458 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4459 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4460 
4461 	stream->output_color_space = get_output_color_space(timing_out);
4462 
4463 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4464 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4465 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4466 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4467 		    drm_mode_is_420_also(info, mode_in) &&
4468 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4469 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4470 			adjust_colour_depth_from_display_info(timing_out, info);
4471 		}
4472 	}
4473 }
4474 
4475 static void fill_audio_info(struct audio_info *audio_info,
4476 			    const struct drm_connector *drm_connector,
4477 			    const struct dc_sink *dc_sink)
4478 {
4479 	int i = 0;
4480 	int cea_revision = 0;
4481 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4482 
4483 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4484 	audio_info->product_id = edid_caps->product_id;
4485 
4486 	cea_revision = drm_connector->display_info.cea_rev;
4487 
4488 	strscpy(audio_info->display_name,
4489 		edid_caps->display_name,
4490 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4491 
4492 	if (cea_revision >= 3) {
4493 		audio_info->mode_count = edid_caps->audio_mode_count;
4494 
4495 		for (i = 0; i < audio_info->mode_count; ++i) {
4496 			audio_info->modes[i].format_code =
4497 					(enum audio_format_code)
4498 					(edid_caps->audio_modes[i].format_code);
4499 			audio_info->modes[i].channel_count =
4500 					edid_caps->audio_modes[i].channel_count;
4501 			audio_info->modes[i].sample_rates.all =
4502 					edid_caps->audio_modes[i].sample_rate;
4503 			audio_info->modes[i].sample_size =
4504 					edid_caps->audio_modes[i].sample_size;
4505 		}
4506 	}
4507 
4508 	audio_info->flags.all = edid_caps->speaker_flags;
4509 
4510 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4511 	if (drm_connector->latency_present[0]) {
4512 		audio_info->video_latency = drm_connector->video_latency[0];
4513 		audio_info->audio_latency = drm_connector->audio_latency[0];
4514 	}
4515 
	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}
4519 
4520 static void
4521 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4522 				      struct drm_display_mode *dst_mode)
4523 {
4524 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4525 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4526 	dst_mode->crtc_clock = src_mode->crtc_clock;
4527 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4528 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4529 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4530 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4531 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4532 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4533 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4534 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4535 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4536 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4537 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4538 }
4539 
4540 static void
4541 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4542 					const struct drm_display_mode *native_mode,
4543 					bool scale_enabled)
4544 {
4545 	if (scale_enabled) {
4546 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4547 	} else if (native_mode->clock == drm_mode->clock &&
4548 			native_mode->htotal == drm_mode->htotal &&
4549 			native_mode->vtotal == drm_mode->vtotal) {
4550 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4551 	} else {
		/* no scaling and no amdgpu-inserted mode; nothing to patch */
4553 	}
4554 }
4555 
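/*
 * Create a virtual sink on the connector's link so a stream can still be
 * constructed while no physical sink is attached.
 */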
4556 static struct dc_sink *
4557 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4558 {
4559 	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
4562 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4563 
4564 	sink = dc_sink_create(&sink_init_data);
4565 	if (!sink) {
4566 		DRM_ERROR("Failed to create sink!\n");
4567 		return NULL;
4568 	}
4569 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4570 
4571 	return sink;
4572 }
4573 
4574 static void set_multisync_trigger_params(
4575 		struct dc_stream_state *stream)
4576 {
4577 	if (stream->triggered_crtc_reset.enabled) {
4578 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4579 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4580 	}
4581 }
4582 
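/*
 * Among the streams taking part in triggered CRTC reset, elect the one
 * with the highest refresh rate (pixel clock divided by total pixels) as
 * master and point every stream's reset event source at it.
 */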
4583 static void set_master_stream(struct dc_stream_state *stream_set[],
4584 			      int stream_count)
4585 {
4586 	int j, highest_rfr = 0, master_stream = 0;
4587 
4588 	for (j = 0;  j < stream_count; j++) {
4589 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4590 			int refresh_rate = 0;
4591 
4592 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4593 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4594 			if (refresh_rate > highest_rfr) {
4595 				highest_rfr = refresh_rate;
4596 				master_stream = j;
4597 			}
4598 		}
4599 	}
4600 	for (j = 0;  j < stream_count; j++) {
4601 		if (stream_set[j])
4602 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4603 	}
4604 }
4605 
4606 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4607 {
4608 	int i = 0;
4609 
4610 	if (context->stream_count < 2)
4611 		return;
4612 	for (i = 0; i < context->stream_count ; i++) {
4613 		if (!context->streams[i])
4614 			continue;
4615 		/*
4616 		 * TODO: add a function to read AMD VSDB bits and set
4617 		 * crtc_sync_master.multi_sync_enabled flag
4618 		 * For now it's set to false
4619 		 */
4620 		set_multisync_trigger_params(context->streams[i]);
4621 	}
4622 	set_master_stream(context->streams, context->stream_count);
4623 }
4624 
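/*
 * create_stream_for_sink - build a dc_stream_state for a connector
 *
 * Picks a sink (the connector's real sink, or a fake one when none is
 * attached), derives the stream timing from the requested mode, reusing
 * the old stream's VIC and polarities when only scaling changed, enables
 * DSC on DP links that support it, and fills in scaling, audio and
 * PSR/VSC infopacket state.
 */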
4625 static struct dc_stream_state *
4626 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4627 		       const struct drm_display_mode *drm_mode,
4628 		       const struct dm_connector_state *dm_state,
4629 		       const struct dc_stream_state *old_stream,
4630 		       int requested_bpc)
4631 {
4632 	struct drm_display_mode *preferred_mode = NULL;
4633 	struct drm_connector *drm_connector;
4634 	const struct drm_connector_state *con_state =
4635 		dm_state ? &dm_state->base : NULL;
4636 	struct dc_stream_state *stream = NULL;
4637 	struct drm_display_mode mode = *drm_mode;
4638 	bool native_mode_found = false;
4639 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4640 	int mode_refresh;
4641 	int preferred_refresh = 0;
4642 #if defined(CONFIG_DRM_AMD_DC_DCN)
4643 	struct dsc_dec_dpcd_caps dsc_caps;
4644 #endif
4645 	uint32_t link_bandwidth_kbps;
4646 
	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
4649 		DRM_ERROR("aconnector is NULL!\n");
4650 		return stream;
4651 	}
4652 
4653 	drm_connector = &aconnector->base;
4654 
4655 	if (!aconnector->dc_sink) {
4656 		sink = create_fake_sink(aconnector);
4657 		if (!sink)
4658 			return stream;
4659 	} else {
4660 		sink = aconnector->dc_sink;
4661 		dc_sink_retain(sink);
4662 	}
4663 
4664 	stream = dc_create_stream_for_sink(sink);
4665 
4666 	if (stream == NULL) {
4667 		DRM_ERROR("Failed to create stream for sink!\n");
4668 		goto finish;
4669 	}
4670 
4671 	stream->dm_stream_context = aconnector;
4672 
4673 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4674 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4675 
4676 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4677 		/* Search for preferred mode */
4678 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4679 			native_mode_found = true;
4680 			break;
4681 		}
4682 	}
4683 	if (!native_mode_found)
4684 		preferred_mode = list_first_entry_or_null(
4685 				&aconnector->base.modes,
4686 				struct drm_display_mode,
4687 				head);
4688 
4689 	mode_refresh = drm_mode_vrefresh(&mode);
4690 
4691 	if (preferred_mode == NULL) {
		/*
		 * This may not be an error: the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the mode list may not be filled in yet.
		 */
4698 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4699 	} else {
4700 		decide_crtc_timing_for_drm_display_mode(
4701 				&mode, preferred_mode,
4702 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4703 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4704 	}
4705 
4706 	if (!dm_state)
4707 		drm_mode_set_crtcinfo(&mode, 0);
4708 
	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * copy the VIC and polarities of the old timings.
	 */
4713 	if (!scale || mode_refresh != preferred_refresh)
4714 		fill_stream_properties_from_drm_display_mode(stream,
4715 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4716 	else
4717 		fill_stream_properties_from_drm_display_mode(stream,
4718 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4719 
4720 	stream->timing.flags.DSC = 0;
4721 
4722 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4723 #if defined(CONFIG_DRM_AMD_DC_DCN)
4724 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4725 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4726 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4727 				      &dsc_caps);
4728 #endif
4729 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4730 							     dc_link_get_link_cap(aconnector->dc_link));
4731 
4732 #if defined(CONFIG_DRM_AMD_DC_DCN)
4733 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4734 			/* Set DSC policy according to dsc_clock_en */
4735 			dc_dsc_policy_set_enable_dsc_when_not_needed(
4736 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4737 
4738 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4739 						  &dsc_caps,
4740 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4741 						  link_bandwidth_kbps,
4742 						  &stream->timing,
4743 						  &stream->timing.dsc_cfg))
4744 				stream->timing.flags.DSC = 1;
4745 			/* Overwrite the stream flag if DSC is enabled through debugfs */
4746 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4747 				stream->timing.flags.DSC = 1;
4748 
4749 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4750 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4751 
4752 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4753 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4754 
4755 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4756 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4757 		}
4758 #endif
4759 	}
4760 
4761 	update_stream_scaling_settings(&mode, dm_state, stream);
4762 
4763 	fill_audio_info(
4764 		&stream->audio_info,
4765 		drm_connector,
4766 		sink);
4767 
4768 	update_stream_signal(stream, sink);
4769 
4770 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4771 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4772 
4773 	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC infopacket.
		 */
4778 		stream->use_vsc_sdp_for_colorimetry = false;
4779 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4780 			stream->use_vsc_sdp_for_colorimetry =
4781 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4782 		} else {
4783 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4784 				stream->use_vsc_sdp_for_colorimetry = true;
4785 		}
4786 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4787 	}
4788 finish:
4789 	dc_sink_release(sink);
4790 
4791 	return stream;
4792 }
4793 
4794 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4795 {
4796 	drm_crtc_cleanup(crtc);
4797 	kfree(crtc);
4798 }
4799 
4800 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4801 				  struct drm_crtc_state *state)
4802 {
4803 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4804 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
4814 }
4815 
4816 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4817 {
4818 	struct dm_crtc_state *state;
4819 
4820 	if (crtc->state)
4821 		dm_crtc_destroy_state(crtc, crtc->state);
4822 
4823 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4824 	if (WARN_ON(!state))
4825 		return;
4826 
4827 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4828 }
4829 
4830 static struct drm_crtc_state *
4831 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4832 {
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
4839 
4840 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4841 	if (!state)
4842 		return NULL;
4843 
4844 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4845 
4846 	if (cur->stream) {
4847 		state->stream = cur->stream;
4848 		dc_stream_retain(state->stream);
4849 	}
4850 
4851 	state->active_planes = cur->active_planes;
4852 	state->vrr_infopacket = cur->vrr_infopacket;
4853 	state->abm_level = cur->abm_level;
4854 	state->vrr_supported = cur->vrr_supported;
4855 	state->freesync_config = cur->freesync_config;
4856 	state->crc_src = cur->crc_src;
4857 	state->cm_has_degamma = cur->cm_has_degamma;
4858 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4859 
	/* TODO: Duplicate dc_stream once the stream object is flattened */
4861 
4862 	return &state->base;
4863 }
4864 
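/*
 * Enable or disable the VUPDATE interrupt of the CRTC's output timing
 * generator; it is driven separately from vblank so it can be turned on
 * only when needed, e.g. while VRR is active.
 */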
4865 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4866 {
4867 	enum dc_irq_source irq_source;
4868 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4869 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4870 	int rc;
4871 
4872 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4873 
4874 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4875 
4876 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4877 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4878 	return rc;
4879 }
4880 
4881 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4882 {
4883 	enum dc_irq_source irq_source;
4884 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4885 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4886 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4887 	int rc = 0;
4888 
4889 	if (enable) {
4890 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4891 		if (amdgpu_dm_vrr_active(acrtc_state))
4892 			rc = dm_set_vupdate_irq(crtc, true);
4893 	} else {
4894 		/* vblank irq off -> vupdate irq off */
4895 		rc = dm_set_vupdate_irq(crtc, false);
4896 	}
4897 
4898 	if (rc)
4899 		return rc;
4900 
4901 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4902 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4903 }
4904 
4905 static int dm_enable_vblank(struct drm_crtc *crtc)
4906 {
4907 	return dm_set_vblank(crtc, true);
4908 }
4909 
4910 static void dm_disable_vblank(struct drm_crtc *crtc)
4911 {
4912 	dm_set_vblank(crtc, false);
4913 }
4914 
/* Implement only the options currently available for the driver */
4916 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4917 	.reset = dm_crtc_reset_state,
4918 	.destroy = amdgpu_dm_crtc_destroy,
4919 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4920 	.set_config = drm_atomic_helper_set_config,
4921 	.page_flip = drm_atomic_helper_page_flip,
4922 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4923 	.atomic_destroy_state = dm_crtc_destroy_state,
4924 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4925 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4926 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4927 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4928 	.enable_vblank = dm_enable_vblank,
4929 	.disable_vblank = dm_disable_vblank,
4930 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4931 };
4932 
4933 static enum drm_connector_status
4934 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4935 {
4936 	bool connected;
4937 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4938 
4939 	/*
4940 	 * Notes:
4941 	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of a user-mode ioctl,
	 * which makes it a bad place for *any* MST-related activity.
4944 	 */
4945 
4946 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4947 	    !aconnector->fake_enable)
4948 		connected = (aconnector->dc_sink != NULL);
4949 	else
4950 		connected = (aconnector->base.force == DRM_FORCE_ON);
4951 
4952 	update_subconnector_property(aconnector);
4953 
4954 	return (connected ? connector_status_connected :
4955 			connector_status_disconnected);
4956 }
4957 
4958 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4959 					    struct drm_connector_state *connector_state,
4960 					    struct drm_property *property,
4961 					    uint64_t val)
4962 {
4963 	struct drm_device *dev = connector->dev;
4964 	struct amdgpu_device *adev = drm_to_adev(dev);
4965 	struct dm_connector_state *dm_old_state =
4966 		to_dm_connector_state(connector->state);
4967 	struct dm_connector_state *dm_new_state =
4968 		to_dm_connector_state(connector_state);
4969 
4970 	int ret = -EINVAL;
4971 
4972 	if (property == dev->mode_config.scaling_mode_property) {
4973 		enum amdgpu_rmx_type rmx_type;
4974 
4975 		switch (val) {
4976 		case DRM_MODE_SCALE_CENTER:
4977 			rmx_type = RMX_CENTER;
4978 			break;
4979 		case DRM_MODE_SCALE_ASPECT:
4980 			rmx_type = RMX_ASPECT;
4981 			break;
4982 		case DRM_MODE_SCALE_FULLSCREEN:
4983 			rmx_type = RMX_FULL;
4984 			break;
4985 		case DRM_MODE_SCALE_NONE:
4986 		default:
4987 			rmx_type = RMX_OFF;
4988 			break;
4989 		}
4990 
4991 		if (dm_old_state->scaling == rmx_type)
4992 			return 0;
4993 
4994 		dm_new_state->scaling = rmx_type;
4995 		ret = 0;
4996 	} else if (property == adev->mode_info.underscan_hborder_property) {
4997 		dm_new_state->underscan_hborder = val;
4998 		ret = 0;
4999 	} else if (property == adev->mode_info.underscan_vborder_property) {
5000 		dm_new_state->underscan_vborder = val;
5001 		ret = 0;
5002 	} else if (property == adev->mode_info.underscan_property) {
5003 		dm_new_state->underscan_enable = val;
5004 		ret = 0;
5005 	} else if (property == adev->mode_info.abm_level_property) {
5006 		dm_new_state->abm_level = val;
5007 		ret = 0;
5008 	}
5009 
5010 	return ret;
5011 }
5012 
5013 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5014 					    const struct drm_connector_state *state,
5015 					    struct drm_property *property,
5016 					    uint64_t *val)
5017 {
5018 	struct drm_device *dev = connector->dev;
5019 	struct amdgpu_device *adev = drm_to_adev(dev);
5020 	struct dm_connector_state *dm_state =
5021 		to_dm_connector_state(state);
5022 	int ret = -EINVAL;
5023 
5024 	if (property == dev->mode_config.scaling_mode_property) {
5025 		switch (dm_state->scaling) {
5026 		case RMX_CENTER:
5027 			*val = DRM_MODE_SCALE_CENTER;
5028 			break;
5029 		case RMX_ASPECT:
5030 			*val = DRM_MODE_SCALE_ASPECT;
5031 			break;
5032 		case RMX_FULL:
5033 			*val = DRM_MODE_SCALE_FULLSCREEN;
5034 			break;
5035 		case RMX_OFF:
5036 		default:
5037 			*val = DRM_MODE_SCALE_NONE;
5038 			break;
5039 		}
5040 		ret = 0;
5041 	} else if (property == adev->mode_info.underscan_hborder_property) {
5042 		*val = dm_state->underscan_hborder;
5043 		ret = 0;
5044 	} else if (property == adev->mode_info.underscan_vborder_property) {
5045 		*val = dm_state->underscan_vborder;
5046 		ret = 0;
5047 	} else if (property == adev->mode_info.underscan_property) {
5048 		*val = dm_state->underscan_enable;
5049 		ret = 0;
5050 	} else if (property == adev->mode_info.abm_level_property) {
5051 		*val = dm_state->abm_level;
5052 		ret = 0;
5053 	}
5054 
5055 	return ret;
5056 }
5057 
5058 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5059 {
5060 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5061 
5062 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5063 }
5064 
5065 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5066 {
5067 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5068 	const struct dc_link *link = aconnector->dc_link;
5069 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5070 	struct amdgpu_display_manager *dm = &adev->dm;
5071 
5072 	/*
	 * Call only if mst_mgr was initialized before, since it's not done
5074 	 * for all connector types.
5075 	 */
5076 	if (aconnector->mst_mgr.dev)
5077 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5078 
5079 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5080 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5081 
5082 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5083 	    link->type != dc_connection_none &&
5084 	    dm->backlight_dev) {
5085 		backlight_device_unregister(dm->backlight_dev);
5086 		dm->backlight_dev = NULL;
5087 	}
5088 #endif
5089 
5090 	if (aconnector->dc_em_sink)
5091 		dc_sink_release(aconnector->dc_em_sink);
5092 	aconnector->dc_em_sink = NULL;
5093 	if (aconnector->dc_sink)
5094 		dc_sink_release(aconnector->dc_sink);
5095 	aconnector->dc_sink = NULL;
5096 
5097 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5098 	drm_connector_unregister(connector);
5099 	drm_connector_cleanup(connector);
5100 	if (aconnector->i2c) {
5101 		i2c_del_adapter(&aconnector->i2c->base);
5102 		kfree(aconnector->i2c);
5103 	}
5104 	kfree(aconnector->dm_dp_aux.aux.name);
5105 
5106 	kfree(connector);
5107 }
5108 
5109 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5110 {
5111 	struct dm_connector_state *state =
5112 		to_dm_connector_state(connector->state);
5113 
5114 	if (connector->state)
5115 		__drm_atomic_helper_connector_destroy_state(connector->state);
5116 
5117 	kfree(state);
5118 
5119 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5120 
5121 	if (state) {
5122 		state->scaling = RMX_OFF;
5123 		state->underscan_enable = false;
5124 		state->underscan_hborder = 0;
5125 		state->underscan_vborder = 0;
5126 		state->base.max_requested_bpc = 8;
5127 		state->vcpi_slots = 0;
5128 		state->pbn = 0;
5129 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5130 			state->abm_level = amdgpu_dm_abm_level;
5131 
5132 		__drm_atomic_helper_connector_reset(connector, &state->base);
5133 	}
5134 }
5135 
5136 struct drm_connector_state *
5137 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5138 {
5139 	struct dm_connector_state *state =
5140 		to_dm_connector_state(connector->state);
5141 
5142 	struct dm_connector_state *new_state =
5143 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5144 
5145 	if (!new_state)
5146 		return NULL;
5147 
5148 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5149 
5150 	new_state->freesync_capable = state->freesync_capable;
5151 	new_state->abm_level = state->abm_level;
5152 	new_state->scaling = state->scaling;
5153 	new_state->underscan_enable = state->underscan_enable;
5154 	new_state->underscan_hborder = state->underscan_hborder;
5155 	new_state->underscan_vborder = state->underscan_vborder;
5156 	new_state->vcpi_slots = state->vcpi_slots;
5157 	new_state->pbn = state->pbn;
5158 	return &new_state->base;
5159 }
5160 
5161 static int
5162 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5163 {
5164 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5165 		to_amdgpu_dm_connector(connector);
5166 	int r;
5167 
5168 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5169 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5170 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5171 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5172 		if (r)
5173 			return r;
5174 	}
5175 
5176 #if defined(CONFIG_DEBUG_FS)
5177 	connector_debugfs_init(amdgpu_dm_connector);
5178 #endif
5179 
5180 	return 0;
5181 }
5182 
5183 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5184 	.reset = amdgpu_dm_connector_funcs_reset,
5185 	.detect = amdgpu_dm_connector_detect,
5186 	.fill_modes = drm_helper_probe_single_connector_modes,
5187 	.destroy = amdgpu_dm_connector_destroy,
5188 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5189 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5190 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5191 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5192 	.late_register = amdgpu_dm_connector_late_register,
5193 	.early_unregister = amdgpu_dm_connector_unregister
5194 };
5195 
5196 static int get_modes(struct drm_connector *connector)
5197 {
5198 	return amdgpu_dm_connector_get_modes(connector);
5199 }
5200 
5201 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5202 {
5203 	struct dc_sink_init_data init_params = {
5204 			.link = aconnector->dc_link,
5205 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5206 	};
5207 	struct edid *edid;
5208 
5209 	if (!aconnector->base.edid_blob_ptr) {
5210 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5211 				aconnector->base.name);
5212 
5213 		aconnector->base.force = DRM_FORCE_OFF;
5214 		aconnector->base.override_edid = false;
5215 		return;
5216 	}
5217 
5218 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5219 
5220 	aconnector->edid = edid;
5221 
5222 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5223 		aconnector->dc_link,
5224 		(uint8_t *)edid,
5225 		(edid->extensions + 1) * EDID_LENGTH,
5226 		&init_params);
5227 
5228 	if (aconnector->base.force == DRM_FORCE_ON) {
5229 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5230 		aconnector->dc_link->local_sink :
5231 		aconnector->dc_em_sink;
5232 		dc_sink_retain(aconnector->dc_sink);
5233 	}
5234 }
5235 
5236 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5237 {
5238 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5239 
5240 	/*
5241 	 * In case of headless boot with force on for DP managed connector
5242 	 * Those settings have to be != 0 to get initial modeset
5243 	 */
5244 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5245 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5246 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5247 	}
5248 
5249 
5250 	aconnector->base.override_edid = true;
5251 	create_eml_sink(aconnector);
5252 }
5253 
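/*
 * Create a stream for the requested mode and validate it against DC.
 * On validation failure, retry with progressively lower color depth
 * (down to 6 bpc), since a timing may only fit the link at a lower bpc.
 */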
5254 static struct dc_stream_state *
5255 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5256 				const struct drm_display_mode *drm_mode,
5257 				const struct dm_connector_state *dm_state,
5258 				const struct dc_stream_state *old_stream)
5259 {
5260 	struct drm_connector *connector = &aconnector->base;
5261 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5262 	struct dc_stream_state *stream;
5263 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5264 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5265 	enum dc_status dc_result = DC_OK;
5266 
5267 	do {
5268 		stream = create_stream_for_sink(aconnector, drm_mode,
5269 						dm_state, old_stream,
5270 						requested_bpc);
5271 		if (stream == NULL) {
5272 			DRM_ERROR("Failed to create stream for sink!\n");
5273 			break;
5274 		}
5275 
5276 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5277 
5278 		if (dc_result != DC_OK) {
5279 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5280 				      drm_mode->hdisplay,
5281 				      drm_mode->vdisplay,
5282 				      drm_mode->clock,
5283 				      dc_result,
5284 				      dc_status_to_str(dc_result));
5285 
5286 			dc_stream_release(stream);
5287 			stream = NULL;
5288 			requested_bpc -= 2; /* lower bpc to retry validation */
5289 		}
5290 
5291 	} while (stream == NULL && requested_bpc >= 6);
5292 
5293 	return stream;
5294 }
5295 
5296 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5297 				   struct drm_display_mode *mode)
5298 {
5299 	int result = MODE_ERROR;
5300 	struct dc_sink *dc_sink;
5301 	/* TODO: Unhardcode stream count */
5302 	struct dc_stream_state *stream;
5303 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5304 
5305 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5306 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5307 		return result;
5308 
5309 	/*
5310 	 * Only run this the first time mode_valid is called to initilialize
5311 	 * EDID mgmt
5312 	 */
5313 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5314 		!aconnector->dc_em_sink)
5315 		handle_edid_mgmt(aconnector);
5316 
5317 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5318 
5319 	if (dc_sink == NULL) {
5320 		DRM_ERROR("dc_sink is NULL!\n");
5321 		goto fail;
5322 	}
5323 
5324 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5325 	if (stream) {
5326 		dc_stream_release(stream);
5327 		result = MODE_OK;
5328 	}
5329 
5330 fail:
	/* TODO: error handling */
5332 	return result;
5333 }
5334 
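/*
 * Pack the connector's HDR static metadata into a DC info packet. The
 * payload is the fixed 26-byte Dynamic Range and Mastering infoframe plus
 * a 4-byte header; the header layout differs between HDMI and DP/eDP.
 */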
5335 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5336 				struct dc_info_packet *out)
5337 {
5338 	struct hdmi_drm_infoframe frame;
5339 	unsigned char buf[30]; /* 26 + 4 */
5340 	ssize_t len;
5341 	int ret, i;
5342 
5343 	memset(out, 0, sizeof(*out));
5344 
5345 	if (!state->hdr_output_metadata)
5346 		return 0;
5347 
5348 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5349 	if (ret)
5350 		return ret;
5351 
5352 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5353 	if (len < 0)
5354 		return (int)len;
5355 
5356 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5357 	if (len != 30)
5358 		return -EINVAL;
5359 
5360 	/* Prepare the infopacket for DC. */
5361 	switch (state->connector->connector_type) {
5362 	case DRM_MODE_CONNECTOR_HDMIA:
5363 		out->hb0 = 0x87; /* type */
5364 		out->hb1 = 0x01; /* version */
5365 		out->hb2 = 0x1A; /* length */
5366 		out->sb[0] = buf[3]; /* checksum */
5367 		i = 1;
5368 		break;
5369 
5370 	case DRM_MODE_CONNECTOR_DisplayPort:
5371 	case DRM_MODE_CONNECTOR_eDP:
5372 		out->hb0 = 0x00; /* sdp id, zero */
5373 		out->hb1 = 0x87; /* type */
5374 		out->hb2 = 0x1D; /* payload len - 1 */
5375 		out->hb3 = (0x13 << 2); /* sdp version */
5376 		out->sb[0] = 0x01; /* version */
5377 		out->sb[1] = 0x1A; /* length */
5378 		i = 2;
5379 		break;
5380 
5381 	default:
5382 		return -EINVAL;
5383 	}
5384 
5385 	memcpy(&out->sb[i], &buf[4], 26);
5386 	out->valid = true;
5387 
5388 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5389 		       sizeof(out->sb), false);
5390 
5391 	return 0;
5392 }
5393 
5394 static bool
5395 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5396 			  const struct drm_connector_state *new_state)
5397 {
5398 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5399 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5400 
5401 	if (old_blob != new_blob) {
5402 		if (old_blob && new_blob &&
5403 		    old_blob->length == new_blob->length)
5404 			return memcmp(old_blob->data, new_blob->data,
5405 				      old_blob->length);
5406 
5407 		return true;
5408 	}
5409 
5410 	return false;
5411 }
5412 
5413 static int
5414 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5415 				 struct drm_atomic_state *state)
5416 {
5417 	struct drm_connector_state *new_con_state =
5418 		drm_atomic_get_new_connector_state(state, conn);
5419 	struct drm_connector_state *old_con_state =
5420 		drm_atomic_get_old_connector_state(state, conn);
5421 	struct drm_crtc *crtc = new_con_state->crtc;
5422 	struct drm_crtc_state *new_crtc_state;
5423 	int ret;
5424 
5425 	if (!crtc)
5426 		return 0;
5427 
5428 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5429 		struct dc_info_packet hdr_infopacket;
5430 
5431 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5432 		if (ret)
5433 			return ret;
5434 
5435 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5436 		if (IS_ERR(new_crtc_state))
5437 			return PTR_ERR(new_crtc_state);
5438 
5439 		/*
5440 		 * DC considers the stream backends changed if the
5441 		 * static metadata changes. Forcing the modeset also
5442 		 * gives a simple way for userspace to switch from
5443 		 * 8bpc to 10bpc when setting the metadata to enter
5444 		 * or exit HDR.
5445 		 *
5446 		 * Changing the static metadata after it's been
5447 		 * set is permissible, however. So only force a
5448 		 * modeset if we're entering or exiting HDR.
5449 		 */
5450 		new_crtc_state->mode_changed =
5451 			!old_con_state->hdr_output_metadata ||
5452 			!new_con_state->hdr_output_metadata;
5453 	}
5454 
5455 	return 0;
5456 }
5457 
5458 static const struct drm_connector_helper_funcs
5459 amdgpu_dm_connector_helper_funcs = {
5460 	/*
5461 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5462 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5463 	 * are missing after user start lightdm. So we need to renew modes list.
5464 	 * in get_modes call back, not just return the modes count
5465 	 */
5466 	.get_modes = get_modes,
5467 	.mode_valid = amdgpu_dm_connector_mode_valid,
5468 	.atomic_check = amdgpu_dm_connector_atomic_check,
5469 };
5470 
5471 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5472 {
5473 }
5474 
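/*
 * Count the non-cursor planes that will be enabled on the CRTC. A plane
 * without a new state in this commit previously passed validation and is
 * counted as-is; one with a new state counts only if it has a framebuffer.
 */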
5475 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5476 {
5477 	struct drm_atomic_state *state = new_crtc_state->state;
5478 	struct drm_plane *plane;
5479 	int num_active = 0;
5480 
5481 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5482 		struct drm_plane_state *new_plane_state;
5483 
5484 		/* Cursor planes are "fake". */
5485 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5486 			continue;
5487 
5488 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5489 
5490 		if (!new_plane_state) {
5491 			/*
5492 			 * The plane is enable on the CRTC and hasn't changed
5493 			 * state. This means that it previously passed
5494 			 * validation and is therefore enabled.
5495 			 */
5496 			num_active += 1;
5497 			continue;
5498 		}
5499 
5500 		/* We need a framebuffer to be considered enabled. */
5501 		num_active += (new_plane_state->fb != NULL);
5502 	}
5503 
5504 	return num_active;
5505 }
5506 
5507 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5508 					 struct drm_crtc_state *new_crtc_state)
5509 {
5510 	struct dm_crtc_state *dm_new_crtc_state =
5511 		to_dm_crtc_state(new_crtc_state);
5512 
5513 	dm_new_crtc_state->active_planes = 0;
5514 
5515 	if (!dm_new_crtc_state->stream)
5516 		return;
5517 
5518 	dm_new_crtc_state->active_planes =
5519 		count_crtc_active_planes(new_crtc_state);
5520 }
5521 
5522 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5523 				       struct drm_crtc_state *state)
5524 {
5525 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5526 	struct dc *dc = adev->dm.dc;
5527 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5528 	int ret = -EINVAL;
5529 
5530 	dm_update_crtc_active_planes(crtc, state);
5531 
5532 	if (unlikely(!dm_crtc_state->stream &&
5533 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5534 		WARN_ON(1);
5535 		return ret;
5536 	}
5537 
5538 	/*
5539 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5540 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5541 	 * planes are disabled, which is not supported by the hardware. And there is legacy
5542 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5543 	 */
5544 	if (state->enable &&
5545 	    !(state->plane_mask & drm_plane_mask(crtc->primary)))
5546 		return -EINVAL;
5547 
5548 	/* In some use cases, like reset, no stream is attached */
5549 	if (!dm_crtc_state->stream)
5550 		return 0;
5551 
5552 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5553 		return 0;
5554 
5555 	return ret;
5556 }
5557 
5558 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5559 				      const struct drm_display_mode *mode,
5560 				      struct drm_display_mode *adjusted_mode)
5561 {
5562 	return true;
5563 }
5564 
5565 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5566 	.disable = dm_crtc_helper_disable,
5567 	.atomic_check = dm_crtc_helper_atomic_check,
5568 	.mode_fixup = dm_crtc_helper_mode_fixup,
5569 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5570 };
5571 
5572 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5573 {
5574 
5575 }
5576 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
5597 
5598 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5599 					  struct drm_crtc_state *crtc_state,
5600 					  struct drm_connector_state *conn_state)
5601 {
5602 	struct drm_atomic_state *state = crtc_state->state;
5603 	struct drm_connector *connector = conn_state->connector;
5604 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5605 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5606 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5607 	struct drm_dp_mst_topology_mgr *mst_mgr;
5608 	struct drm_dp_mst_port *mst_port;
5609 	enum dc_color_depth color_depth;
5610 	int clock, bpp = 0;
5611 	bool is_y420 = false;
5612 
5613 	if (!aconnector->port || !aconnector->dc_sink)
5614 		return 0;
5615 
5616 	mst_port = aconnector->port;
5617 	mst_mgr = &aconnector->mst_port->mst_mgr;
5618 
5619 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5620 		return 0;
5621 
5622 	if (!state->duplicated) {
5623 		int max_bpc = conn_state->max_requested_bpc;
5624 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5625 				aconnector->force_yuv420_output;
5626 		color_depth = convert_color_depth_from_display_info(connector,
5627 								    is_y420,
5628 								    max_bpc);
5629 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5630 		clock = adjusted_mode->clock;
5631 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5632 	}
5633 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5634 									   mst_mgr,
5635 									   mst_port,
5636 									   dm_new_connector_state->pbn,
5637 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5638 	if (dm_new_connector_state->vcpi_slots < 0) {
5639 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5640 		return dm_new_connector_state->vcpi_slots;
5641 	}
5642 	return 0;
5643 }
5644 
5645 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5646 	.disable = dm_encoder_helper_disable,
5647 	.atomic_check = dm_encoder_helper_atomic_check
5648 };
5649 
5650 #if defined(CONFIG_DRM_AMD_DC_DCN)
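/*
 * Walk the MST connectors in the atomic state and redo their VCPI/PBN
 * allocation based on whether the matching DC stream has DSC enabled,
 * using the DSC bits-per-pixel instead of the uncompressed rate.
 */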
5651 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5652 					    struct dc_state *dc_state)
5653 {
5654 	struct dc_stream_state *stream = NULL;
5655 	struct drm_connector *connector;
5656 	struct drm_connector_state *new_con_state, *old_con_state;
5657 	struct amdgpu_dm_connector *aconnector;
5658 	struct dm_connector_state *dm_conn_state;
5659 	int i, j, clock, bpp;
5660 	int vcpi, pbn_div, pbn = 0;
5661 
5662 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5663 
5664 		aconnector = to_amdgpu_dm_connector(connector);
5665 
5666 		if (!aconnector->port)
5667 			continue;
5668 
5669 		if (!new_con_state || !new_con_state->crtc)
5670 			continue;
5671 
5672 		dm_conn_state = to_dm_connector_state(new_con_state);
5673 
5674 		for (j = 0; j < dc_state->stream_count; j++) {
5675 			stream = dc_state->streams[j];
5676 			if (!stream)
5677 				continue;
5678 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5680 				break;
5681 
5682 			stream = NULL;
5683 		}
5684 
5685 		if (!stream)
5686 			continue;
5687 
5688 		if (stream->timing.flags.DSC != 1) {
5689 			drm_dp_mst_atomic_enable_dsc(state,
5690 						     aconnector->port,
5691 						     dm_conn_state->pbn,
5692 						     0,
5693 						     false);
5694 			continue;
5695 		}
5696 
5697 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5698 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5699 		clock = stream->timing.pix_clk_100hz / 10;
5700 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5701 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5702 						    aconnector->port,
5703 						    pbn, pbn_div,
5704 						    true);
5705 		if (vcpi < 0)
5706 			return vcpi;
5707 
5708 		dm_conn_state->pbn = pbn;
5709 		dm_conn_state->vcpi_slots = vcpi;
5710 	}
5711 	return 0;
5712 }
5713 #endif
5714 
5715 static void dm_drm_plane_reset(struct drm_plane *plane)
5716 {
5717 	struct dm_plane_state *amdgpu_state = NULL;
5718 
5719 	if (plane->state)
5720 		plane->funcs->atomic_destroy_state(plane, plane->state);
5721 
5722 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5723 	WARN_ON(amdgpu_state == NULL);
5724 
5725 	if (amdgpu_state)
5726 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5727 }
5728 
5729 static struct drm_plane_state *
5730 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5731 {
5732 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5733 
5734 	old_dm_plane_state = to_dm_plane_state(plane->state);
5735 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5736 	if (!dm_plane_state)
5737 		return NULL;
5738 
5739 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5740 
5741 	if (old_dm_plane_state->dc_state) {
5742 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5743 		dc_plane_state_retain(dm_plane_state->dc_state);
5744 	}
5745 
5746 	/* Framebuffer hasn't been updated yet, so retain old flags. */
5747 	dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5748 	dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5749 
5750 	return &dm_plane_state->base;
5751 }
5752 
5753 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5754 				struct drm_plane_state *state)
5755 {
5756 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5757 
5758 	if (dm_plane_state->dc_state)
5759 		dc_plane_state_release(dm_plane_state->dc_state);
5760 
5761 	drm_atomic_helper_plane_destroy_state(plane, state);
5762 }
5763 
5764 static const struct drm_plane_funcs dm_plane_funcs = {
5765 	.update_plane	= drm_atomic_helper_update_plane,
5766 	.disable_plane	= drm_atomic_helper_disable_plane,
5767 	.destroy	= drm_primary_helper_destroy,
5768 	.reset = dm_drm_plane_reset,
5769 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5770 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5771 };
5772 
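/*
 * Pin the framebuffer BO into a displayable domain and record its GPU
 * address in the amdgpu_framebuffer. For newly created planes, also fill
 * in the DC buffer attributes (tiling, plane size, DCC) here, since the
 * address is not known during atomic check.
 */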
5773 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5774 				      struct drm_plane_state *new_state)
5775 {
5776 	struct amdgpu_framebuffer *afb;
5777 	struct drm_gem_object *obj;
5778 	struct amdgpu_device *adev;
5779 	struct amdgpu_bo *rbo;
5780 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5781 	struct list_head list;
5782 	struct ttm_validate_buffer tv;
5783 	struct ww_acquire_ctx ticket;
5784 	uint32_t domain;
5785 	int r;
5786 
5787 	if (!new_state->fb) {
5788 		DRM_DEBUG_DRIVER("No FB bound\n");
5789 		return 0;
5790 	}
5791 
5792 	afb = to_amdgpu_framebuffer(new_state->fb);
5793 	obj = new_state->fb->obj[0];
5794 	rbo = gem_to_amdgpu_bo(obj);
5795 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5796 	INIT_LIST_HEAD(&list);
5797 
5798 	tv.bo = &rbo->tbo;
5799 	tv.num_shared = 1;
5800 	list_add(&tv.head, &list);
5801 
5802 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5803 	if (r) {
		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5805 		return r;
5806 	}
5807 
5808 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5809 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5810 	else
5811 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5812 
5813 	r = amdgpu_bo_pin(rbo, domain);
5814 	if (unlikely(r != 0)) {
5815 		if (r != -ERESTARTSYS)
5816 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5817 		ttm_eu_backoff_reservation(&ticket, &list);
5818 		return r;
5819 	}
5820 
5821 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5822 	if (unlikely(r != 0)) {
5823 		amdgpu_bo_unpin(rbo);
5824 		ttm_eu_backoff_reservation(&ticket, &list);
5825 		DRM_ERROR("%p bind failed\n", rbo);
5826 		return r;
5827 	}
5828 
5829 	ttm_eu_backoff_reservation(&ticket, &list);
5830 
5831 	afb->address = amdgpu_bo_gpu_offset(rbo);
5832 
5833 	amdgpu_bo_ref(rbo);
5834 
5835 	/**
5836 	 * We don't do surface updates on planes that have been newly created,
5837 	 * but we also don't have the afb->address during atomic check.
5838 	 *
5839 	 * Fill in buffer attributes depending on the address here, but only on
5840 	 * newly created planes since they're not being used by DC yet and this
5841 	 * won't modify global state.
5842 	 */
5843 	dm_plane_state_old = to_dm_plane_state(plane->state);
5844 	dm_plane_state_new = to_dm_plane_state(new_state);
5845 
5846 	if (dm_plane_state_new->dc_state &&
5847 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5848 		struct dc_plane_state *plane_state =
5849 			dm_plane_state_new->dc_state;
5850 		bool force_disable_dcc = !plane_state->dcc.enable;
5851 
5852 		fill_plane_buffer_attributes(
5853 			adev, afb, plane_state->format, plane_state->rotation,
5854 			dm_plane_state_new->tiling_flags,
5855 			&plane_state->tiling_info, &plane_state->plane_size,
5856 			&plane_state->dcc, &plane_state->address,
5857 			dm_plane_state_new->tmz_surface, force_disable_dcc);
5858 	}
5859 
5860 	return 0;
5861 }
5862 
5863 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5864 				       struct drm_plane_state *old_state)
5865 {
5866 	struct amdgpu_bo *rbo;
5867 	int r;
5868 
5869 	if (!old_state->fb)
5870 		return;
5871 
5872 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5873 	r = amdgpu_bo_reserve(rbo, false);
5874 	if (unlikely(r)) {
5875 		DRM_ERROR("failed to reserve rbo before unpin\n");
5876 		return;
5877 	}
5878 
5879 	amdgpu_bo_unpin(rbo);
5880 	amdgpu_bo_unreserve(rbo);
5881 	amdgpu_bo_unref(&rbo);
5882 }
5883 
5884 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5885 				       struct drm_crtc_state *new_crtc_state)
5886 {
5887 	int max_downscale = 0;
5888 	int max_upscale = INT_MAX;
5889 
5890 	/* TODO: These should be checked against DC plane caps */
5891 	return drm_atomic_helper_check_plane_state(
5892 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5893 }
5894 
5895 static int dm_plane_atomic_check(struct drm_plane *plane,
5896 				 struct drm_plane_state *state)
5897 {
5898 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
5899 	struct dc *dc = adev->dm.dc;
5900 	struct dm_plane_state *dm_plane_state;
5901 	struct dc_scaling_info scaling_info;
5902 	struct drm_crtc_state *new_crtc_state;
5903 	int ret;
5904 
5905 	dm_plane_state = to_dm_plane_state(state);
5906 
5907 	if (!dm_plane_state->dc_state)
5908 		return 0;
5909 
5910 	new_crtc_state =
5911 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
5912 	if (!new_crtc_state)
5913 		return -EINVAL;
5914 
5915 	ret = dm_plane_helper_check_state(state, new_crtc_state);
5916 	if (ret)
5917 		return ret;
5918 
5919 	ret = fill_dc_scaling_info(state, &scaling_info);
5920 	if (ret)
5921 		return ret;
5922 
5923 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5924 		return 0;
5925 
5926 	return -EINVAL;
5927 }
5928 
5929 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5930 				       struct drm_plane_state *new_plane_state)
5931 {
5932 	/* Only support async updates on cursor planes. */
5933 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5934 		return -EINVAL;
5935 
5936 	return 0;
5937 }
5938 
5939 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5940 					 struct drm_plane_state *new_state)
5941 {
5942 	struct drm_plane_state *old_state =
5943 		drm_atomic_get_old_plane_state(new_state->state, plane);
5944 
5945 	swap(plane->state->fb, new_state->fb);
5946 
5947 	plane->state->src_x = new_state->src_x;
5948 	plane->state->src_y = new_state->src_y;
5949 	plane->state->src_w = new_state->src_w;
5950 	plane->state->src_h = new_state->src_h;
5951 	plane->state->crtc_x = new_state->crtc_x;
5952 	plane->state->crtc_y = new_state->crtc_y;
5953 	plane->state->crtc_w = new_state->crtc_w;
5954 	plane->state->crtc_h = new_state->crtc_h;
5955 
5956 	handle_cursor_update(plane, old_state);
5957 }
5958 
5959 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5960 	.prepare_fb = dm_plane_helper_prepare_fb,
5961 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5962 	.atomic_check = dm_plane_atomic_check,
5963 	.atomic_async_check = dm_plane_atomic_async_check,
5964 	.atomic_async_update = dm_plane_atomic_async_update
5965 };
5966 
5967 /*
5968  * TODO: these are currently initialized to rgb formats only.
5969  * For future use cases we should either initialize them dynamically based on
5970  * plane capabilities, or initialize this array to all formats, so internal drm
5971  * check will succeed, and let DC implement proper check
5972  */
5973 static const uint32_t rgb_formats[] = {
5974 	DRM_FORMAT_XRGB8888,
5975 	DRM_FORMAT_ARGB8888,
5976 	DRM_FORMAT_RGBA8888,
5977 	DRM_FORMAT_XRGB2101010,
5978 	DRM_FORMAT_XBGR2101010,
5979 	DRM_FORMAT_ARGB2101010,
5980 	DRM_FORMAT_ABGR2101010,
5981 	DRM_FORMAT_XBGR8888,
5982 	DRM_FORMAT_ABGR8888,
5983 	DRM_FORMAT_RGB565,
5984 };
5985 
5986 static const uint32_t overlay_formats[] = {
5987 	DRM_FORMAT_XRGB8888,
5988 	DRM_FORMAT_ARGB8888,
5989 	DRM_FORMAT_RGBA8888,
5990 	DRM_FORMAT_XBGR8888,
5991 	DRM_FORMAT_ABGR8888,
5992 	DRM_FORMAT_RGB565
5993 };
5994 
5995 static const u32 cursor_formats[] = {
5996 	DRM_FORMAT_ARGB8888
5997 };
5998 
5999 static int get_plane_formats(const struct drm_plane *plane,
6000 			     const struct dc_plane_cap *plane_cap,
6001 			     uint32_t *formats, int max_formats)
6002 {
6003 	int i, num_formats = 0;
6004 
6005 	/*
6006 	 * TODO: Query support for each group of formats directly from
6007 	 * DC plane caps. This will require adding more formats to the
6008 	 * caps list.
6009 	 */
6010 
6011 	switch (plane->type) {
6012 	case DRM_PLANE_TYPE_PRIMARY:
6013 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6014 			if (num_formats >= max_formats)
6015 				break;
6016 
6017 			formats[num_formats++] = rgb_formats[i];
6018 		}
6019 
6020 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6021 			formats[num_formats++] = DRM_FORMAT_NV12;
6022 		if (plane_cap && plane_cap->pixel_format_support.p010)
6023 			formats[num_formats++] = DRM_FORMAT_P010;
6024 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6025 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6026 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6027 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6028 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6029 		}
6030 		break;
6031 
6032 	case DRM_PLANE_TYPE_OVERLAY:
6033 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6034 			if (num_formats >= max_formats)
6035 				break;
6036 
6037 			formats[num_formats++] = overlay_formats[i];
6038 		}
6039 		break;
6040 
6041 	case DRM_PLANE_TYPE_CURSOR:
6042 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6043 			if (num_formats >= max_formats)
6044 				break;
6045 
6046 			formats[num_formats++] = cursor_formats[i];
6047 		}
6048 		break;
6049 	}
6050 
6051 	return num_formats;
6052 }
6053 
6054 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6055 				struct drm_plane *plane,
6056 				unsigned long possible_crtcs,
6057 				const struct dc_plane_cap *plane_cap)
6058 {
6059 	uint32_t formats[32];
6060 	int num_formats;
6061 	int res = -EPERM;
6062 	unsigned int supported_rotations;
6063 
6064 	num_formats = get_plane_formats(plane, plane_cap, formats,
6065 					ARRAY_SIZE(formats));
6066 
6067 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6068 				       &dm_plane_funcs, formats, num_formats,
6069 				       NULL, plane->type, NULL);
6070 	if (res)
6071 		return res;
6072 
6073 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6074 	    plane_cap && plane_cap->per_pixel_alpha) {
6075 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6076 					  BIT(DRM_MODE_BLEND_PREMULTI);
6077 
6078 		drm_plane_create_alpha_property(plane);
6079 		drm_plane_create_blend_mode_property(plane, blend_caps);
6080 	}
6081 
6082 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6083 	    plane_cap &&
6084 	    (plane_cap->pixel_format_support.nv12 ||
6085 	     plane_cap->pixel_format_support.p010)) {
6086 		/* This only affects YUV formats. */
6087 		drm_plane_create_color_properties(
6088 			plane,
6089 			BIT(DRM_COLOR_YCBCR_BT601) |
6090 			BIT(DRM_COLOR_YCBCR_BT709) |
6091 			BIT(DRM_COLOR_YCBCR_BT2020),
6092 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6093 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6094 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6095 	}
6096 
6097 	supported_rotations =
6098 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6099 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6100 
6101 	if (dm->adev->asic_type >= CHIP_BONAIRE)
6102 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6103 						   supported_rotations);
6104 
6105 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6106 
6107 	/* Create (reset) the plane state */
6108 	if (plane->funcs->reset)
6109 		plane->funcs->reset(plane);
6110 
6111 	return 0;
6112 }
6113 
6114 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6115 			       struct drm_plane *plane,
6116 			       uint32_t crtc_index)
6117 {
6118 	struct amdgpu_crtc *acrtc = NULL;
6119 	struct drm_plane *cursor_plane;
6120 
6121 	int res = -ENOMEM;
6122 
6123 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6124 	if (!cursor_plane)
6125 		goto fail;
6126 
6127 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6128 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6129 
6130 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6131 	if (!acrtc)
6132 		goto fail;
6133 
6134 	res = drm_crtc_init_with_planes(
6135 			dm->ddev,
6136 			&acrtc->base,
6137 			plane,
6138 			cursor_plane,
6139 			&amdgpu_dm_crtc_funcs, NULL);
6140 
6141 	if (res)
6142 		goto fail;
6143 
6144 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6145 
6146 	/* Create (reset) the plane state */
6147 	if (acrtc->base.funcs->reset)
6148 		acrtc->base.funcs->reset(&acrtc->base);
6149 
6150 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6151 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6152 
6153 	acrtc->crtc_id = crtc_index;
6154 	acrtc->base.enabled = false;
6155 	acrtc->otg_inst = -1;
6156 
6157 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6158 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6159 				   true, MAX_COLOR_LUT_ENTRIES);
6160 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6161 
6162 	return 0;
6163 
6164 fail:
6165 	kfree(acrtc);
6166 	kfree(cursor_plane);
6167 	return res;
6168 }
6169 
6170 
6171 static int to_drm_connector_type(enum signal_type st)
6172 {
6173 	switch (st) {
6174 	case SIGNAL_TYPE_HDMI_TYPE_A:
6175 		return DRM_MODE_CONNECTOR_HDMIA;
6176 	case SIGNAL_TYPE_EDP:
6177 		return DRM_MODE_CONNECTOR_eDP;
6178 	case SIGNAL_TYPE_LVDS:
6179 		return DRM_MODE_CONNECTOR_LVDS;
6180 	case SIGNAL_TYPE_RGB:
6181 		return DRM_MODE_CONNECTOR_VGA;
6182 	case SIGNAL_TYPE_DISPLAY_PORT:
6183 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6184 		return DRM_MODE_CONNECTOR_DisplayPort;
6185 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6186 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6187 		return DRM_MODE_CONNECTOR_DVID;
6188 	case SIGNAL_TYPE_VIRTUAL:
6189 		return DRM_MODE_CONNECTOR_VIRTUAL;
6190 
6191 	default:
6192 		return DRM_MODE_CONNECTOR_Unknown;
6193 	}
6194 }
6195 
6196 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6197 {
6198 	struct drm_encoder *encoder;
6199 
6200 	/* There is only one encoder per connector */
6201 	drm_connector_for_each_possible_encoder(connector, encoder)
6202 		return encoder;
6203 
6204 	return NULL;
6205 }
6206 
6207 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6208 {
6209 	struct drm_encoder *encoder;
6210 	struct amdgpu_encoder *amdgpu_encoder;
6211 
6212 	encoder = amdgpu_dm_connector_to_encoder(connector);
6213 
6214 	if (encoder == NULL)
6215 		return;
6216 
6217 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6218 
6219 	amdgpu_encoder->native_mode.clock = 0;
6220 
6221 	if (!list_empty(&connector->probed_modes)) {
6222 		struct drm_display_mode *preferred_mode = NULL;
6223 
6224 		list_for_each_entry(preferred_mode,
6225 				    &connector->probed_modes,
6226 				    head) {
6227 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6228 				amdgpu_encoder->native_mode = *preferred_mode;
6229 
6230 			break;
6231 		}
6232 
6233 	}
6234 }
6235 
6236 static struct drm_display_mode *
6237 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6238 			     char *name,
6239 			     int hdisplay, int vdisplay)
6240 {
6241 	struct drm_device *dev = encoder->dev;
6242 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6243 	struct drm_display_mode *mode = NULL;
6244 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6245 
6246 	mode = drm_mode_duplicate(dev, native_mode);
6247 
6248 	if (mode == NULL)
6249 		return NULL;
6250 
6251 	mode->hdisplay = hdisplay;
6252 	mode->vdisplay = vdisplay;
6253 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6254 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6255 
	return mode;
}
6259 
6260 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6261 						 struct drm_connector *connector)
6262 {
6263 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6264 	struct drm_display_mode *mode = NULL;
6265 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6266 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6267 				to_amdgpu_dm_connector(connector);
6268 	int i;
6269 	int n;
6270 	struct mode_size {
6271 		char name[DRM_DISPLAY_MODE_LEN];
6272 		int w;
6273 		int h;
6274 	} common_modes[] = {
6275 		{  "640x480",  640,  480},
6276 		{  "800x600",  800,  600},
6277 		{ "1024x768", 1024,  768},
6278 		{ "1280x720", 1280,  720},
6279 		{ "1280x800", 1280,  800},
6280 		{"1280x1024", 1280, 1024},
6281 		{ "1440x900", 1440,  900},
6282 		{"1680x1050", 1680, 1050},
6283 		{"1600x1200", 1600, 1200},
6284 		{"1920x1080", 1920, 1080},
6285 		{"1920x1200", 1920, 1200}
6286 	};
6287 
6288 	n = ARRAY_SIZE(common_modes);
6289 
6290 	for (i = 0; i < n; i++) {
6291 		struct drm_display_mode *curmode = NULL;
6292 		bool mode_existed = false;
6293 
6294 		if (common_modes[i].w > native_mode->hdisplay ||
6295 		    common_modes[i].h > native_mode->vdisplay ||
6296 		   (common_modes[i].w == native_mode->hdisplay &&
6297 		    common_modes[i].h == native_mode->vdisplay))
6298 			continue;
6299 
6300 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6301 			if (common_modes[i].w == curmode->hdisplay &&
6302 			    common_modes[i].h == curmode->vdisplay) {
6303 				mode_existed = true;
6304 				break;
6305 			}
6306 		}
6307 
6308 		if (mode_existed)
6309 			continue;
6310 
6311 		mode = amdgpu_dm_create_common_mode(encoder,
6312 				common_modes[i].name, common_modes[i].w,
6313 				common_modes[i].h);
6314 		drm_mode_probed_add(connector, mode);
6315 		amdgpu_dm_connector->num_modes++;
6316 	}
6317 }
6318 
6319 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6320 					      struct edid *edid)
6321 {
6322 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6323 			to_amdgpu_dm_connector(connector);
6324 
6325 	if (edid) {
6326 		/* empty probed_modes */
6327 		INIT_LIST_HEAD(&connector->probed_modes);
6328 		amdgpu_dm_connector->num_modes =
6329 				drm_add_edid_modes(connector, edid);
6330 
		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode and modes later in the probed list
		 * could have a higher preferred resolution. For example,
		 * 3840x2160 in the base EDID preferred timing, and 4096x2160
		 * in a later DID extension block.
		 */
6339 		drm_mode_sort(&connector->probed_modes);
6340 		amdgpu_dm_get_native_mode(connector);
6341 	} else {
6342 		amdgpu_dm_connector->num_modes = 0;
6343 	}
6344 }
6345 
6346 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6347 {
6348 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6349 			to_amdgpu_dm_connector(connector);
6350 	struct drm_encoder *encoder;
6351 	struct edid *edid = amdgpu_dm_connector->edid;
6352 
6353 	encoder = amdgpu_dm_connector_to_encoder(connector);
6354 
6355 	if (!edid || !drm_edid_is_valid(edid)) {
6356 		amdgpu_dm_connector->num_modes =
6357 				drm_add_modes_noedid(connector, 640, 480);
6358 	} else {
6359 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6360 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6361 	}
6362 	amdgpu_dm_fbc_init(connector);
6363 
6364 	return amdgpu_dm_connector->num_modes;
6365 }
6366 
6367 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6368 				     struct amdgpu_dm_connector *aconnector,
6369 				     int connector_type,
6370 				     struct dc_link *link,
6371 				     int link_index)
6372 {
6373 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6374 
6375 	/*
6376 	 * Some of the properties below require access to state, like bpc.
6377 	 * Allocate some default initial connector state with our reset helper.
6378 	 */
6379 	if (aconnector->base.funcs->reset)
6380 		aconnector->base.funcs->reset(&aconnector->base);
6381 
6382 	aconnector->connector_id = link_index;
6383 	aconnector->dc_link = link;
6384 	aconnector->base.interlace_allowed = false;
6385 	aconnector->base.doublescan_allowed = false;
6386 	aconnector->base.stereo_allowed = false;
6387 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6388 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6389 	aconnector->audio_inst = -1;
6390 	mutex_init(&aconnector->hpd_lock);
6391 
6392 	/*
6393 	 * configure support HPD hot plug connector_>polled default value is 0
6394 	 * which means HPD hot plug not supported
6395 	 */
6396 	switch (connector_type) {
6397 	case DRM_MODE_CONNECTOR_HDMIA:
6398 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6399 		aconnector->base.ycbcr_420_allowed =
6400 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6401 		break;
6402 	case DRM_MODE_CONNECTOR_DisplayPort:
6403 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6404 		aconnector->base.ycbcr_420_allowed =
6405 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6406 		break;
6407 	case DRM_MODE_CONNECTOR_DVID:
6408 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6409 		break;
6410 	default:
6411 		break;
6412 	}
6413 
6414 	drm_object_attach_property(&aconnector->base.base,
6415 				dm->ddev->mode_config.scaling_mode_property,
6416 				DRM_MODE_SCALE_NONE);
6417 
6418 	drm_object_attach_property(&aconnector->base.base,
6419 				adev->mode_info.underscan_property,
6420 				UNDERSCAN_OFF);
6421 	drm_object_attach_property(&aconnector->base.base,
6422 				adev->mode_info.underscan_hborder_property,
6423 				0);
6424 	drm_object_attach_property(&aconnector->base.base,
6425 				adev->mode_info.underscan_vborder_property,
6426 				0);
6427 
6428 	if (!aconnector->mst_port)
6429 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6430 
	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
6432 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6433 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6434 
6435 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6436 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6437 		drm_object_attach_property(&aconnector->base.base,
6438 				adev->mode_info.abm_level_property, 0);
6439 	}
6440 
6441 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6442 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6443 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6444 		drm_object_attach_property(
6445 			&aconnector->base.base,
6446 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6447 
6448 		if (!aconnector->mst_port)
6449 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6450 
6451 #ifdef CONFIG_DRM_AMD_DC_HDCP
6452 		if (adev->dm.hdcp_workqueue)
6453 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6454 #endif
6455 	}
6456 }
6457 
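/*
 * i2c_algorithm.master_xfer hook: translate the i2c_msg array into a
 * single DC i2c_command and submit it on the connector's DDC channel.
 */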
6458 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6459 			      struct i2c_msg *msgs, int num)
6460 {
6461 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6462 	struct ddc_service *ddc_service = i2c->ddc_service;
6463 	struct i2c_command cmd;
6464 	int i;
6465 	int result = -EIO;
6466 
6467 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6468 
6469 	if (!cmd.payloads)
6470 		return result;
6471 
6472 	cmd.number_of_payloads = num;
6473 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6474 	cmd.speed = 100;
6475 
6476 	for (i = 0; i < num; i++) {
6477 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6478 		cmd.payloads[i].address = msgs[i].addr;
6479 		cmd.payloads[i].length = msgs[i].len;
6480 		cmd.payloads[i].data = msgs[i].buf;
6481 	}
6482 
6483 	if (dc_submit_i2c(
6484 			ddc_service->ctx->dc,
6485 			ddc_service->ddc_pin->hw_info.ddc_channel,
6486 			&cmd))
6487 		result = num;
6488 
6489 	kfree(cmd.payloads);
6490 	return result;
6491 }
6492 
6493 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6494 {
6495 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6496 }
6497 
6498 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6499 	.master_xfer = amdgpu_dm_i2c_xfer,
6500 	.functionality = amdgpu_dm_i2c_func,
6501 };
6502 
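/*
 * Allocate an i2c adapter wired to the DC DDC service for this link, so
 * that EDID and other DDC traffic is routed through DC's hw i2c engine.
 */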
6503 static struct amdgpu_i2c_adapter *
6504 create_i2c(struct ddc_service *ddc_service,
6505 	   int link_index,
6506 	   int *res)
6507 {
6508 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6509 	struct amdgpu_i2c_adapter *i2c;
6510 
6511 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6512 	if (!i2c)
6513 		return NULL;
6514 	i2c->base.owner = THIS_MODULE;
6515 	i2c->base.class = I2C_CLASS_DDC;
6516 	i2c->base.dev.parent = &adev->pdev->dev;
6517 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6518 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6519 	i2c_set_adapdata(&i2c->base, i2c);
6520 	i2c->ddc_service = ddc_service;
6521 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6522 
6523 	return i2c;
6524 }
6525 
6526 
6527 /*
6528  * Note: this function assumes that dc_link_detect() was called for the
6529  * dc_link which will be represented by this aconnector.
6530  */
6531 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6532 				    struct amdgpu_dm_connector *aconnector,
6533 				    uint32_t link_index,
6534 				    struct amdgpu_encoder *aencoder)
6535 {
6536 	int res = 0;
6537 	int connector_type;
6538 	struct dc *dc = dm->dc;
6539 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6540 	struct amdgpu_i2c_adapter *i2c;
6541 
6542 	link->priv = aconnector;
6543 
6544 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6545 
6546 	i2c = create_i2c(link->ddc, link->link_index, &res);
6547 	if (!i2c) {
6548 		DRM_ERROR("Failed to create i2c adapter data\n");
6549 		return -ENOMEM;
6550 	}
6551 
6552 	aconnector->i2c = i2c;
6553 	res = i2c_add_adapter(&i2c->base);
6554 
6555 	if (res) {
6556 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6557 		goto out_free;
6558 	}
6559 
6560 	connector_type = to_drm_connector_type(link->connector_signal);
6561 
6562 	res = drm_connector_init_with_ddc(
6563 			dm->ddev,
6564 			&aconnector->base,
6565 			&amdgpu_dm_connector_funcs,
6566 			connector_type,
6567 			&i2c->base);
6568 
6569 	if (res) {
6570 		DRM_ERROR("connector_init failed\n");
6571 		aconnector->connector_id = -1;
6572 		goto out_free;
6573 	}
6574 
6575 	drm_connector_helper_add(
6576 			&aconnector->base,
6577 			&amdgpu_dm_connector_helper_funcs);
6578 
6579 	amdgpu_dm_connector_init_helper(
6580 		dm,
6581 		aconnector,
6582 		connector_type,
6583 		link,
6584 		link_index);
6585 
6586 	drm_connector_attach_encoder(
6587 		&aconnector->base, &aencoder->base);
6588 
6589 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6590 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6591 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6592 
6593 out_free:
6594 	if (res) {
6595 		kfree(i2c);
6596 		aconnector->i2c = NULL;
6597 	}
6598 	return res;
6599 }
6600 
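/*
 * Build the possible_crtcs mask for encoders: one bit per CRTC, capped
 * at six CRTCs.
 */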
6601 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6602 {
6603 	switch (adev->mode_info.num_crtc) {
6604 	case 1:
6605 		return 0x1;
6606 	case 2:
6607 		return 0x3;
6608 	case 3:
6609 		return 0x7;
6610 	case 4:
6611 		return 0xf;
6612 	case 5:
6613 		return 0x1f;
6614 	case 6:
6615 	default:
6616 		return 0x3f;
6617 	}
6618 }
6619 
6620 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6621 				  struct amdgpu_encoder *aencoder,
6622 				  uint32_t link_index)
6623 {
6624 	struct amdgpu_device *adev = drm_to_adev(dev);
6625 
6626 	int res = drm_encoder_init(dev,
6627 				   &aencoder->base,
6628 				   &amdgpu_dm_encoder_funcs,
6629 				   DRM_MODE_ENCODER_TMDS,
6630 				   NULL);
6631 
6632 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6633 
6634 	if (!res)
6635 		aencoder->encoder_id = link_index;
6636 	else
6637 		aencoder->encoder_id = -1;
6638 
6639 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6640 
6641 	return res;
6642 }
6643 
6644 static void manage_dm_interrupts(struct amdgpu_device *adev,
6645 				 struct amdgpu_crtc *acrtc,
6646 				 bool enable)
6647 {
6648 	/*
6649 	 * We have no guarantee that the frontend index maps to the same
6650 	 * backend index - some even map to more than one.
6651 	 *
6652 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6653 	 */
6654 	int irq_type =
6655 		amdgpu_display_crtc_idx_to_irq_type(
6656 			adev,
6657 			acrtc->crtc_id);
6658 
6659 	if (enable) {
6660 		drm_crtc_vblank_on(&acrtc->base);
6661 		amdgpu_irq_get(
6662 			adev,
6663 			&adev->pageflip_irq,
6664 			irq_type);
6665 	} else {
6666 
6667 		amdgpu_irq_put(
6668 			adev,
6669 			&adev->pageflip_irq,
6670 			irq_type);
6671 		drm_crtc_vblank_off(&acrtc->base);
6672 	}
6673 }
6674 
6675 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6676 				      struct amdgpu_crtc *acrtc)
6677 {
6678 	int irq_type =
6679 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6680 
6681 	/**
6682 	 * This reads the current state for the IRQ and force reapplies
6683 	 * the setting to hardware.
6684 	 */
6685 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6686 }
6687 
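/*
 * Return true if the scaling mode changed, or if the underscan settings
 * changed in a way that is visible (non-zero borders being enabled,
 * disabled, or resized).
 */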
6688 static bool
6689 is_scaling_state_different(const struct dm_connector_state *dm_state,
6690 			   const struct dm_connector_state *old_dm_state)
6691 {
6692 	if (dm_state->scaling != old_dm_state->scaling)
6693 		return true;
6694 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6695 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6696 			return true;
6697 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6698 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6699 			return true;
6700 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6701 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6702 		return true;
6703 	return false;
6704 }
6705 
6706 #ifdef CONFIG_DRM_AMD_DC_HDCP
6707 static bool is_content_protection_different(struct drm_connector_state *state,
6708 					    const struct drm_connector_state *old_state,
6709 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6710 {
6711 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6712 
6713 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6714 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6715 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6716 		return true;
6717 	}
6718 
	/* CP is being re-enabled, ignore this. */
6720 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6721 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6722 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6723 		return false;
6724 	}
6725 
6726 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6727 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6728 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6729 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6730 
	/*
	 * Check if something is connected or enabled; otherwise we would start
	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
	 */
6734 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6735 	    aconnector->dc_sink != NULL)
6736 		return true;
6737 
6738 	if (old_state->content_protection == state->content_protection)
6739 		return false;
6740 
6741 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6742 		return true;
6743 
6744 	return false;
6745 }
6746 
6747 #endif
6748 static void remove_stream(struct amdgpu_device *adev,
6749 			  struct amdgpu_crtc *acrtc,
6750 			  struct dc_stream_state *stream)
6751 {
6752 	/* this is the update mode case */
6753 
6754 	acrtc->otg_inst = -1;
6755 	acrtc->enabled = false;
6756 }
6757 
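/*
 * Translate the cursor plane's CRTC coordinates into a DC cursor
 * position. Negative coordinates are clamped to zero with the overhang
 * folded into the hotspot; an oversized cursor is rejected with -EINVAL
 * and a fully off-screen one is simply left disabled.
 */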
6758 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6759 			       struct dc_cursor_position *position)
6760 {
6761 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6762 	int x, y;
6763 	int xorigin = 0, yorigin = 0;
6764 
6765 	position->enable = false;
6766 	position->x = 0;
6767 	position->y = 0;
6768 
6769 	if (!crtc || !plane->state->fb)
6770 		return 0;
6771 
6772 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6773 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6774 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6775 			  __func__,
6776 			  plane->state->crtc_w,
6777 			  plane->state->crtc_h);
6778 		return -EINVAL;
6779 	}
6780 
6781 	x = plane->state->crtc_x;
6782 	y = plane->state->crtc_y;
6783 
6784 	if (x <= -amdgpu_crtc->max_cursor_width ||
6785 	    y <= -amdgpu_crtc->max_cursor_height)
6786 		return 0;
6787 
6788 	if (x < 0) {
6789 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6790 		x = 0;
6791 	}
6792 	if (y < 0) {
6793 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6794 		y = 0;
6795 	}
6796 	position->enable = true;
6797 	position->translate_by_source = true;
6798 	position->x = x;
6799 	position->y = y;
6800 	position->x_hotspot = xorigin;
6801 	position->y_hotspot = yorigin;
6802 
6803 	return 0;
6804 }
6805 
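/*
 * Program the hardware cursor through DC: disable it when it has no
 * visible position, otherwise update its attributes (address, size) and
 * position on the stream, holding dc_lock around the DC calls.
 */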
6806 static void handle_cursor_update(struct drm_plane *plane,
6807 				 struct drm_plane_state *old_plane_state)
6808 {
6809 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6810 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6811 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6812 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6813 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6814 	uint64_t address = afb ? afb->address : 0;
6815 	struct dc_cursor_position position;
6816 	struct dc_cursor_attributes attributes;
6817 	int ret;
6818 
6819 	if (!plane->state->fb && !old_plane_state->fb)
6820 		return;
6821 
6822 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6823 			 __func__,
6824 			 amdgpu_crtc->crtc_id,
6825 			 plane->state->crtc_w,
6826 			 plane->state->crtc_h);
6827 
6828 	ret = get_cursor_position(plane, crtc, &position);
6829 	if (ret)
6830 		return;
6831 
6832 	if (!position.enable) {
6833 		/* turn off cursor */
6834 		if (crtc_state && crtc_state->stream) {
6835 			mutex_lock(&adev->dm.dc_lock);
6836 			dc_stream_set_cursor_position(crtc_state->stream,
6837 						      &position);
6838 			mutex_unlock(&adev->dm.dc_lock);
6839 		}
6840 		return;
6841 	}
6842 
6843 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6844 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6845 
6846 	memset(&attributes, 0, sizeof(attributes));
6847 	attributes.address.high_part = upper_32_bits(address);
6848 	attributes.address.low_part  = lower_32_bits(address);
6849 	attributes.width             = plane->state->crtc_w;
6850 	attributes.height            = plane->state->crtc_h;
6851 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6852 	attributes.rotation_angle    = 0;
6853 	attributes.attribute_flags.value = 0;
6854 
6855 	attributes.pitch = attributes.width;
6856 
6857 	if (crtc_state->stream) {
6858 		mutex_lock(&adev->dm.dc_lock);
6859 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6860 							 &attributes))
6861 			DRM_ERROR("DC failed to set cursor attributes\n");
6862 
6863 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6864 						   &position))
6865 			DRM_ERROR("DC failed to set cursor position\n");
6866 		mutex_unlock(&adev->dm.dc_lock);
6867 	}
6868 }
6869 
6870 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6871 {
6872 
6873 	assert_spin_locked(&acrtc->base.dev->event_lock);
6874 	WARN_ON(acrtc->event);
6875 
6876 	acrtc->event = acrtc->base.state->event;
6877 
6878 	/* Set the flip status */
6879 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6880 
6881 	/* Mark this event as consumed */
6882 	acrtc->base.state->event = NULL;
6883 
6884 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6885 						 acrtc->crtc_id);
6886 }
6887 
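/*
 * Update the per-CRTC VRR state around a page flip: let the freesync
 * module handle the pre-flip (and, on pre-AI ASICs with VRR active, the
 * v_update), then rebuild the VRR infopacket and record whether the
 * timing adjustment or the packet contents changed.
 */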
6888 static void update_freesync_state_on_stream(
6889 	struct amdgpu_display_manager *dm,
6890 	struct dm_crtc_state *new_crtc_state,
6891 	struct dc_stream_state *new_stream,
6892 	struct dc_plane_state *surface,
6893 	u32 flip_timestamp_in_us)
6894 {
6895 	struct mod_vrr_params vrr_params;
6896 	struct dc_info_packet vrr_infopacket = {0};
6897 	struct amdgpu_device *adev = dm->adev;
6898 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6899 	unsigned long flags;
6900 
6901 	if (!new_stream)
6902 		return;
6903 
6904 	/*
6905 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6906 	 * For now it's sufficient to just guard against these conditions.
6907 	 */
6908 
6909 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6910 		return;
6911 
6912 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
6914 
6915 	if (surface) {
6916 		mod_freesync_handle_preflip(
6917 			dm->freesync_module,
6918 			surface,
6919 			new_stream,
6920 			flip_timestamp_in_us,
6921 			&vrr_params);
6922 
6923 		if (adev->family < AMDGPU_FAMILY_AI &&
6924 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6925 			mod_freesync_handle_v_update(dm->freesync_module,
6926 						     new_stream, &vrr_params);
6927 
6928 			/* Need to call this before the frame ends. */
6929 			dc_stream_adjust_vmin_vmax(dm->dc,
6930 						   new_crtc_state->stream,
6931 						   &vrr_params.adjust);
6932 		}
6933 	}
6934 
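	/* Rebuild the VRR infopacket so VRR state changes reach the sink. */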
6935 	mod_freesync_build_vrr_infopacket(
6936 		dm->freesync_module,
6937 		new_stream,
6938 		&vrr_params,
6939 		PACKET_TYPE_VRR,
6940 		TRANSFER_FUNC_UNKNOWN,
6941 		&vrr_infopacket);
6942 
6943 	new_crtc_state->freesync_timing_changed |=
6944 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
6945 			&vrr_params.adjust,
6946 			sizeof(vrr_params.adjust)) != 0);
6947 
6948 	new_crtc_state->freesync_vrr_info_changed |=
6949 		(memcmp(&new_crtc_state->vrr_infopacket,
6950 			&vrr_infopacket,
6951 			sizeof(vrr_infopacket)) != 0);
6952 
6953 	acrtc->dm_irq_params.vrr_params = vrr_params;
6954 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6955 
6956 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
6957 	new_stream->vrr_infopacket = vrr_infopacket;
6958 
6959 	if (new_crtc_state->freesync_vrr_info_changed)
6960 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6961 			      new_crtc_state->base.crtc->base.id,
6962 			      (int)new_crtc_state->base.vrr_enabled,
6963 			      (int)vrr_params.state);
6964 
6965 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
6966 }
6967 
6968 static void update_stream_irq_parameters(
6969 	struct amdgpu_display_manager *dm,
6970 	struct dm_crtc_state *new_crtc_state)
6971 {
6972 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6973 	struct mod_vrr_params vrr_params;
6974 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6975 	struct amdgpu_device *adev = dm->adev;
6976 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6977 	unsigned long flags;
6978 
6979 	if (!new_stream)
6980 		return;
6981 
6982 	/*
6983 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6984 	 * For now it's sufficient to just guard against these conditions.
6985 	 */
6986 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6987 		return;
6988 
6989 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6990 	vrr_params = acrtc->dm_irq_params.vrr_params;
6991 
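	/*
	 * Request variable refresh only when userspace enabled VRR on a
	 * configuration that supports it; otherwise fall back to the inactive
	 * or unsupported state.
	 */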
6992 	if (new_crtc_state->vrr_supported &&
6993 	    config.min_refresh_in_uhz &&
6994 	    config.max_refresh_in_uhz) {
6995 		config.state = new_crtc_state->base.vrr_enabled ?
6996 			VRR_STATE_ACTIVE_VARIABLE :
6997 			VRR_STATE_INACTIVE;
6998 	} else {
6999 		config.state = VRR_STATE_UNSUPPORTED;
7000 	}
7001 
7002 	mod_freesync_build_vrr_params(dm->freesync_module,
7003 				      new_stream,
7004 				      &config, &vrr_params);
7005 
7006 	new_crtc_state->freesync_timing_changed |=
7007 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7008 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7009 
7010 	new_crtc_state->freesync_config = config;
7011 	/* Copy state for access from DM IRQ handler */
7012 	acrtc->dm_irq_params.freesync_config = config;
7013 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7014 	acrtc->dm_irq_params.vrr_params = vrr_params;
7015 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7016 }
7017 
7018 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7019 					    struct dm_crtc_state *new_state)
7020 {
7021 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7022 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7023 
7024 	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable the vblank irq, as
		 * a reenable after a disable would compute bogus vblank/pflip
		 * timestamps if the disable happened inside the display
		 * front-porch.
		 *
		 * We also need the vupdate irq for the actual core vblank
		 * handling at end of vblank.
		 */
7033 		dm_set_vupdate_irq(new_state->base.crtc, true);
7034 		drm_crtc_vblank_get(new_state->base.crtc);
7035 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7036 				 __func__, new_state->base.crtc->base.id);
7037 	} else if (old_vrr_active && !new_vrr_active) {
7038 		/* Transition VRR active -> inactive:
7039 		 * Allow vblank irq disable again for fixed refresh rate.
7040 		 */
7041 		dm_set_vupdate_irq(new_state->base.crtc, false);
7042 		drm_crtc_vblank_put(new_state->base.crtc);
7043 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7044 				 __func__, new_state->base.crtc->base.id);
7045 	}
7046 }
7047 
7048 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7049 {
7050 	struct drm_plane *plane;
7051 	struct drm_plane_state *old_plane_state, *new_plane_state;
7052 	int i;
7053 
7054 	/*
7055 	 * TODO: Make this per-stream so we don't issue redundant updates for
7056 	 * commits with multiple streams.
7057 	 */
7058 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7059 				       new_plane_state, i)
7060 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7061 			handle_cursor_update(plane, old_plane_state);
7062 }
7063 
7064 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7065 				    struct dc_state *dc_state,
7066 				    struct drm_device *dev,
7067 				    struct amdgpu_display_manager *dm,
7068 				    struct drm_crtc *pcrtc,
7069 				    bool wait_for_vblank)
7070 {
7071 	uint32_t i;
7072 	uint64_t timestamp_ns;
7073 	struct drm_plane *plane;
7074 	struct drm_plane_state *old_plane_state, *new_plane_state;
7075 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7076 	struct drm_crtc_state *new_pcrtc_state =
7077 			drm_atomic_get_new_crtc_state(state, pcrtc);
7078 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7079 	struct dm_crtc_state *dm_old_crtc_state =
7080 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7081 	int planes_count = 0, vpos, hpos;
7082 	long r;
7083 	unsigned long flags;
7084 	struct amdgpu_bo *abo;
7085 	uint32_t target_vblank, last_flip_vblank;
7086 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7087 	bool pflip_present = false;
7088 	struct {
7089 		struct dc_surface_update surface_updates[MAX_SURFACES];
7090 		struct dc_plane_info plane_infos[MAX_SURFACES];
7091 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7092 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7093 		struct dc_stream_update stream_update;
7094 	} *bundle;
7095 
7096 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7097 
7098 	if (!bundle) {
7099 		dm_error("Failed to allocate update bundle\n");
7100 		goto cleanup;
7101 	}
7102 
7103 	/*
7104 	 * Disable the cursor first if we're disabling all the planes.
7105 	 * It'll remain on the screen after the planes are re-enabled
7106 	 * if we don't.
7107 	 */
7108 	if (acrtc_state->active_planes == 0)
7109 		amdgpu_dm_commit_cursors(state);
7110 
7111 	/* update planes when needed */
7112 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7113 		struct drm_crtc *crtc = new_plane_state->crtc;
7114 		struct drm_crtc_state *new_crtc_state;
7115 		struct drm_framebuffer *fb = new_plane_state->fb;
7116 		bool plane_needs_flip;
7117 		struct dc_plane_state *dc_plane;
7118 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7119 
7120 		/* Cursor plane is handled after stream updates */
7121 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7122 			continue;
7123 
7124 		if (!fb || !crtc || pcrtc != crtc)
7125 			continue;
7126 
7127 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7128 		if (!new_crtc_state->active)
7129 			continue;
7130 
7131 		dc_plane = dm_new_plane_state->dc_state;
7132 
7133 		bundle->surface_updates[planes_count].surface = dc_plane;
7134 		if (new_pcrtc_state->color_mgmt_changed) {
7135 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7136 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7137 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7138 		}
7139 
7140 		fill_dc_scaling_info(new_plane_state,
7141 				     &bundle->scaling_infos[planes_count]);
7142 
7143 		bundle->surface_updates[planes_count].scaling_info =
7144 			&bundle->scaling_infos[planes_count];
7145 
7146 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7147 
7148 		pflip_present = pflip_present || plane_needs_flip;
7149 
7150 		if (!plane_needs_flip) {
7151 			planes_count += 1;
7152 			continue;
7153 		}
7154 
7155 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7156 
7157 		/*
7158 		 * Wait for all fences on this FB. Do limited wait to avoid
7159 		 * deadlock during GPU reset when this fence will not signal
7160 		 * but we hold reservation lock for the BO.
7161 		 */
7162 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7163 							false,
7164 							msecs_to_jiffies(5000));
7165 		if (unlikely(r <= 0))
7166 			DRM_ERROR("Waiting for fences timed out!");
7167 
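		/* Derive the DC plane info and the flip address from the new FB. */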
7168 		fill_dc_plane_info_and_addr(
7169 			dm->adev, new_plane_state,
7170 			dm_new_plane_state->tiling_flags,
7171 			&bundle->plane_infos[planes_count],
7172 			&bundle->flip_addrs[planes_count].address,
7173 			dm_new_plane_state->tmz_surface, false);
7174 
7175 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7176 				 new_plane_state->plane->index,
7177 				 bundle->plane_infos[planes_count].dcc.enable);
7178 
7179 		bundle->surface_updates[planes_count].plane_info =
7180 			&bundle->plane_infos[planes_count];
7181 
7182 		/*
7183 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
7185 		 */
7186 		bundle->flip_addrs[planes_count].flip_immediate =
7187 			crtc->state->async_flip &&
7188 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7189 
7190 		timestamp_ns = ktime_get_ns();
7191 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7192 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7193 		bundle->surface_updates[planes_count].surface = dc_plane;
7194 
7195 		if (!bundle->surface_updates[planes_count].surface) {
7196 			DRM_ERROR("No surface for CRTC: id=%d\n",
7197 					acrtc_attach->crtc_id);
7198 			continue;
7199 		}
7200 
7201 		if (plane == pcrtc->primary)
7202 			update_freesync_state_on_stream(
7203 				dm,
7204 				acrtc_state,
7205 				acrtc_state->stream,
7206 				dc_plane,
7207 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7208 
7209 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7210 				 __func__,
7211 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7212 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7213 
7214 		planes_count += 1;
7215 
7216 	}
7217 
7218 	if (pflip_present) {
7219 		if (!vrr_active) {
7220 			/* Use old throttling in non-vrr fixed refresh rate mode
7221 			 * to keep flip scheduling based on target vblank counts
7222 			 * working in a backwards compatible way, e.g., for
7223 			 * clients using the GLX_OML_sync_control extension or
7224 			 * DRI3/Present extension with defined target_msc.
7225 			 */
7226 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7227 		}
7228 		else {
7229 			/* For variable refresh rate mode only:
7230 			 * Get vblank of last completed flip to avoid > 1 vrr
7231 			 * flips per video frame by use of throttling, but allow
7232 			 * flip programming anywhere in the possibly large
7233 			 * variable vrr vblank interval for fine-grained flip
7234 			 * timing control and more opportunity to avoid stutter
7235 			 * on late submission of flips.
7236 			 */
7237 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7238 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7239 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7240 		}
7241 
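		/* wait_for_vblank contributes 0 or 1, targeting this or the next vblank. */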
7242 		target_vblank = last_flip_vblank + wait_for_vblank;
7243 
7244 		/*
7245 		 * Wait until we're out of the vertical blank period before the one
7246 		 * targeted by the flip
7247 		 */
7248 		while ((acrtc_attach->enabled &&
7249 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7250 							    0, &vpos, &hpos, NULL,
7251 							    NULL, &pcrtc->hwmode)
7252 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7253 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7254 			(int)(target_vblank -
7255 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7256 			usleep_range(1000, 1100);
7257 		}
7258 
7259 		/**
7260 		 * Prepare the flip event for the pageflip interrupt to handle.
7261 		 *
7262 		 * This only works in the case where we've already turned on the
7263 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7264 		 * from 0 -> n planes we have to skip a hardware generated event
7265 		 * and rely on sending it from software.
7266 		 */
7267 		if (acrtc_attach->base.state->event &&
7268 		    acrtc_state->active_planes > 0) {
7269 			drm_crtc_vblank_get(pcrtc);
7270 
7271 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7272 
7273 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7274 			prepare_flip_isr(acrtc_attach);
7275 
7276 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7277 		}
7278 
7279 		if (acrtc_state->stream) {
7280 			if (acrtc_state->freesync_vrr_info_changed)
7281 				bundle->stream_update.vrr_infopacket =
7282 					&acrtc_state->stream->vrr_infopacket;
7283 		}
7284 	}
7285 
7286 	/* Update the planes if changed or disable if we don't have any. */
7287 	if ((planes_count || acrtc_state->active_planes == 0) &&
7288 		acrtc_state->stream) {
7289 		bundle->stream_update.stream = acrtc_state->stream;
7290 		if (new_pcrtc_state->mode_changed) {
7291 			bundle->stream_update.src = acrtc_state->stream->src;
7292 			bundle->stream_update.dst = acrtc_state->stream->dst;
7293 		}
7294 
7295 		if (new_pcrtc_state->color_mgmt_changed) {
7296 			/*
7297 			 * TODO: This isn't fully correct since we've actually
7298 			 * already modified the stream in place.
7299 			 */
7300 			bundle->stream_update.gamut_remap =
7301 				&acrtc_state->stream->gamut_remap_matrix;
7302 			bundle->stream_update.output_csc_transform =
7303 				&acrtc_state->stream->csc_color_matrix;
7304 			bundle->stream_update.out_transfer_func =
7305 				acrtc_state->stream->out_transfer_func;
7306 		}
7307 
7308 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7309 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7310 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7311 
7312 		/*
7313 		 * If FreeSync state on the stream has changed then we need to
7314 		 * re-adjust the min/max bounds now that DC doesn't handle this
7315 		 * as part of commit.
7316 		 */
7317 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7318 		    amdgpu_dm_vrr_active(acrtc_state)) {
7319 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7320 			dc_stream_adjust_vmin_vmax(
7321 				dm->dc, acrtc_state->stream,
7322 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7323 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7324 		}
7325 		mutex_lock(&dm->dc_lock);
7326 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7327 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7328 			amdgpu_dm_psr_disable(acrtc_state->stream);
7329 
7330 		dc_commit_updates_for_stream(dm->dc,
7331 						     bundle->surface_updates,
7332 						     planes_count,
7333 						     acrtc_state->stream,
7334 						     &bundle->stream_update,
7335 						     dc_state);
7336 
7337 		/**
7338 		 * Enable or disable the interrupts on the backend.
7339 		 *
7340 		 * Most pipes are put into power gating when unused.
7341 		 *
7342 		 * When power gating is enabled on a pipe we lose the
7343 		 * interrupt enablement state when power gating is disabled.
7344 		 *
7345 		 * So we need to update the IRQ control state in hardware
7346 		 * whenever the pipe turns on (since it could be previously
7347 		 * power gated) or off (since some pipes can't be power gated
7348 		 * on some ASICs).
7349 		 */
7350 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7351 			dm_update_pflip_irq_state(drm_to_adev(dev),
7352 						  acrtc_attach);
7353 
7354 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7355 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7356 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7357 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7358 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7359 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7360 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7361 			amdgpu_dm_psr_enable(acrtc_state->stream);
7362 		}
7363 
7364 		mutex_unlock(&dm->dc_lock);
7365 	}
7366 
7367 	/*
7368 	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane, since those pipes are being
	 * disabled anyway.
7371 	 */
7372 	if (acrtc_state->active_planes)
7373 		amdgpu_dm_commit_cursors(state);
7374 
7375 cleanup:
7376 	kfree(bundle);
7377 }
7378 
7379 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7380 				   struct drm_atomic_state *state)
7381 {
7382 	struct amdgpu_device *adev = drm_to_adev(dev);
7383 	struct amdgpu_dm_connector *aconnector;
7384 	struct drm_connector *connector;
7385 	struct drm_connector_state *old_con_state, *new_con_state;
7386 	struct drm_crtc_state *new_crtc_state;
7387 	struct dm_crtc_state *new_dm_crtc_state;
7388 	const struct dc_stream_status *status;
7389 	int i, inst;
7390 
7391 	/* Notify device removals. */
7392 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7393 		if (old_con_state->crtc != new_con_state->crtc) {
7394 			/* CRTC changes require notification. */
7395 			goto notify;
7396 		}
7397 
7398 		if (!new_con_state->crtc)
7399 			continue;
7400 
7401 		new_crtc_state = drm_atomic_get_new_crtc_state(
7402 			state, new_con_state->crtc);
7403 
7404 		if (!new_crtc_state)
7405 			continue;
7406 
7407 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7408 			continue;
7409 
7410 	notify:
7411 		aconnector = to_amdgpu_dm_connector(connector);
7412 
7413 		mutex_lock(&adev->dm.audio_lock);
7414 		inst = aconnector->audio_inst;
7415 		aconnector->audio_inst = -1;
7416 		mutex_unlock(&adev->dm.audio_lock);
7417 
7418 		amdgpu_dm_audio_eld_notify(adev, inst);
7419 	}
7420 
7421 	/* Notify audio device additions. */
7422 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7423 		if (!new_con_state->crtc)
7424 			continue;
7425 
7426 		new_crtc_state = drm_atomic_get_new_crtc_state(
7427 			state, new_con_state->crtc);
7428 
7429 		if (!new_crtc_state)
7430 			continue;
7431 
7432 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7433 			continue;
7434 
7435 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7436 		if (!new_dm_crtc_state->stream)
7437 			continue;
7438 
7439 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7440 		if (!status)
7441 			continue;
7442 
7443 		aconnector = to_amdgpu_dm_connector(connector);
7444 
7445 		mutex_lock(&adev->dm.audio_lock);
7446 		inst = status->audio_inst;
7447 		aconnector->audio_inst = inst;
7448 		mutex_unlock(&adev->dm.audio_lock);
7449 
7450 		amdgpu_dm_audio_eld_notify(adev, inst);
7451 	}
7452 }
7453 
7454 /*
7455  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7456  * @crtc_state: the DRM CRTC state
7457  * @stream_state: the DC stream state.
7458  *
7459  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7460  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7461  */
7462 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7463 						struct dc_stream_state *stream_state)
7464 {
7465 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7466 }
7467 
7468 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7469 				   struct drm_atomic_state *state,
7470 				   bool nonblock)
7471 {
7472 	/*
7473 	 * Add check here for SoC's that support hardware cursor plane, to
7474 	 * unset legacy_cursor_update
7475 	 */
7476 
7477 	return drm_atomic_helper_commit(dev, state, nonblock);
7478 
	/* TODO: Handle EINTR, reenable IRQ */
7480 }
7481 
7482 /**
7483  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7484  * @state: The atomic state to commit
7485  *
7486  * This will tell DC to commit the constructed DC state from atomic_check,
7487  * programming the hardware. Any failures here implies a hardware failure, since
7488  * atomic check should have filtered anything non-kosher.
7489  */
7490 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7491 {
7492 	struct drm_device *dev = state->dev;
7493 	struct amdgpu_device *adev = drm_to_adev(dev);
7494 	struct amdgpu_display_manager *dm = &adev->dm;
7495 	struct dm_atomic_state *dm_state;
7496 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7497 	uint32_t i, j;
7498 	struct drm_crtc *crtc;
7499 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7500 	unsigned long flags;
7501 	bool wait_for_vblank = true;
7502 	struct drm_connector *connector;
7503 	struct drm_connector_state *old_con_state, *new_con_state;
7504 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7505 	int crtc_disable_count = 0;
7506 	bool mode_set_reset_required = false;
7507 
7508 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7509 
7510 	dm_state = dm_atomic_get_new_state(state);
7511 	if (dm_state && dm_state->context) {
7512 		dc_state = dm_state->context;
7513 	} else {
7514 		/* No state changes, retain current state. */
7515 		dc_state_temp = dc_create_state(dm->dc);
7516 		ASSERT(dc_state_temp);
7517 		dc_state = dc_state_temp;
7518 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7519 	}
7520 
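	/*
	 * Disable interrupts and drop the stream reference for any CRTC that
	 * is being turned off or undergoing a full modeset, before the new
	 * DC state is committed.
	 */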
7521 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7522 				       new_crtc_state, i) {
7523 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7524 
7525 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7526 
7527 		if (old_crtc_state->active &&
7528 		    (!new_crtc_state->active ||
7529 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7530 			manage_dm_interrupts(adev, acrtc, false);
7531 			dc_stream_release(dm_old_crtc_state->stream);
7532 		}
7533 	}
7534 
7535 	drm_atomic_helper_calc_timestamping_constants(state);
7536 
7537 	/* update changed items */
7538 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7539 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7540 
7541 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7542 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7543 
7544 		DRM_DEBUG_DRIVER(
7545 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7546 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7547 			"connectors_changed:%d\n",
7548 			acrtc->crtc_id,
7549 			new_crtc_state->enable,
7550 			new_crtc_state->active,
7551 			new_crtc_state->planes_changed,
7552 			new_crtc_state->mode_changed,
7553 			new_crtc_state->active_changed,
7554 			new_crtc_state->connectors_changed);
7555 
7556 		/* Copy all transient state flags into dc state */
7557 		if (dm_new_crtc_state->stream) {
7558 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7559 							    dm_new_crtc_state->stream);
7560 		}
7561 
7562 		/* handles headless hotplug case, updating new_state and
7563 		 * aconnector as needed
7564 		 */
7565 
7566 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7567 
7568 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7569 
7570 			if (!dm_new_crtc_state->stream) {
7571 				/*
7572 				 * this could happen because of issues with
7573 				 * userspace notifications delivery.
7574 				 * In this case userspace tries to set mode on
7575 				 * display which is disconnected in fact.
7576 				 * dc_sink is NULL in this case on aconnector.
7577 				 * We expect reset mode will come soon.
7578 				 *
7579 				 * This can also happen when unplug is done
7580 				 * during resume sequence ended
7581 				 *
7582 				 * In this case, we want to pretend we still
7583 				 * have a sink to keep the pipe running so that
7584 				 * hw state is consistent with the sw state
7585 				 */
7586 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7587 						__func__, acrtc->base.base.id);
7588 				continue;
7589 			}
7590 
7591 			if (dm_old_crtc_state->stream)
7592 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7593 
7594 			pm_runtime_get_noresume(dev->dev);
7595 
7596 			acrtc->enabled = true;
7597 			acrtc->hw_mode = new_crtc_state->mode;
7598 			crtc->hwmode = new_crtc_state->mode;
7599 			mode_set_reset_required = true;
7600 		} else if (modereset_required(new_crtc_state)) {
7601 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7602 			/* i.e. reset mode */
7603 			if (dm_old_crtc_state->stream)
7604 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7605 			mode_set_reset_required = true;
7606 		}
7607 	} /* for_each_crtc_in_state() */
7608 
7609 	if (dc_state) {
		/* If there was a mode set or reset, disable eDP PSR. */
7611 		if (mode_set_reset_required)
7612 			amdgpu_dm_psr_disable_all(dm);
7613 
7614 		dm_enable_per_frame_crtc_master_sync(dc_state);
7615 		mutex_lock(&dm->dc_lock);
7616 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7617 		mutex_unlock(&dm->dc_lock);
7618 	}
7619 
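	/* Record the OTG instance that now backs each active stream. */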
7620 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7621 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7622 
7623 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7624 
7625 		if (dm_new_crtc_state->stream != NULL) {
7626 			const struct dc_stream_status *status =
7627 					dc_stream_get_status(dm_new_crtc_state->stream);
7628 
7629 			if (!status)
7630 				status = dc_stream_get_status_from_state(dc_state,
7631 									 dm_new_crtc_state->stream);
7632 			if (!status)
7633 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7634 			else
7635 				acrtc->otg_inst = status->primary_otg_inst;
7636 		}
7637 	}
7638 #ifdef CONFIG_DRM_AMD_DC_HDCP
7639 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7640 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7641 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7642 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7643 
7644 		new_crtc_state = NULL;
7645 
7646 		if (acrtc)
7647 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7648 
7649 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7650 
7651 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7652 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7653 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7654 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7655 			continue;
7656 		}
7657 
7658 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7659 			hdcp_update_display(
7660 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7661 				new_con_state->hdcp_content_type,
7662 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7663 													 : false);
7664 	}
7665 #endif
7666 
7667 	/* Handle connector state changes */
7668 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7669 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7670 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7671 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7672 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7673 		struct dc_stream_update stream_update;
7674 		struct dc_info_packet hdr_packet;
7675 		struct dc_stream_status *status = NULL;
7676 		bool abm_changed, hdr_changed, scaling_changed;
7677 
7678 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7679 		memset(&stream_update, 0, sizeof(stream_update));
7680 
7681 		if (acrtc) {
7682 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7683 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7684 		}
7685 
7686 		/* Skip any modesets/resets */
7687 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7688 			continue;
7689 
7690 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7691 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7692 
7693 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7694 							     dm_old_con_state);
7695 
7696 		abm_changed = dm_new_crtc_state->abm_level !=
7697 			      dm_old_crtc_state->abm_level;
7698 
7699 		hdr_changed =
7700 			is_hdr_metadata_different(old_con_state, new_con_state);
7701 
7702 		if (!scaling_changed && !abm_changed && !hdr_changed)
7703 			continue;
7704 
7705 		stream_update.stream = dm_new_crtc_state->stream;
7706 		if (scaling_changed) {
7707 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7708 					dm_new_con_state, dm_new_crtc_state->stream);
7709 
7710 			stream_update.src = dm_new_crtc_state->stream->src;
7711 			stream_update.dst = dm_new_crtc_state->stream->dst;
7712 		}
7713 
7714 		if (abm_changed) {
7715 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7716 
7717 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7718 		}
7719 
7720 		if (hdr_changed) {
7721 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7722 			stream_update.hdr_static_metadata = &hdr_packet;
7723 		}
7724 
7725 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7726 		WARN_ON(!status);
7727 		WARN_ON(!status->plane_count);
7728 
7729 		/*
7730 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7731 		 * Here we create an empty update on each plane.
7732 		 * To fix this, DC should permit updating only stream properties.
7733 		 */
7734 		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

		mutex_lock(&dm->dc_lock);
7739 		dc_commit_updates_for_stream(dm->dc,
7740 						     dummy_updates,
7741 						     status->plane_count,
7742 						     dm_new_crtc_state->stream,
7743 						     &stream_update,
7744 						     dc_state);
7745 		mutex_unlock(&dm->dc_lock);
7746 	}
7747 
7748 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7749 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7750 				      new_crtc_state, i) {
7751 		if (old_crtc_state->active && !new_crtc_state->active)
7752 			crtc_disable_count++;
7753 
7754 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7755 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7756 
7757 		/* For freesync config update on crtc state and params for irq */
7758 		update_stream_irq_parameters(dm, dm_new_crtc_state);
7759 
7760 		/* Handle vrr on->off / off->on transitions */
7761 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7762 						dm_new_crtc_state);
7763 	}
7764 
7765 	/**
7766 	 * Enable interrupts for CRTCs that are newly enabled or went through
7767 	 * a modeset. It was intentionally deferred until after the front end
7768 	 * state was modified to wait until the OTG was on and so the IRQ
7769 	 * handlers didn't access stale or invalid state.
7770 	 */
7771 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7772 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7773 
7774 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7775 
7776 		if (new_crtc_state->active &&
7777 		    (!old_crtc_state->active ||
7778 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7779 			dc_stream_retain(dm_new_crtc_state->stream);
7780 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7781 			manage_dm_interrupts(adev, acrtc, true);
7782 
7783 #ifdef CONFIG_DEBUG_FS
7784 			/**
7785 			 * Frontend may have changed so reapply the CRC capture
7786 			 * settings for the stream.
7787 			 */
7788 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7789 
7790 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7791 				amdgpu_dm_crtc_configure_crc_source(
7792 					crtc, dm_new_crtc_state,
7793 					dm_new_crtc_state->crc_src);
7794 			}
7795 #endif
7796 		}
7797 	}
7798 
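	/* If any CRTC requested an async flip, don't stall on vblank below. */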
7799 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7800 		if (new_crtc_state->async_flip)
7801 			wait_for_vblank = false;
7802 
7803 	/* update planes when needed per crtc*/
7804 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7805 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7806 
7807 		if (dm_new_crtc_state->stream)
7808 			amdgpu_dm_commit_planes(state, dc_state, dev,
7809 						dm, crtc, wait_for_vblank);
7810 	}
7811 
7812 	/* Update audio instances for each connector. */
7813 	amdgpu_dm_commit_audio(dev, state);
7814 
7815 	/*
7816 	 * send vblank event on all events not handled in flip and
7817 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7818 	 */
7819 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7820 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7821 
7822 		if (new_crtc_state->event)
7823 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7824 
7825 		new_crtc_state->event = NULL;
7826 	}
7827 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7828 
7829 	/* Signal HW programming completion */
7830 	drm_atomic_helper_commit_hw_done(state);
7831 
7832 	if (wait_for_vblank)
7833 		drm_atomic_helper_wait_for_flip_done(dev, state);
7834 
7835 	drm_atomic_helper_cleanup_planes(dev, state);
7836 
7837 	/*
7838 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7839 	 * so we can put the GPU into runtime suspend if we're not driving any
7840 	 * displays anymore
7841 	 */
7842 	for (i = 0; i < crtc_disable_count; i++)
7843 		pm_runtime_put_autosuspend(dev->dev);
7844 	pm_runtime_mark_last_busy(dev->dev);
7845 
7846 	if (dc_state_temp)
7847 		dc_release_state(dc_state_temp);
7848 }
7849 
7850 
7851 static int dm_force_atomic_commit(struct drm_connector *connector)
7852 {
7853 	int ret = 0;
7854 	struct drm_device *ddev = connector->dev;
7855 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7856 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7857 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7858 	struct drm_connector_state *conn_state;
7859 	struct drm_crtc_state *crtc_state;
7860 	struct drm_plane_state *plane_state;
7861 
7862 	if (!state)
7863 		return -ENOMEM;
7864 
7865 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7866 
7867 	/* Construct an atomic state to restore previous display setting */
7868 
7869 	/*
7870 	 * Attach connectors to drm_atomic_state
7871 	 */
7872 	conn_state = drm_atomic_get_connector_state(state, connector);
7873 
7874 	ret = PTR_ERR_OR_ZERO(conn_state);
7875 	if (ret)
7876 		goto err;
7877 
7878 	/* Attach crtc to drm_atomic_state*/
7879 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7880 
7881 	ret = PTR_ERR_OR_ZERO(crtc_state);
7882 	if (ret)
7883 		goto err;
7884 
7885 	/* force a restore */
7886 	crtc_state->mode_changed = true;
7887 
7888 	/* Attach plane to drm_atomic_state */
7889 	plane_state = drm_atomic_get_plane_state(state, plane);
7890 
7891 	ret = PTR_ERR_OR_ZERO(plane_state);
7892 	if (ret)
		goto err;

	/* Call commit internally with the state we just constructed */
7897 	ret = drm_atomic_commit(state);
7898 	if (!ret)
7899 		return 0;
7900 
7901 err:
7902 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7903 	drm_atomic_state_put(state);
7904 
7905 	return ret;
7906 }
7907 
7908 /*
 * This function handles all cases where a set mode does not come upon hotplug.
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
7912  */
7913 void dm_restore_drm_connector_state(struct drm_device *dev,
7914 				    struct drm_connector *connector)
7915 {
7916 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7917 	struct amdgpu_crtc *disconnected_acrtc;
7918 	struct dm_crtc_state *acrtc_state;
7919 
7920 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7921 		return;
7922 
7923 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7924 	if (!disconnected_acrtc)
7925 		return;
7926 
7927 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7928 	if (!acrtc_state->stream)
7929 		return;
7930 
7931 	/*
	 * If the previous sink has not been released and is different from the
	 * current one, we deduce we are in a state where we cannot rely on a
	 * usermode call to turn on the display, so we do it here.
7935 	 */
7936 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7937 		dm_force_atomic_commit(&aconnector->base);
7938 }
7939 
7940 /*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
7943  */
7944 static int do_aquire_global_lock(struct drm_device *dev,
7945 				 struct drm_atomic_state *state)
7946 {
7947 	struct drm_crtc *crtc;
7948 	struct drm_crtc_commit *commit;
7949 	long ret;
7950 
7951 	/*
	 * Adding all modeset locks to acquire_ctx ensures
	 * that when the framework releases it, the extra
	 * locks we are taking here will get released too.
7955 	 */
7956 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7957 	if (ret)
7958 		return ret;
7959 
7960 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7961 		spin_lock(&crtc->commit_lock);
7962 		commit = list_first_entry_or_null(&crtc->commit_list,
7963 				struct drm_crtc_commit, commit_entry);
7964 		if (commit)
7965 			drm_crtc_commit_get(commit);
7966 		spin_unlock(&crtc->commit_lock);
7967 
7968 		if (!commit)
7969 			continue;
7970 
7971 		/*
		 * Make sure all pending HW programming has completed and
		 * all page flips are done.
7974 		 */
7975 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7976 
7977 		if (ret > 0)
7978 			ret = wait_for_completion_interruptible_timeout(
7979 					&commit->flip_done, 10*HZ);
7980 
7981 		if (ret == 0)
7982 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7983 				  "timed out\n", crtc->base.id, crtc->name);
7984 
7985 		drm_crtc_commit_put(commit);
7986 	}
7987 
7988 	return ret < 0 ? ret : 0;
7989 }
7990 
7991 static void get_freesync_config_for_crtc(
7992 	struct dm_crtc_state *new_crtc_state,
7993 	struct dm_connector_state *new_con_state)
7994 {
7995 	struct mod_freesync_config config = {0};
7996 	struct amdgpu_dm_connector *aconnector =
7997 			to_amdgpu_dm_connector(new_con_state->base.connector);
7998 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7999 	int vrefresh = drm_mode_vrefresh(mode);
8000 
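	/*
	 * VRR is usable only when the connector reports FreeSync capability
	 * and the mode's nominal refresh rate lies within the sink's range.
	 */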
8001 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8002 					vrefresh >= aconnector->min_vfreq &&
8003 					vrefresh <= aconnector->max_vfreq;
8004 
8005 	if (new_crtc_state->vrr_supported) {
8006 		new_crtc_state->stream->ignore_msa_timing_param = true;
8007 		config.state = new_crtc_state->base.vrr_enabled ?
8008 				VRR_STATE_ACTIVE_VARIABLE :
8009 				VRR_STATE_INACTIVE;
8010 		config.min_refresh_in_uhz =
8011 				aconnector->min_vfreq * 1000000;
8012 		config.max_refresh_in_uhz =
8013 				aconnector->max_vfreq * 1000000;
8014 		config.vsif_supported = true;
8015 		config.btr = true;
8016 	}
8017 
8018 	new_crtc_state->freesync_config = config;
8019 }
8020 
8021 static void reset_freesync_config_for_crtc(
8022 	struct dm_crtc_state *new_crtc_state)
8023 {
8024 	new_crtc_state->vrr_supported = false;
8025 
8026 	memset(&new_crtc_state->vrr_infopacket, 0,
8027 	       sizeof(new_crtc_state->vrr_infopacket));
8028 }
8029 
8030 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8031 				struct drm_atomic_state *state,
8032 				struct drm_crtc *crtc,
8033 				struct drm_crtc_state *old_crtc_state,
8034 				struct drm_crtc_state *new_crtc_state,
8035 				bool enable,
8036 				bool *lock_and_validation_needed)
8037 {
8038 	struct dm_atomic_state *dm_state = NULL;
8039 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8040 	struct dc_stream_state *new_stream;
8041 	int ret = 0;
8042 
8043 	/*
	 * TODO: Move this code, which updates changed items, into
	 * dm_crtc_atomic_check once we get rid of dc_validation_set.
8046 	 */
8047 	struct amdgpu_crtc *acrtc = NULL;
8048 	struct amdgpu_dm_connector *aconnector = NULL;
8049 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8050 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8051 
8052 	new_stream = NULL;
8053 
8054 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8055 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8056 	acrtc = to_amdgpu_crtc(crtc);
8057 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8058 
8059 	/* TODO This hack should go away */
8060 	if (aconnector && enable) {
8061 		/* Make sure fake sink is created in plug-in scenario */
8062 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8063 							    &aconnector->base);
8064 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8065 							    &aconnector->base);
8066 
8067 		if (IS_ERR(drm_new_conn_state)) {
8068 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8069 			goto fail;
8070 		}
8071 
8072 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8073 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8074 
8075 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8076 			goto skip_modeset;
8077 
8078 		new_stream = create_validate_stream_for_sink(aconnector,
8079 							     &new_crtc_state->mode,
8080 							     dm_new_conn_state,
8081 							     dm_old_crtc_state->stream);
8082 
8083 		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3. In this case it is not an
		 * error: the OS will be updated after detection, and
		 * will do the right thing on the next atomic commit.
8088 		 */
8089 
8090 		if (!new_stream) {
8091 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8092 					__func__, acrtc->base.base.id);
8093 			ret = -ENOMEM;
8094 			goto fail;
8095 		}
8096 
8097 		/*
8098 		 * TODO: Check VSDB bits to decide whether this should
8099 		 * be enabled or not.
8100 		 */
8101 		new_stream->triggered_crtc_reset.enabled =
8102 			dm->force_timing_sync;
8103 
8104 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8105 
8106 		ret = fill_hdr_info_packet(drm_new_conn_state,
8107 					   &new_stream->hdr_static_metadata);
8108 		if (ret)
8109 			goto fail;
8110 
8111 		/*
8112 		 * If we already removed the old stream from the context
8113 		 * (and set the new stream to NULL) then we can't reuse
8114 		 * the old stream even if the stream and scaling are unchanged.
		 * We'd hit the BUG_ON and get a black screen.
8116 		 *
8117 		 * TODO: Refactor this function to allow this check to work
8118 		 * in all conditions.
8119 		 */
8120 		if (dm_new_crtc_state->stream &&
8121 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8122 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8123 			new_crtc_state->mode_changed = false;
8124 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8125 					 new_crtc_state->mode_changed);
8126 		}
8127 	}
8128 
8129 	/* mode_changed flag may get updated above, need to check again */
8130 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8131 		goto skip_modeset;
8132 
8133 	DRM_DEBUG_DRIVER(
8134 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8135 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8136 		"connectors_changed:%d\n",
8137 		acrtc->crtc_id,
8138 		new_crtc_state->enable,
8139 		new_crtc_state->active,
8140 		new_crtc_state->planes_changed,
8141 		new_crtc_state->mode_changed,
8142 		new_crtc_state->active_changed,
8143 		new_crtc_state->connectors_changed);
8144 
8145 	/* Remove stream for any changed/disabled CRTC */
8146 	if (!enable) {
8147 
8148 		if (!dm_old_crtc_state->stream)
8149 			goto skip_modeset;
8150 
8151 		ret = dm_atomic_get_state(state, &dm_state);
8152 		if (ret)
8153 			goto fail;
8154 
8155 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8156 				crtc->base.id);
8157 
8158 		/* i.e. reset mode */
8159 		if (dc_remove_stream_from_ctx(
8160 				dm->dc,
8161 				dm_state->context,
8162 				dm_old_crtc_state->stream) != DC_OK) {
8163 			ret = -EINVAL;
8164 			goto fail;
8165 		}
8166 
8167 		dc_stream_release(dm_old_crtc_state->stream);
8168 		dm_new_crtc_state->stream = NULL;
8169 
8170 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8171 
8172 		*lock_and_validation_needed = true;
8173 
8174 	} else {/* Add stream for any updated/enabled CRTC */
8175 		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream
		 * when newly added MST connectors are not found in the existing
		 * crtc_state in chained mode.
		 * TODO: need to dig out the root cause of that.
8179 		 */
8180 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8181 			goto skip_modeset;
8182 
8183 		if (modereset_required(new_crtc_state))
8184 			goto skip_modeset;
8185 
8186 		if (modeset_required(new_crtc_state, new_stream,
8187 				     dm_old_crtc_state->stream)) {
8188 
8189 			WARN_ON(dm_new_crtc_state->stream);
8190 
8191 			ret = dm_atomic_get_state(state, &dm_state);
8192 			if (ret)
8193 				goto fail;
8194 
8195 			dm_new_crtc_state->stream = new_stream;
8196 
8197 			dc_stream_retain(new_stream);
8198 
8199 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8200 						crtc->base.id);
8201 
8202 			if (dc_add_stream_to_ctx(
8203 					dm->dc,
8204 					dm_state->context,
8205 					dm_new_crtc_state->stream) != DC_OK) {
8206 				ret = -EINVAL;
8207 				goto fail;
8208 			}
8209 
8210 			*lock_and_validation_needed = true;
8211 		}
8212 	}
8213 
8214 skip_modeset:
8215 	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);
8218 
8219 	/*
8220 	 * We want to do dc stream updates that do not require a
8221 	 * full modeset below.
8222 	 */
8223 	if (!(enable && aconnector && new_crtc_state->active))
8224 		return 0;
8225 	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling a CRTC (it has just been added
	 *    to the dc context, or is already on the context),
	 * 2. It has a valid connector attached, and
	 * 3. It is currently active and enabled.
	 * => The dc stream state currently exists.
8232 	 */
8233 	BUG_ON(dm_new_crtc_state->stream == NULL);
8234 
8235 	/* Scaling or underscan settings */
8236 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8237 		update_stream_scaling_settings(
8238 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8239 
8240 	/* ABM settings */
8241 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8242 
8243 	/*
8244 	 * Color management settings. We also update color properties
8245 	 * when a modeset is needed, to ensure it gets reprogrammed.
8246 	 */
8247 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8248 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8249 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8250 		if (ret)
8251 			goto fail;
8252 	}
8253 
8254 	/* Update Freesync settings. */
8255 	get_freesync_config_for_crtc(dm_new_crtc_state,
8256 				     dm_new_conn_state);
8257 
8258 	return ret;
8259 
8260 fail:
8261 	if (new_stream)
8262 		dc_stream_release(new_stream);
8263 	return ret;
8264 }
8265 
8266 static bool should_reset_plane(struct drm_atomic_state *state,
8267 			       struct drm_plane *plane,
8268 			       struct drm_plane_state *old_plane_state,
8269 			       struct drm_plane_state *new_plane_state)
8270 {
8271 	struct drm_plane *other;
8272 	struct drm_plane_state *old_other_state, *new_other_state;
8273 	struct drm_crtc_state *new_crtc_state;
8274 	int i;
8275 
8276 	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
8279 	 * the stream.
8280 	 */
8281 	if (state->allow_modeset)
8282 		return true;
8283 
8284 	/* Exit early if we know that we're adding or removing the plane. */
8285 	if (old_plane_state->crtc != new_plane_state->crtc)
8286 		return true;
8287 
8288 	/* old crtc == new_crtc == NULL, plane not in context. */
8289 	if (!new_plane_state->crtc)
8290 		return false;
8291 
8292 	new_crtc_state =
8293 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8294 
8295 	if (!new_crtc_state)
8296 		return true;
8297 
8298 	/* CRTC Degamma changes currently require us to recreate planes. */
8299 	if (new_crtc_state->color_mgmt_changed)
8300 		return true;
8301 
8302 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8303 		return true;
8304 
8305 	/*
8306 	 * If there are any new primary or overlay planes being added or
8307 	 * removed then the z-order can potentially change. To ensure
8308 	 * correct z-order and pipe acquisition the current DC architecture
8309 	 * requires us to remove and recreate all existing planes.
8310 	 *
8311 	 * TODO: Come up with a more elegant solution for this.
8312 	 */
8313 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8314 		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8315 
8316 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8317 			continue;
8318 
8319 		if (old_other_state->crtc != new_plane_state->crtc &&
8320 		    new_other_state->crtc != new_plane_state->crtc)
8321 			continue;
8322 
8323 		if (old_other_state->crtc != new_other_state->crtc)
8324 			return true;
8325 
8326 		/* Src/dst size and scaling updates. */
8327 		if (old_other_state->src_w != new_other_state->src_w ||
8328 		    old_other_state->src_h != new_other_state->src_h ||
8329 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8330 		    old_other_state->crtc_h != new_other_state->crtc_h)
8331 			return true;
8332 
8333 		/* Rotation / mirroring updates. */
8334 		if (old_other_state->rotation != new_other_state->rotation)
8335 			return true;
8336 
8337 		/* Blending updates. */
8338 		if (old_other_state->pixel_blend_mode !=
8339 		    new_other_state->pixel_blend_mode)
8340 			return true;
8341 
8342 		/* Alpha updates. */
8343 		if (old_other_state->alpha != new_other_state->alpha)
8344 			return true;
8345 
8346 		/* Colorspace changes. */
8347 		if (old_other_state->color_range != new_other_state->color_range ||
8348 		    old_other_state->color_encoding != new_other_state->color_encoding)
8349 			return true;
8350 
8351 		/* Framebuffer checks fall at the end. */
8352 		if (!old_other_state->fb || !new_other_state->fb)
8353 			continue;
8354 
8355 		/* Pixel format changes can require bandwidth updates. */
8356 		if (old_other_state->fb->format != new_other_state->fb->format)
8357 			return true;
8358 
8359 		old_dm_plane_state = to_dm_plane_state(old_other_state);
8360 		new_dm_plane_state = to_dm_plane_state(new_other_state);
8361 
8362 		/* Tiling and DCC changes also require bandwidth updates. */
8363 		if (old_dm_plane_state->tiling_flags !=
8364 		    new_dm_plane_state->tiling_flags)
8365 			return true;
8366 	}
8367 
8368 	return false;
8369 }
8370 
8371 static int dm_update_plane_state(struct dc *dc,
8372 				 struct drm_atomic_state *state,
8373 				 struct drm_plane *plane,
8374 				 struct drm_plane_state *old_plane_state,
8375 				 struct drm_plane_state *new_plane_state,
8376 				 bool enable,
8377 				 bool *lock_and_validation_needed)
8378 {
8379 
8380 	struct dm_atomic_state *dm_state = NULL;
8381 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8382 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8383 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8384 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8385 	struct amdgpu_crtc *new_acrtc;
8386 	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
8391 	old_plane_crtc = old_plane_state->crtc;
8392 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8393 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8394 
	/* TODO: Implement a better atomic check for the cursor plane */
8396 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8397 		if (!enable || !new_plane_crtc ||
8398 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8399 			return 0;
8400 
8401 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8402 
8403 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8404 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8405 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8406 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8407 			return -EINVAL;
8408 		}
8409 
8410 		return 0;
8411 	}
8412 
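	/* Decide whether DC requires this plane to be removed and re-added. */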
8413 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8414 					 new_plane_state);
8415 
8416 	/* Remove any changed/removed planes */
8417 	if (!enable) {
8418 		if (!needs_reset)
8419 			return 0;
8420 
8421 		if (!old_plane_crtc)
8422 			return 0;
8423 
8424 		old_crtc_state = drm_atomic_get_old_crtc_state(
8425 				state, old_plane_crtc);
8426 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8427 
8428 		if (!dm_old_crtc_state->stream)
8429 			return 0;
8430 
8431 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8432 				plane->base.id, old_plane_crtc->base.id);
8433 
8434 		ret = dm_atomic_get_state(state, &dm_state);
8435 		if (ret)
8436 			return ret;
8437 
8438 		if (!dc_remove_plane_from_context(
8439 				dc,
8440 				dm_old_crtc_state->stream,
8441 				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
8449 		dm_new_plane_state->dc_state = NULL;
8450 
8451 		*lock_and_validation_needed = true;
8452 
8453 	} else { /* Add new planes */
8454 		struct dc_plane_state *dc_new_plane_state;
8455 
8456 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8457 			return 0;
8458 
8459 		if (!new_plane_crtc)
8460 			return 0;
8461 
8462 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8463 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8464 
8465 		if (!dm_new_crtc_state->stream)
8466 			return 0;
8467 
8468 		if (!needs_reset)
8469 			return 0;
8470 
8471 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8472 		if (ret)
8473 			return ret;
8474 
8475 		WARN_ON(dm_new_plane_state->dc_state);
8476 
8477 		dc_new_plane_state = dc_create_plane_state(dc);
8478 		if (!dc_new_plane_state)
8479 			return -ENOMEM;
8480 
8481 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8482 				plane->base.id, new_plane_crtc->base.id);
8483 
8484 		ret = fill_dc_plane_attributes(
8485 			drm_to_adev(new_plane_crtc->dev),
8486 			dc_new_plane_state,
8487 			new_plane_state,
8488 			new_crtc_state);
8489 		if (ret) {
8490 			dc_plane_state_release(dc_new_plane_state);
8491 			return ret;
8492 		}
8493 
8494 		ret = dm_atomic_get_state(state, &dm_state);
8495 		if (ret) {
8496 			dc_plane_state_release(dc_new_plane_state);
8497 			return ret;
8498 		}
8499 
8500 		/*
8501 		 * Any atomic check errors that occur after this will
8502 		 * not need a release. The plane state will be attached
8503 		 * to the stream, and therefore part of the atomic
8504 		 * state. It'll be released when the atomic state is
8505 		 * cleaned.
8506 		 */
8507 		if (!dc_add_plane_to_context(
8508 				dc,
8509 				dm_new_crtc_state->stream,
8510 				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
8514 			return -EINVAL;
8515 		}
8516 
8517 		dm_new_plane_state->dc_state = dc_new_plane_state;
8518 
		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
8522 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8523 
		*lock_and_validation_needed = true;
	}

	return ret;
8529 }
8530 
8531 #if defined(CONFIG_DRM_AMD_DC_DCN)
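/*
 * If @crtc is driven by an MST connector whose topology uses DSC, add every
 * other CRTC on the same MST topology to the atomic state so their DSC
 * configurations can be recomputed together.
 */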
8532 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8533 {
8534 	struct drm_connector *connector;
8535 	struct drm_connector_state *conn_state;
8536 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
8539 		if (conn_state->crtc != crtc)
8540 			continue;
8541 
8542 		aconnector = to_amdgpu_dm_connector(connector);
8543 		if (!aconnector->port || !aconnector->mst_port)
8544 			aconnector = NULL;
8545 		else
8546 			break;
8547 	}
8548 
8549 	if (!aconnector)
8550 		return 0;
8551 
8552 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8553 }
8554 #endif
8555 
8556 /**
8557  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8558  * @dev: The DRM device
 * @state: The atomic state to validate
8560  *
8561  * Validate that the given atomic state is programmable by DC into hardware.
8562  * This involves constructing a &struct dc_state reflecting the new hardware
8563  * state we wish to commit, then querying DC to see if it is programmable. It's
8564  * important not to modify the existing DC state. Otherwise, atomic_check
8565  * may unexpectedly commit hardware changes.
8566  *
8567  * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes, adds, or updates streams on one
 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any
 * outstanding flips using DRM's synchronization events.
8572  *
8573  * Note that DM adds the affected connectors for all CRTCs in state, when that
8574  * might not seem necessary. This is because DC stream creation requires the
8575  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8576  * be possible but non-trivial - a possible TODO item.
8577  *
 * Return: 0 on success, or a negative error code on validation failure.
8579  */
8580 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8581 				  struct drm_atomic_state *state)
8582 {
8583 	struct amdgpu_device *adev = drm_to_adev(dev);
8584 	struct dm_atomic_state *dm_state = NULL;
8585 	struct dc *dc = adev->dm.dc;
8586 	struct drm_connector *connector;
8587 	struct drm_connector_state *old_con_state, *new_con_state;
8588 	struct drm_crtc *crtc;
8589 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8590 	struct drm_plane *plane;
8591 	struct drm_plane_state *old_plane_state, *new_plane_state;
8592 	enum dc_status status;
8593 	int ret, i;
8594 	bool lock_and_validation_needed = false;
8595 
8596 	amdgpu_check_debugfs_connector_property_change(adev, state);
8597 
8598 	ret = drm_atomic_helper_check_modeset(dev, state);
8599 	if (ret)
8600 		goto fail;
8601 
8602 	/* Check connector changes */
8603 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8604 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8605 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8606 
8607 		/* Skip connectors that are disabled or part of modeset already. */
8608 		if (!old_con_state->crtc && !new_con_state->crtc)
8609 			continue;
8610 
8611 		if (!new_con_state->crtc)
8612 			continue;
8613 
8614 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8615 		if (IS_ERR(new_crtc_state)) {
8616 			ret = PTR_ERR(new_crtc_state);
8617 			goto fail;
8618 		}
8619 
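		/*
		 * The ABM (adaptive backlight) level is carried in the
		 * connector state; flag the CRTC as having connector changes
		 * so the new level is programmed during commit.
		 */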
8620 		if (dm_old_con_state->abm_level !=
8621 		    dm_new_con_state->abm_level)
8622 			new_crtc_state->connectors_changed = true;
8623 	}
8624 
8625 #if defined(CONFIG_DRM_AMD_DC_DCN)
8626 	if (adev->asic_type >= CHIP_NAVI10) {
8627 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8628 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8629 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8630 				if (ret)
8631 					goto fail;
8632 			}
8633 		}
8634 	}
8635 #endif
8636 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8637 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8638 		    !new_crtc_state->color_mgmt_changed &&
8639 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8640 			continue;
8641 
8642 		if (!new_crtc_state->enable)
8643 			continue;
8644 
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;
8648 
8649 		ret = drm_atomic_add_affected_planes(state, crtc);
8650 		if (ret)
8651 			goto fail;
8652 	}
8653 
8654 	/*
8655 	 * Add all primary and overlay planes on the CRTC to the state
8656 	 * whenever a plane is enabled to maintain correct z-ordering
8657 	 * and to enable fast surface updates.
8658 	 */
8659 	drm_for_each_crtc(crtc, dev) {
8660 		bool modified = false;
8661 
8662 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8663 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8664 				continue;
8665 
8666 			if (new_plane_state->crtc == crtc ||
8667 			    old_plane_state->crtc == crtc) {
8668 				modified = true;
8669 				break;
8670 			}
8671 		}
8672 
8673 		if (!modified)
8674 			continue;
8675 
8676 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8677 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8678 				continue;
8679 
8680 			new_plane_state =
8681 				drm_atomic_get_plane_state(state, plane);
8682 
8683 			if (IS_ERR(new_plane_state)) {
8684 				ret = PTR_ERR(new_plane_state);
8685 				goto fail;
8686 			}
8687 		}
8688 	}
8689 
8690 	/* Prepass for updating tiling flags on new planes. */
8691 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8692 		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8693 		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8694 
8695 		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8696 				  &new_dm_plane_state->tmz_surface);
8697 		if (ret)
8698 			goto fail;
8699 	}
8700 
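	/*
	 * Plane and CRTC updates happen in two passes: removals and disables
	 * first, then additions and enables, so the DC context is never left
	 * in an over-committed intermediate state.
	 */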
	/* Remove existing planes if they are modified */
8702 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8703 		ret = dm_update_plane_state(dc, state, plane,
8704 					    old_plane_state,
8705 					    new_plane_state,
8706 					    false,
8707 					    &lock_and_validation_needed);
8708 		if (ret)
8709 			goto fail;
8710 	}
8711 
8712 	/* Disable all crtcs which require disable */
8713 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8714 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8715 					   old_crtc_state,
8716 					   new_crtc_state,
8717 					   false,
8718 					   &lock_and_validation_needed);
8719 		if (ret)
8720 			goto fail;
8721 	}
8722 
8723 	/* Enable all crtcs which require enable */
8724 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8725 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8726 					   old_crtc_state,
8727 					   new_crtc_state,
8728 					   true,
8729 					   &lock_and_validation_needed);
8730 		if (ret)
8731 			goto fail;
8732 	}
8733 
8734 	/* Add new/modified planes */
8735 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8736 		ret = dm_update_plane_state(dc, state, plane,
8737 					    old_plane_state,
8738 					    new_plane_state,
8739 					    true,
8740 					    &lock_and_validation_needed);
8741 		if (ret)
8742 			goto fail;
8743 	}
8744 
8745 	/* Run this here since we want to validate the streams we created */
8746 	ret = drm_atomic_helper_check_planes(dev, state);
8747 	if (ret)
8748 		goto fail;
8749 
8750 	if (state->legacy_cursor_update) {
8751 		/*
8752 		 * This is a fast cursor update coming from the plane update
8753 		 * helper, check if it can be done asynchronously for better
8754 		 * performance.
8755 		 */
8756 		state->async_update =
8757 			!drm_atomic_helper_async_check(dev, state);
8758 
8759 		/*
8760 		 * Skip the remaining global validation if this is an async
8761 		 * update. Cursor updates can be done without affecting
8762 		 * state or bandwidth calcs and this avoids the performance
8763 		 * penalty of locking the private state object and
8764 		 * allocating a new dc_state.
8765 		 */
8766 		if (state->async_update)
8767 			return 0;
8768 	}
8769 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-changes validation was removed due to the inability
	 * to commit a new stream into the context without causing a full
	 * reset. Need to decide how to handle this.
	 */
8775 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8776 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8777 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8778 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8779 
8780 		/* Skip any modesets/resets */
8781 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8782 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8783 			continue;
8784 
		/* Skip anything that isn't a scaling or underscan change */
8786 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8787 			continue;
8788 
8789 		lock_and_validation_needed = true;
8790 	}
8791 
8792 	/**
8793 	 * Streams and planes are reset when there are changes that affect
8794 	 * bandwidth. Anything that affects bandwidth needs to go through
8795 	 * DC global validation to ensure that the configuration can be applied
8796 	 * to hardware.
8797 	 *
8798 	 * We have to currently stall out here in atomic_check for outstanding
8799 	 * commits to finish in this case because our IRQ handlers reference
8800 	 * DRM state directly - we can end up disabling interrupts too early
8801 	 * if we don't.
8802 	 *
8803 	 * TODO: Remove this stall and drop DM state private objects.
8804 	 */
8805 	if (lock_and_validation_needed) {
8806 		ret = dm_atomic_get_state(state, &dm_state);
8807 		if (ret)
8808 			goto fail;
8809 
8810 		ret = do_aquire_global_lock(dev, state);
8811 		if (ret)
8812 			goto fail;
8813 
8814 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
8817 
8818 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8819 		if (ret)
8820 			goto fail;
8821 #endif
8822 
8823 		/*
8824 		 * Perform validation of MST topology in the state:
8825 		 * We need to perform MST atomic check before calling
8826 		 * dc_validate_global_state(), or there is a chance
8827 		 * to get stuck in an infinite loop and hang eventually.
8828 		 */
8829 		ret = drm_dp_mst_atomic_check(state);
8830 		if (ret)
8831 			goto fail;
8832 		status = dc_validate_global_state(dc, dm_state->context, false);
8833 		if (status != DC_OK) {
8834 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
8835 				       dc_status_to_str(status), status);
8836 			ret = -EINVAL;
8837 			goto fail;
8838 		}
8839 	} else {
8840 		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context or affect global validation, and their commit
		 * work can be done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation, we need to free it and
		 * retain the existing one instead.
8847 		 *
8848 		 * Furthermore, since the DM atomic state only contains the DC
8849 		 * context and can safely be annulled, we can free the state
8850 		 * and clear the associated private object now to free
8851 		 * some memory and avoid a possible use-after-free later.
8852 		 */
8853 
8854 		for (i = 0; i < state->num_private_objs; i++) {
8855 			struct drm_private_obj *obj = state->private_objs[i].ptr;
8856 
8857 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
8859 
8860 				dm_atomic_destroy_state(obj,
8861 						state->private_objs[i].state);
8862 
8863 				/* If i is not at the end of the array then the
8864 				 * last element needs to be moved to where i was
8865 				 * before the array can safely be truncated.
8866 				 */
8867 				if (i != j)
8868 					state->private_objs[i] =
8869 						state->private_objs[j];
8870 
8871 				state->private_objs[j].ptr = NULL;
8872 				state->private_objs[j].state = NULL;
8873 				state->private_objs[j].old_state = NULL;
8874 				state->private_objs[j].new_state = NULL;
8875 
8876 				state->num_private_objs = j;
8877 				break;
8878 			}
8879 		}
8880 	}
8881 
8882 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8884 		struct dm_crtc_state *dm_new_crtc_state =
8885 			to_dm_crtc_state(new_crtc_state);
8886 
8887 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
8888 							 UPDATE_TYPE_FULL :
8889 							 UPDATE_TYPE_FAST;
8890 	}
8891 
	/* Must be success; any failure above jumps to the fail label */
8893 	WARN_ON(ret);
8894 	return ret;
8895 
8896 fail:
8897 	if (ret == -EDEADLK)
8898 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8899 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8900 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8901 	else
8902 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8903 
8904 	return ret;
8905 }
8906 
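/*
 * Check whether the DP sink reports that it can ignore the MSA timing
 * parameters (DP_MSA_TIMING_PAR_IGNORED in DP_DOWN_STREAM_PORT_COUNT),
 * which is a prerequisite for variable-refresh (FreeSync) timings over DP.
 */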
8907 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8908 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8909 {
8910 	uint8_t dpcd_data;
8911 	bool capable = false;
8912 
8913 	if (amdgpu_dm_connector->dc_link &&
8914 		dm_helpers_dp_read_dpcd(
8915 				NULL,
8916 				amdgpu_dm_connector->dc_link,
8917 				DP_DOWN_STREAM_PORT_COUNT,
8918 				&dpcd_data,
8919 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}
8921 	}
8922 
8923 	return capable;
8924 }
8925 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8926 					struct edid *edid)
8927 {
8928 	int i;
8929 	bool edid_check_required;
8930 	struct detailed_timing *timing;
8931 	struct detailed_non_pixel *data;
8932 	struct detailed_data_monitor_range *range;
8933 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8934 			to_amdgpu_dm_connector(connector);
8935 	struct dm_connector_state *dm_con_state = NULL;
8936 
8937 	struct drm_device *dev = connector->dev;
8938 	struct amdgpu_device *adev = drm_to_adev(dev);
8939 	bool freesync_capable = false;
8940 
8941 	if (!connector->state) {
8942 		DRM_ERROR("%s - Connector has no state", __func__);
8943 		goto update;
8944 	}
8945 
8946 	if (!edid) {
8947 		dm_con_state = to_dm_connector_state(connector->state);
8948 
8949 		amdgpu_dm_connector->min_vfreq = 0;
8950 		amdgpu_dm_connector->max_vfreq = 0;
8951 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8952 
8953 		goto update;
8954 	}
8955 
8956 	dm_con_state = to_dm_connector_state(connector->state);
8957 
8958 	edid_check_required = false;
8959 	if (!amdgpu_dm_connector->dc_sink) {
8960 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8961 		goto update;
8962 	}
8963 	if (!adev->dm.freesync_module)
8964 		goto update;
8965 	/*
8966 	 * if edid non zero restrict freesync only for dp and edp
8967 	 */
8968 	if (edid) {
8969 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8970 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8971 			edid_check_required = is_dp_capable_without_timing_msa(
8972 						adev->dm.dc,
8973 						amdgpu_dm_connector);
8974 		}
8975 	}
	if (edid_check_required && (edid->version > 1 ||
8977 	   (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing	= &edid->detailed_timings[i];
8981 			data	= &timing->data.other_data;
8982 			range	= &data->data.range;
8983 			/*
8984 			 * Check if monitor has continuous frequency mode
8985 			 */
8986 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8987 				continue;
8988 			/*
8989 			 * Check for flag range limits only. If flag == 1 then
8990 			 * no additional timing information provided.
8991 			 * Default GTF, GTF Secondary curve and CVT are not
8992 			 * supported
8993 			 */
8994 			if (range->flags != 1)
8995 				continue;
8996 
8997 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8998 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8999 			amdgpu_dm_connector->pixel_clock_mhz =
9000 				range->pixel_clock_mhz * 10;
9001 			break;
9002 		}
9003 
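		/* Require a usable refresh range wider than 10 Hz for FreeSync. */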
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
9009 	}
9010 
9011 update:
9012 	if (dm_con_state)
9013 		dm_con_state->freesync_capable = freesync_capable;
9014 
9015 	if (connector->vrr_capable_property)
9016 		drm_connector_set_vrr_capable_property(connector,
9017 						       freesync_capable);
9018 }
9019 
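/*
 * amdgpu_dm_set_psr_caps() - cache the eDP sink's PSR capabilities
 * @link: link to probe
 *
 * Read DP_PSR_SUPPORT from the sink's DPCD and record the reported PSR
 * version; PSR is only enabled when the sink reports a non-zero version.
 */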
9020 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9021 {
9022 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9023 
9024 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9025 		return;
9026 	if (link->type == dc_connection_none)
9027 		return;
9028 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9029 					dpcd_data, sizeof(dpcd_data))) {
9030 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9031 
9032 		if (dpcd_data[0] == 0) {
9033 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9034 			link->psr_settings.psr_feature_enabled = false;
9035 		} else {
9036 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9037 			link->psr_settings.psr_feature_enabled = true;
9038 		}
9039 
9040 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9041 	}
9042 }
9043 
9044 /*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success
9049  */
9050 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9051 {
9052 	struct dc_link *link = NULL;
9053 	struct psr_config psr_config = {0};
9054 	struct psr_context psr_context = {0};
9055 	bool ret = false;
9056 
9057 	if (stream == NULL)
9058 		return false;
9059 
9060 	link = stream->link;
9061 
9062 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9063 
9064 	if (psr_config.psr_version > 0) {
9065 		psr_config.psr_exit_link_training_required = 0x1;
9066 		psr_config.psr_frame_capture_indication_req = 0;
9067 		psr_config.psr_rfb_setup_time = 0x37;
9068 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9069 		psr_config.allow_smu_optimizations = 0x0;
9070 
		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}

	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9075 
9076 	return ret;
9077 }
9078 
9079 /*
 * amdgpu_dm_psr_enable() - enable the PSR firmware feature
 * @stream: stream state
 *
 * Return: true on success
9084  */
9085 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9086 {
9087 	struct dc_link *link = stream->link;
9088 	unsigned int vsync_rate_hz = 0;
9089 	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR; initialize to a fail-safe default of
	 * 2 static frames.
	 */
	unsigned int num_frames_static = 2;
9095 
9096 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9097 
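	/* Refresh rate in Hz = pixel clock / (h_total * v_total) */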
	vsync_rate_hz = div64_u64(div64_u64(stream->timing.pix_clk_100hz * 100,
					    stream->timing.v_total),
				  stream->timing.h_total);
9102 
	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
9110 	}
9111 
9112 	params.triggers.cursor_update = true;
9113 	params.triggers.overlay_update = true;
9114 	params.triggers.surface_update = true;
9115 	params.num_frames = num_frames_static;
9116 
9117 	dc_stream_set_static_screen_params(link->ctx->dc,
9118 					   &stream, 1,
9119 					   &params);
9120 
9121 	return dc_link_set_psr_allow_active(link, true, false);
9122 }
9123 
9124 /*
 * amdgpu_dm_psr_disable() - disable the PSR firmware feature
 * @stream: stream state
 *
 * Return: true on success
9129  */
9130 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9131 {
9132 
9133 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9134 
9135 	return dc_link_set_psr_allow_active(stream->link, false, true);
9136 }
9137 
9138 /*
 * amdgpu_dm_psr_disable_all() - disable PSR if it is enabled on any stream
 * @dm: display manager
 *
 * Return: true on success
9143  */
9144 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9145 {
9146 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9147 	return dc_set_psr_allow_active(dm->dc, false);
9148 }
9149 
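/*
 * Propagate the force_timing_sync setting to every active stream and
 * re-trigger CRTC timing synchronization under the DC lock.
 */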
9150 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9151 {
9152 	struct amdgpu_device *adev = drm_to_adev(dev);
9153 	struct dc *dc = adev->dm.dc;
9154 	int i;
9155 
9156 	mutex_lock(&adev->dm.dc_lock);
9157 	if (dc->current_state) {
9158 		for (i = 0; i < dc->current_state->stream_count; ++i)
9159 			dc->current_state->streams[i]
9160 				->triggered_crtc_reset.enabled =
9161 				adev->dm.force_timing_sync;
9162 
9163 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9164 		dc_trigger_sync(dc, dc->current_state);
9165 	}
9166 	mutex_unlock(&adev->dm.dc_lock);
9167 }
9168