1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #endif
103 
104 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
106 
107 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
109 
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
112 
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
115 
116 /**
117  * DOC: overview
118  *
119  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
120  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
121  * requests into DC requests, and DC responses into DRM responses.
122  *
123  * The root control structure is &struct amdgpu_display_manager.
124  */
125 
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device *adev);
128 static void amdgpu_dm_fini(struct amdgpu_device *adev);
129 
130 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
131 {
132 	switch (link->dpcd_caps.dongle_type) {
133 	case DISPLAY_DONGLE_NONE:
134 		return DRM_MODE_SUBCONNECTOR_Native;
135 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
136 		return DRM_MODE_SUBCONNECTOR_VGA;
137 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
138 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
139 		return DRM_MODE_SUBCONNECTOR_DVID;
140 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
141 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
142 		return DRM_MODE_SUBCONNECTOR_HDMIA;
143 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
144 	default:
145 		return DRM_MODE_SUBCONNECTOR_Unknown;
146 	}
147 }
148 
149 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
150 {
151 	struct dc_link *link = aconnector->dc_link;
152 	struct drm_connector *connector = &aconnector->base;
153 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
154 
155 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
156 		return;
157 
158 	if (aconnector->dc_sink)
159 		subconnector = get_subconnector_type(link);
160 
161 	drm_object_property_set_value(&connector->base,
162 			connector->dev->mode_config.dp_subconnector_property,
163 			subconnector);
164 }
165 
166 /*
167  * initializes drm_device display related structures, based on the information
168  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
169  * drm_encoder, drm_mode_config
170  *
171  * Returns 0 on success
172  */
173 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
174 /* removes and deallocates the drm structures, created by the above function */
175 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
176 
177 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
178 				struct drm_plane *plane,
179 				unsigned long possible_crtcs,
180 				const struct dc_plane_cap *plane_cap);
181 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
182 			       struct drm_plane *plane,
183 			       uint32_t link_index);
184 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
185 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
186 				    uint32_t link_index,
187 				    struct amdgpu_encoder *amdgpu_encoder);
188 static int amdgpu_dm_encoder_init(struct drm_device *dev,
189 				  struct amdgpu_encoder *aencoder,
190 				  uint32_t link_index);
191 
192 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
193 
194 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
195 				   struct drm_atomic_state *state,
196 				   bool nonblock);
197 
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
199 
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 				  struct drm_atomic_state *state);
202 
203 static void handle_cursor_update(struct drm_plane *plane,
204 				 struct drm_plane_state *old_plane_state);
205 
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
211 
212 /*
213  * dm_vblank_get_counter
214  *
215  * @brief
216  * Get counter for number of vertical blanks
217  *
218  * @param
219  * struct amdgpu_device *adev - [in] desired amdgpu device
220  * int disp_idx - [in] which CRTC to get the counter from
221  *
222  * @return
223  * Counter for vertical blanks
224  */
225 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
226 {
227 	if (crtc >= adev->mode_info.num_crtc)
228 		return 0;
229 	else {
230 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
231 
232 		if (acrtc->dm_irq_params.stream == NULL) {
233 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
234 				  crtc);
235 			return 0;
236 		}
237 
238 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
239 	}
240 }
241 
242 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
243 				  u32 *vbl, u32 *position)
244 {
245 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
246 
247 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
248 		return -EINVAL;
249 	else {
250 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
251 
252 		if (acrtc->dm_irq_params.stream ==  NULL) {
253 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
254 				  crtc);
255 			return 0;
256 		}
257 
258 		/*
259 		 * TODO rework base driver to use values directly.
260 		 * for now parse it back into reg-format
261 		 */
262 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
263 					 &v_blank_start,
264 					 &v_blank_end,
265 					 &h_position,
266 					 &v_position);
267 
268 		*position = v_position | (h_position << 16);
269 		*vbl = v_blank_start | (v_blank_end << 16);
270 	}
271 
272 	return 0;
273 }
274 
275 static bool dm_is_idle(void *handle)
276 {
277 	/* XXX todo */
278 	return true;
279 }
280 
281 static int dm_wait_for_idle(void *handle)
282 {
283 	/* XXX todo */
284 	return 0;
285 }
286 
287 static bool dm_check_soft_reset(void *handle)
288 {
289 	return false;
290 }
291 
292 static int dm_soft_reset(void *handle)
293 {
294 	/* XXX todo */
295 	return 0;
296 }
297 
298 static struct amdgpu_crtc *
299 get_crtc_by_otg_inst(struct amdgpu_device *adev,
300 		     int otg_inst)
301 {
302 	struct drm_device *dev = adev_to_drm(adev);
303 	struct drm_crtc *crtc;
304 	struct amdgpu_crtc *amdgpu_crtc;
305 
306 	if (otg_inst == -1) {
307 		WARN_ON(1);
308 		return adev->mode_info.crtcs[0];
309 	}
310 
311 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
312 		amdgpu_crtc = to_amdgpu_crtc(crtc);
313 
314 		if (amdgpu_crtc->otg_inst == otg_inst)
315 			return amdgpu_crtc;
316 	}
317 
318 	return NULL;
319 }
320 
321 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
322 {
323 	return acrtc->dm_irq_params.freesync_config.state ==
324 		       VRR_STATE_ACTIVE_VARIABLE ||
325 	       acrtc->dm_irq_params.freesync_config.state ==
326 		       VRR_STATE_ACTIVE_FIXED;
327 }
328 
329 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
330 {
331 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
332 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
333 }
334 
335 /**
336  * dm_pflip_high_irq() - Handle pageflip interrupt
337  * @interrupt_params: ignored
338  *
339  * Handles the pageflip interrupt by notifying all interested parties
340  * that the pageflip has been completed.
341  */
342 static void dm_pflip_high_irq(void *interrupt_params)
343 {
344 	struct amdgpu_crtc *amdgpu_crtc;
345 	struct common_irq_params *irq_params = interrupt_params;
346 	struct amdgpu_device *adev = irq_params->adev;
347 	unsigned long flags;
348 	struct drm_pending_vblank_event *e;
349 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
350 	bool vrr_active;
351 
352 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
353 
354 	/* IRQ could occur when in initial stage */
355 	/* TODO work and BO cleanup */
356 	if (amdgpu_crtc == NULL) {
357 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
358 		return;
359 	}
360 
361 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
362 
363 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
364 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
365 						 amdgpu_crtc->pflip_status,
366 						 AMDGPU_FLIP_SUBMITTED,
367 						 amdgpu_crtc->crtc_id,
368 						 amdgpu_crtc);
369 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
370 		return;
371 	}
372 
373 	/* page flip completed. */
374 	e = amdgpu_crtc->event;
375 	amdgpu_crtc->event = NULL;
376 
377 	if (!e)
378 		WARN_ON(1);
379 
380 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
381 
382 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
383 	if (!vrr_active ||
384 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
385 				      &v_blank_end, &hpos, &vpos) ||
386 	    (vpos < v_blank_start)) {
387 		/* Update to correct count and vblank timestamp if racing with
388 		 * vblank irq. This also updates to the correct vblank timestamp
389 		 * even in VRR mode, as scanout is past the front-porch atm.
390 		 */
391 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
392 
393 		/* Wake up userspace by sending the pageflip event with proper
394 		 * count and timestamp of vblank of flip completion.
395 		 */
396 		if (e) {
397 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
398 
399 			/* Event sent, so done with vblank for this flip */
400 			drm_crtc_vblank_put(&amdgpu_crtc->base);
401 		}
402 	} else if (e) {
403 		/* VRR active and inside front-porch: vblank count and
404 		 * timestamp for pageflip event will only be up to date after
405 		 * drm_crtc_handle_vblank() has been executed from late vblank
406 		 * irq handler after start of back-porch (vline 0). We queue the
407 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
408 		 * updated timestamp and count, once it runs after us.
409 		 *
410 		 * We need to open-code this instead of using the helper
411 		 * drm_crtc_arm_vblank_event(), as that helper would
412 		 * call drm_crtc_accurate_vblank_count(), which we must
413 		 * not call in VRR mode while we are in front-porch!
414 		 */
415 
416 		/* sequence will be replaced by real count during send-out. */
417 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
418 		e->pipe = amdgpu_crtc->crtc_id;
419 
420 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
421 		e = NULL;
422 	}
423 
424 	/* Keep track of vblank of this flip for flip throttling. We use the
425 	 * cooked hw counter, as that one incremented at start of this vblank
426 	 * of pageflip completion, so last_flip_vblank is the forbidden count
427 	 * for queueing new pageflips if vsync + VRR is enabled.
428 	 */
429 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
430 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
431 
432 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
433 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
434 
435 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
436 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
437 			 vrr_active, (int) !e);
438 }
439 
440 static void dm_vupdate_high_irq(void *interrupt_params)
441 {
442 	struct common_irq_params *irq_params = interrupt_params;
443 	struct amdgpu_device *adev = irq_params->adev;
444 	struct amdgpu_crtc *acrtc;
445 	unsigned long flags;
446 	int vrr_active;
447 
448 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
449 
450 	if (acrtc) {
451 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
452 
453 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
454 			      acrtc->crtc_id,
455 			      vrr_active);
456 
457 		/* Core vblank handling is done here after end of front-porch in
458 		 * vrr mode, as vblank timestamping will give valid results
459 		 * while now done after front-porch. This will also deliver
460 		 * page-flip completion events that have been queued to us
461 		 * if a pageflip happened inside front-porch.
462 		 */
463 		if (vrr_active) {
464 			drm_crtc_handle_vblank(&acrtc->base);
465 
466 			/* BTR processing for pre-DCE12 ASICs */
467 			if (acrtc->dm_irq_params.stream &&
468 			    adev->family < AMDGPU_FAMILY_AI) {
469 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
470 				mod_freesync_handle_v_update(
471 				    adev->dm.freesync_module,
472 				    acrtc->dm_irq_params.stream,
473 				    &acrtc->dm_irq_params.vrr_params);
474 
475 				dc_stream_adjust_vmin_vmax(
476 				    adev->dm.dc,
477 				    acrtc->dm_irq_params.stream,
478 				    &acrtc->dm_irq_params.vrr_params.adjust);
479 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
480 			}
481 		}
482 	}
483 }
484 
485 /**
486  * dm_crtc_high_irq() - Handles CRTC interrupt
487  * @interrupt_params: used for determining the CRTC instance
488  *
489  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
490  * event handler.
491  */
492 static void dm_crtc_high_irq(void *interrupt_params)
493 {
494 	struct common_irq_params *irq_params = interrupt_params;
495 	struct amdgpu_device *adev = irq_params->adev;
496 	struct amdgpu_crtc *acrtc;
497 	unsigned long flags;
498 	int vrr_active;
499 
500 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
501 	if (!acrtc)
502 		return;
503 
504 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
505 
506 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
507 		      vrr_active, acrtc->dm_irq_params.active_planes);
508 
509 	/**
510 	 * Core vblank handling at start of front-porch is only possible
511 	 * in non-vrr mode, as only there vblank timestamping will give
512 	 * valid results while done in front-porch. Otherwise defer it
513 	 * to dm_vupdate_high_irq after end of front-porch.
514 	 */
515 	if (!vrr_active)
516 		drm_crtc_handle_vblank(&acrtc->base);
517 
518 	/**
519 	 * Following stuff must happen at start of vblank, for crc
520 	 * computation and below-the-range btr support in vrr mode.
521 	 */
522 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
523 
524 	/* BTR updates need to happen before VUPDATE on Vega and above. */
525 	if (adev->family < AMDGPU_FAMILY_AI)
526 		return;
527 
528 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
529 
530 	if (acrtc->dm_irq_params.stream &&
531 	    acrtc->dm_irq_params.vrr_params.supported &&
532 	    acrtc->dm_irq_params.freesync_config.state ==
533 		    VRR_STATE_ACTIVE_VARIABLE) {
534 		mod_freesync_handle_v_update(adev->dm.freesync_module,
535 					     acrtc->dm_irq_params.stream,
536 					     &acrtc->dm_irq_params.vrr_params);
537 
538 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
539 					   &acrtc->dm_irq_params.vrr_params.adjust);
540 	}
541 
542 	/*
543 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
544 	 * In that case, pageflip completion interrupts won't fire and pageflip
545 	 * completion events won't get delivered. Prevent this by sending
546 	 * pending pageflip events from here if a flip is still pending.
547 	 *
548 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
549 	 * avoid race conditions between flip programming and completion,
550 	 * which could cause too early flip completion events.
551 	 */
552 	if (adev->family >= AMDGPU_FAMILY_RV &&
553 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
554 	    acrtc->dm_irq_params.active_planes == 0) {
555 		if (acrtc->event) {
556 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
557 			acrtc->event = NULL;
558 			drm_crtc_vblank_put(&acrtc->base);
559 		}
560 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
561 	}
562 
563 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
564 }
565 
566 static int dm_set_clockgating_state(void *handle,
567 		  enum amd_clockgating_state state)
568 {
569 	return 0;
570 }
571 
572 static int dm_set_powergating_state(void *handle,
573 		  enum amd_powergating_state state)
574 {
575 	return 0;
576 }
577 
578 /* Prototypes of private functions */
579 static int dm_early_init(void* handle);
580 
581 /* Allocate memory for FBC compressed data  */
582 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
583 {
584 	struct drm_device *dev = connector->dev;
585 	struct amdgpu_device *adev = drm_to_adev(dev);
586 	struct dm_comressor_info *compressor = &adev->dm.compressor;
587 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
588 	struct drm_display_mode *mode;
589 	unsigned long max_size = 0;
590 
591 	if (adev->dm.dc->fbc_compressor == NULL)
592 		return;
593 
594 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
595 		return;
596 
597 	if (compressor->bo_ptr)
598 		return;
599 
600 
601 	list_for_each_entry(mode, &connector->modes, head) {
602 		if (max_size < mode->htotal * mode->vtotal)
603 			max_size = mode->htotal * mode->vtotal;
604 	}
605 
606 	if (max_size) {
607 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
608 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
609 			    &compressor->gpu_addr, &compressor->cpu_addr);
610 
611 		if (r)
612 			DRM_ERROR("DM: Failed to initialize FBC\n");
613 		else {
614 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
615 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
616 		}
617 
618 	}
619 
620 }
621 
622 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
623 					  int pipe, bool *enabled,
624 					  unsigned char *buf, int max_bytes)
625 {
626 	struct drm_device *dev = dev_get_drvdata(kdev);
627 	struct amdgpu_device *adev = drm_to_adev(dev);
628 	struct drm_connector *connector;
629 	struct drm_connector_list_iter conn_iter;
630 	struct amdgpu_dm_connector *aconnector;
631 	int ret = 0;
632 
633 	*enabled = false;
634 
635 	mutex_lock(&adev->dm.audio_lock);
636 
637 	drm_connector_list_iter_begin(dev, &conn_iter);
638 	drm_for_each_connector_iter(connector, &conn_iter) {
639 		aconnector = to_amdgpu_dm_connector(connector);
640 		if (aconnector->audio_inst != port)
641 			continue;
642 
643 		*enabled = true;
644 		ret = drm_eld_size(connector->eld);
645 		memcpy(buf, connector->eld, min(max_bytes, ret));
646 
647 		break;
648 	}
649 	drm_connector_list_iter_end(&conn_iter);
650 
651 	mutex_unlock(&adev->dm.audio_lock);
652 
653 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
654 
655 	return ret;
656 }
657 
658 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
659 	.get_eld = amdgpu_dm_audio_component_get_eld,
660 };
661 
662 static int amdgpu_dm_audio_component_bind(struct device *kdev,
663 				       struct device *hda_kdev, void *data)
664 {
665 	struct drm_device *dev = dev_get_drvdata(kdev);
666 	struct amdgpu_device *adev = drm_to_adev(dev);
667 	struct drm_audio_component *acomp = data;
668 
669 	acomp->ops = &amdgpu_dm_audio_component_ops;
670 	acomp->dev = kdev;
671 	adev->dm.audio_component = acomp;
672 
673 	return 0;
674 }
675 
676 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
677 					  struct device *hda_kdev, void *data)
678 {
679 	struct drm_device *dev = dev_get_drvdata(kdev);
680 	struct amdgpu_device *adev = drm_to_adev(dev);
681 	struct drm_audio_component *acomp = data;
682 
683 	acomp->ops = NULL;
684 	acomp->dev = NULL;
685 	adev->dm.audio_component = NULL;
686 }
687 
688 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
689 	.bind	= amdgpu_dm_audio_component_bind,
690 	.unbind	= amdgpu_dm_audio_component_unbind,
691 };
692 
693 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
694 {
695 	int i, ret;
696 
697 	if (!amdgpu_audio)
698 		return 0;
699 
700 	adev->mode_info.audio.enabled = true;
701 
702 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
703 
704 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
705 		adev->mode_info.audio.pin[i].channels = -1;
706 		adev->mode_info.audio.pin[i].rate = -1;
707 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
708 		adev->mode_info.audio.pin[i].status_bits = 0;
709 		adev->mode_info.audio.pin[i].category_code = 0;
710 		adev->mode_info.audio.pin[i].connected = false;
711 		adev->mode_info.audio.pin[i].id =
712 			adev->dm.dc->res_pool->audios[i]->inst;
713 		adev->mode_info.audio.pin[i].offset = 0;
714 	}
715 
716 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
717 	if (ret < 0)
718 		return ret;
719 
720 	adev->dm.audio_registered = true;
721 
722 	return 0;
723 }
724 
725 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
726 {
727 	if (!amdgpu_audio)
728 		return;
729 
730 	if (!adev->mode_info.audio.enabled)
731 		return;
732 
733 	if (adev->dm.audio_registered) {
734 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
735 		adev->dm.audio_registered = false;
736 	}
737 
738 	/* TODO: Disable audio? */
739 
740 	adev->mode_info.audio.enabled = false;
741 }
742 
743 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
744 {
745 	struct drm_audio_component *acomp = adev->dm.audio_component;
746 
747 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
748 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
749 
750 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
751 						 pin, -1);
752 	}
753 }
754 
755 static int dm_dmub_hw_init(struct amdgpu_device *adev)
756 {
757 	const struct dmcub_firmware_header_v1_0 *hdr;
758 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
759 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
760 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
761 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
762 	struct abm *abm = adev->dm.dc->res_pool->abm;
763 	struct dmub_srv_hw_params hw_params;
764 	enum dmub_status status;
765 	const unsigned char *fw_inst_const, *fw_bss_data;
766 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
767 	bool has_hw_support;
768 
769 	if (!dmub_srv)
770 		/* DMUB isn't supported on the ASIC. */
771 		return 0;
772 
773 	if (!fb_info) {
774 		DRM_ERROR("No framebuffer info for DMUB service.\n");
775 		return -EINVAL;
776 	}
777 
778 	if (!dmub_fw) {
779 		/* Firmware required for DMUB support. */
780 		DRM_ERROR("No firmware provided for DMUB.\n");
781 		return -EINVAL;
782 	}
783 
784 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
785 	if (status != DMUB_STATUS_OK) {
786 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
787 		return -EINVAL;
788 	}
789 
790 	if (!has_hw_support) {
791 		DRM_INFO("DMUB unsupported on ASIC\n");
792 		return 0;
793 	}
794 
795 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
796 
797 	fw_inst_const = dmub_fw->data +
798 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
799 			PSP_HEADER_BYTES;
800 
801 	fw_bss_data = dmub_fw->data +
802 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
803 		      le32_to_cpu(hdr->inst_const_bytes);
804 
805 	/* Copy firmware and bios info into FB memory. */
806 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
807 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
808 
809 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
810 
811 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
812 	 * amdgpu_ucode_init_single_fw will load dmub firmware
813 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
814 	 * will be done by dm_dmub_hw_init
815 	 */
816 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
817 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
818 				fw_inst_const_size);
819 	}
820 
821 	if (fw_bss_data_size)
822 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
823 		       fw_bss_data, fw_bss_data_size);
824 
825 	/* Copy firmware bios info into FB memory. */
826 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
827 	       adev->bios_size);
828 
829 	/* Reset regions that need to be reset. */
830 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
831 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
832 
833 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
834 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
835 
836 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
837 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
838 
839 	/* Initialize hardware. */
840 	memset(&hw_params, 0, sizeof(hw_params));
841 	hw_params.fb_base = adev->gmc.fb_start;
842 	hw_params.fb_offset = adev->gmc.aper_base;
843 
844 	/* backdoor load firmware and trigger dmub running */
845 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
846 		hw_params.load_inst_const = true;
847 
848 	if (dmcu)
849 		hw_params.psp_version = dmcu->psp_version;
850 
851 	for (i = 0; i < fb_info->num_fb; ++i)
852 		hw_params.fb[i] = &fb_info->fb[i];
853 
854 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
855 	if (status != DMUB_STATUS_OK) {
856 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
857 		return -EINVAL;
858 	}
859 
860 	/* Wait for firmware load to finish. */
861 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
862 	if (status != DMUB_STATUS_OK)
863 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
864 
865 	/* Init DMCU and ABM if available. */
866 	if (dmcu && abm) {
867 		dmcu->funcs->dmcu_init(dmcu);
868 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
869 	}
870 
871 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
872 	if (!adev->dm.dc->ctx->dmub_srv) {
873 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
874 		return -ENOMEM;
875 	}
876 
877 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
878 		 adev->dm.dmcub_fw_version);
879 
880 	return 0;
881 }
882 
883 static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
884 							   struct drm_atomic_state *state)
885 {
886 	struct drm_connector *connector;
887 	struct drm_crtc *crtc;
888 	struct amdgpu_dm_connector *amdgpu_dm_connector;
889 	struct drm_connector_state *conn_state;
890 	struct dm_crtc_state *acrtc_state;
891 	struct drm_crtc_state *crtc_state;
892 	struct dc_stream_state *stream;
893 	struct drm_device *dev = adev_to_drm(adev);
894 
895 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
896 
897 		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
898 		conn_state = connector->state;
899 
900 		if (!(conn_state && conn_state->crtc))
901 			continue;
902 
903 		crtc = conn_state->crtc;
904 		acrtc_state = to_dm_crtc_state(crtc->state);
905 
906 		if (!(acrtc_state && acrtc_state->stream))
907 			continue;
908 
909 		stream = acrtc_state->stream;
910 
911 		if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
912 		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
913 		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
914 		    amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
915 			conn_state = drm_atomic_get_connector_state(state, connector);
916 			crtc_state = drm_atomic_get_crtc_state(state, crtc);
917 			crtc_state->mode_changed = true;
918 		}
919 	}
920 }
921 
922 static int amdgpu_dm_init(struct amdgpu_device *adev)
923 {
924 	struct dc_init_data init_data;
925 #ifdef CONFIG_DRM_AMD_DC_HDCP
926 	struct dc_callback_init init_params;
927 #endif
928 	int r;
929 
930 	adev->dm.ddev = adev_to_drm(adev);
931 	adev->dm.adev = adev;
932 
933 	/* Zero all the fields */
934 	memset(&init_data, 0, sizeof(init_data));
935 #ifdef CONFIG_DRM_AMD_DC_HDCP
936 	memset(&init_params, 0, sizeof(init_params));
937 #endif
938 
939 	mutex_init(&adev->dm.dc_lock);
940 	mutex_init(&adev->dm.audio_lock);
941 
942 	if(amdgpu_dm_irq_init(adev)) {
943 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
944 		goto error;
945 	}
946 
947 	init_data.asic_id.chip_family = adev->family;
948 
949 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
950 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
951 
952 	init_data.asic_id.vram_width = adev->gmc.vram_width;
953 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
954 	init_data.asic_id.atombios_base_address =
955 		adev->mode_info.atom_context->bios;
956 
957 	init_data.driver = adev;
958 
959 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
960 
961 	if (!adev->dm.cgs_device) {
962 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
963 		goto error;
964 	}
965 
966 	init_data.cgs_device = adev->dm.cgs_device;
967 
968 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
969 
970 	switch (adev->asic_type) {
971 	case CHIP_CARRIZO:
972 	case CHIP_STONEY:
973 	case CHIP_RAVEN:
974 	case CHIP_RENOIR:
975 		init_data.flags.gpu_vm_support = true;
976 		break;
977 	default:
978 		break;
979 	}
980 
981 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
982 		init_data.flags.fbc_support = true;
983 
984 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
985 		init_data.flags.multi_mon_pp_mclk_switch = true;
986 
987 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
988 		init_data.flags.disable_fractional_pwm = true;
989 
990 	init_data.flags.power_down_display_on_boot = true;
991 
992 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
993 
994 	/* Display Core create. */
995 	adev->dm.dc = dc_create(&init_data);
996 
997 	if (adev->dm.dc) {
998 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
999 	} else {
1000 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1001 		goto error;
1002 	}
1003 
1004 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1005 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1006 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1007 	}
1008 
1009 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1010 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1011 
1012 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1013 		adev->dm.dc->debug.disable_stutter = true;
1014 
1015 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1016 		adev->dm.dc->debug.disable_dsc = true;
1017 
1018 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1019 		adev->dm.dc->debug.disable_clock_gate = true;
1020 
1021 	r = dm_dmub_hw_init(adev);
1022 	if (r) {
1023 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1024 		goto error;
1025 	}
1026 
1027 	dc_hardware_init(adev->dm.dc);
1028 
1029 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1030 	if (!adev->dm.freesync_module) {
1031 		DRM_ERROR(
1032 		"amdgpu: failed to initialize freesync_module.\n");
1033 	} else
1034 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1035 				adev->dm.freesync_module);
1036 
1037 	amdgpu_dm_init_color_mod();
1038 
1039 #ifdef CONFIG_DRM_AMD_DC_HDCP
1040 	if (adev->asic_type >= CHIP_RAVEN) {
1041 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1042 
1043 		if (!adev->dm.hdcp_workqueue)
1044 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1045 		else
1046 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1047 
1048 		dc_init_callbacks(adev->dm.dc, &init_params);
1049 	}
1050 #endif
1051 	if (amdgpu_dm_initialize_drm_device(adev)) {
1052 		DRM_ERROR(
1053 		"amdgpu: failed to initialize sw for display support.\n");
1054 		goto error;
1055 	}
1056 
1057 	/* Update the actual used number of crtc */
1058 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1059 
1060 	/* create fake encoders for MST */
1061 	dm_dp_create_fake_mst_encoders(adev);
1062 
1063 	/* TODO: Add_display_info? */
1064 
1065 	/* TODO use dynamic cursor width */
1066 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1067 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1068 
1069 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1070 		DRM_ERROR(
1071 		"amdgpu: failed to initialize sw for display support.\n");
1072 		goto error;
1073 	}
1074 
1075 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1076 
1077 	return 0;
1078 error:
1079 	amdgpu_dm_fini(adev);
1080 
1081 	return -EINVAL;
1082 }
1083 
1084 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1085 {
1086 	int i;
1087 
1088 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1089 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1090 	}
1091 
1092 	amdgpu_dm_audio_fini(adev);
1093 
1094 	amdgpu_dm_destroy_drm_device(&adev->dm);
1095 
1096 #ifdef CONFIG_DRM_AMD_DC_HDCP
1097 	if (adev->dm.hdcp_workqueue) {
1098 		hdcp_destroy(adev->dm.hdcp_workqueue);
1099 		adev->dm.hdcp_workqueue = NULL;
1100 	}
1101 
1102 	if (adev->dm.dc)
1103 		dc_deinit_callbacks(adev->dm.dc);
1104 #endif
1105 	if (adev->dm.dc->ctx->dmub_srv) {
1106 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1107 		adev->dm.dc->ctx->dmub_srv = NULL;
1108 	}
1109 
1110 	if (adev->dm.dmub_bo)
1111 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1112 				      &adev->dm.dmub_bo_gpu_addr,
1113 				      &adev->dm.dmub_bo_cpu_addr);
1114 
1115 	/* DC Destroy TODO: Replace destroy DAL */
1116 	if (adev->dm.dc)
1117 		dc_destroy(&adev->dm.dc);
1118 	/*
1119 	 * TODO: pageflip, vlank interrupt
1120 	 *
1121 	 * amdgpu_dm_irq_fini(adev);
1122 	 */
1123 
1124 	if (adev->dm.cgs_device) {
1125 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1126 		adev->dm.cgs_device = NULL;
1127 	}
1128 	if (adev->dm.freesync_module) {
1129 		mod_freesync_destroy(adev->dm.freesync_module);
1130 		adev->dm.freesync_module = NULL;
1131 	}
1132 
1133 	mutex_destroy(&adev->dm.audio_lock);
1134 	mutex_destroy(&adev->dm.dc_lock);
1135 
1136 	return;
1137 }
1138 
1139 static int load_dmcu_fw(struct amdgpu_device *adev)
1140 {
1141 	const char *fw_name_dmcu = NULL;
1142 	int r;
1143 	const struct dmcu_firmware_header_v1_0 *hdr;
1144 
1145 	switch(adev->asic_type) {
1146 #if defined(CONFIG_DRM_AMD_DC_SI)
1147 	case CHIP_TAHITI:
1148 	case CHIP_PITCAIRN:
1149 	case CHIP_VERDE:
1150 	case CHIP_OLAND:
1151 #endif
1152 	case CHIP_BONAIRE:
1153 	case CHIP_HAWAII:
1154 	case CHIP_KAVERI:
1155 	case CHIP_KABINI:
1156 	case CHIP_MULLINS:
1157 	case CHIP_TONGA:
1158 	case CHIP_FIJI:
1159 	case CHIP_CARRIZO:
1160 	case CHIP_STONEY:
1161 	case CHIP_POLARIS11:
1162 	case CHIP_POLARIS10:
1163 	case CHIP_POLARIS12:
1164 	case CHIP_VEGAM:
1165 	case CHIP_VEGA10:
1166 	case CHIP_VEGA12:
1167 	case CHIP_VEGA20:
1168 	case CHIP_NAVI10:
1169 	case CHIP_NAVI14:
1170 	case CHIP_RENOIR:
1171 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1172 	case CHIP_SIENNA_CICHLID:
1173 	case CHIP_NAVY_FLOUNDER:
1174 #endif
1175 		return 0;
1176 	case CHIP_NAVI12:
1177 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1178 		break;
1179 	case CHIP_RAVEN:
1180 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1181 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1182 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1183 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1184 		else
1185 			return 0;
1186 		break;
1187 	default:
1188 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1189 		return -EINVAL;
1190 	}
1191 
1192 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1193 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1194 		return 0;
1195 	}
1196 
1197 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1198 	if (r == -ENOENT) {
1199 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1200 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1201 		adev->dm.fw_dmcu = NULL;
1202 		return 0;
1203 	}
1204 	if (r) {
1205 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1206 			fw_name_dmcu);
1207 		return r;
1208 	}
1209 
1210 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1211 	if (r) {
1212 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1213 			fw_name_dmcu);
1214 		release_firmware(adev->dm.fw_dmcu);
1215 		adev->dm.fw_dmcu = NULL;
1216 		return r;
1217 	}
1218 
1219 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1220 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1221 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1222 	adev->firmware.fw_size +=
1223 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1224 
1225 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1226 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1227 	adev->firmware.fw_size +=
1228 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1229 
1230 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1231 
1232 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1233 
1234 	return 0;
1235 }
1236 
1237 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1238 {
1239 	struct amdgpu_device *adev = ctx;
1240 
1241 	return dm_read_reg(adev->dm.dc->ctx, address);
1242 }
1243 
1244 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1245 				     uint32_t value)
1246 {
1247 	struct amdgpu_device *adev = ctx;
1248 
1249 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1250 }
1251 
1252 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1253 {
1254 	struct dmub_srv_create_params create_params;
1255 	struct dmub_srv_region_params region_params;
1256 	struct dmub_srv_region_info region_info;
1257 	struct dmub_srv_fb_params fb_params;
1258 	struct dmub_srv_fb_info *fb_info;
1259 	struct dmub_srv *dmub_srv;
1260 	const struct dmcub_firmware_header_v1_0 *hdr;
1261 	const char *fw_name_dmub;
1262 	enum dmub_asic dmub_asic;
1263 	enum dmub_status status;
1264 	int r;
1265 
1266 	switch (adev->asic_type) {
1267 	case CHIP_RENOIR:
1268 		dmub_asic = DMUB_ASIC_DCN21;
1269 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1270 		break;
1271 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1272 	case CHIP_SIENNA_CICHLID:
1273 		dmub_asic = DMUB_ASIC_DCN30;
1274 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1275 		break;
1276 	case CHIP_NAVY_FLOUNDER:
1277 		dmub_asic = DMUB_ASIC_DCN30;
1278 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1279 		break;
1280 #endif
1281 
1282 	default:
1283 		/* ASIC doesn't support DMUB. */
1284 		return 0;
1285 	}
1286 
1287 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1288 	if (r) {
1289 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1290 		return 0;
1291 	}
1292 
1293 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1294 	if (r) {
1295 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1296 		return 0;
1297 	}
1298 
1299 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1300 
1301 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1302 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1303 			AMDGPU_UCODE_ID_DMCUB;
1304 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1305 			adev->dm.dmub_fw;
1306 		adev->firmware.fw_size +=
1307 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1308 
1309 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1310 			 adev->dm.dmcub_fw_version);
1311 	}
1312 
1313 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1314 
1315 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1316 	dmub_srv = adev->dm.dmub_srv;
1317 
1318 	if (!dmub_srv) {
1319 		DRM_ERROR("Failed to allocate DMUB service!\n");
1320 		return -ENOMEM;
1321 	}
1322 
1323 	memset(&create_params, 0, sizeof(create_params));
1324 	create_params.user_ctx = adev;
1325 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1326 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1327 	create_params.asic = dmub_asic;
1328 
1329 	/* Create the DMUB service. */
1330 	status = dmub_srv_create(dmub_srv, &create_params);
1331 	if (status != DMUB_STATUS_OK) {
1332 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1333 		return -EINVAL;
1334 	}
1335 
1336 	/* Calculate the size of all the regions for the DMUB service. */
1337 	memset(&region_params, 0, sizeof(region_params));
1338 
1339 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1340 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1341 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1342 	region_params.vbios_size = adev->bios_size;
1343 	region_params.fw_bss_data = region_params.bss_data_size ?
1344 		adev->dm.dmub_fw->data +
1345 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1346 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1347 	region_params.fw_inst_const =
1348 		adev->dm.dmub_fw->data +
1349 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1350 		PSP_HEADER_BYTES;
1351 
1352 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1353 					   &region_info);
1354 
1355 	if (status != DMUB_STATUS_OK) {
1356 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1357 		return -EINVAL;
1358 	}
1359 
1360 	/*
1361 	 * Allocate a framebuffer based on the total size of all the regions.
1362 	 * TODO: Move this into GART.
1363 	 */
1364 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1365 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1366 				    &adev->dm.dmub_bo_gpu_addr,
1367 				    &adev->dm.dmub_bo_cpu_addr);
1368 	if (r)
1369 		return r;
1370 
1371 	/* Rebase the regions on the framebuffer address. */
1372 	memset(&fb_params, 0, sizeof(fb_params));
1373 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1374 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1375 	fb_params.region_info = &region_info;
1376 
1377 	adev->dm.dmub_fb_info =
1378 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1379 	fb_info = adev->dm.dmub_fb_info;
1380 
1381 	if (!fb_info) {
1382 		DRM_ERROR(
1383 			"Failed to allocate framebuffer info for DMUB service!\n");
1384 		return -ENOMEM;
1385 	}
1386 
1387 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1388 	if (status != DMUB_STATUS_OK) {
1389 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1390 		return -EINVAL;
1391 	}
1392 
1393 	return 0;
1394 }
1395 
1396 static int dm_sw_init(void *handle)
1397 {
1398 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1399 	int r;
1400 
1401 	r = dm_dmub_sw_init(adev);
1402 	if (r)
1403 		return r;
1404 
1405 	return load_dmcu_fw(adev);
1406 }
1407 
1408 static int dm_sw_fini(void *handle)
1409 {
1410 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1411 
1412 	kfree(adev->dm.dmub_fb_info);
1413 	adev->dm.dmub_fb_info = NULL;
1414 
1415 	if (adev->dm.dmub_srv) {
1416 		dmub_srv_destroy(adev->dm.dmub_srv);
1417 		adev->dm.dmub_srv = NULL;
1418 	}
1419 
1420 	release_firmware(adev->dm.dmub_fw);
1421 	adev->dm.dmub_fw = NULL;
1422 
1423 	release_firmware(adev->dm.fw_dmcu);
1424 	adev->dm.fw_dmcu = NULL;
1425 
1426 	return 0;
1427 }
1428 
1429 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1430 {
1431 	struct amdgpu_dm_connector *aconnector;
1432 	struct drm_connector *connector;
1433 	struct drm_connector_list_iter iter;
1434 	int ret = 0;
1435 
1436 	drm_connector_list_iter_begin(dev, &iter);
1437 	drm_for_each_connector_iter(connector, &iter) {
1438 		aconnector = to_amdgpu_dm_connector(connector);
1439 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1440 		    aconnector->mst_mgr.aux) {
1441 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1442 					 aconnector,
1443 					 aconnector->base.base.id);
1444 
1445 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1446 			if (ret < 0) {
1447 				DRM_ERROR("DM_MST: Failed to start MST\n");
1448 				aconnector->dc_link->type =
1449 					dc_connection_single;
1450 				break;
1451 			}
1452 		}
1453 	}
1454 	drm_connector_list_iter_end(&iter);
1455 
1456 	return ret;
1457 }
1458 
1459 static int dm_late_init(void *handle)
1460 {
1461 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1462 
1463 	struct dmcu_iram_parameters params;
1464 	unsigned int linear_lut[16];
1465 	int i;
1466 	struct dmcu *dmcu = NULL;
1467 	bool ret = true;
1468 
1469 	dmcu = adev->dm.dc->res_pool->dmcu;
1470 
1471 	for (i = 0; i < 16; i++)
1472 		linear_lut[i] = 0xFFFF * i / 15;
1473 
1474 	params.set = 0;
1475 	params.backlight_ramping_start = 0xCCCC;
1476 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1477 	params.backlight_lut_array_size = 16;
1478 	params.backlight_lut_array = linear_lut;
1479 
1480 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1481 	 * 0xFFFF x 0.01 = 0x28F
1482 	 */
1483 	params.min_abm_backlight = 0x28F;
1484 
1485 	/* In the case where abm is implemented on dmcub,
1486 	 * dmcu object will be null.
1487 	 * ABM 2.4 and up are implemented on dmcub.
1488 	 */
1489 	if (dmcu)
1490 		ret = dmcu_load_iram(dmcu, params);
1491 	else if (adev->dm.dc->ctx->dmub_srv)
1492 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1493 
1494 	if (!ret)
1495 		return -EINVAL;
1496 
1497 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1498 }
1499 
1500 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1501 {
1502 	struct amdgpu_dm_connector *aconnector;
1503 	struct drm_connector *connector;
1504 	struct drm_connector_list_iter iter;
1505 	struct drm_dp_mst_topology_mgr *mgr;
1506 	int ret;
1507 	bool need_hotplug = false;
1508 
1509 	drm_connector_list_iter_begin(dev, &iter);
1510 	drm_for_each_connector_iter(connector, &iter) {
1511 		aconnector = to_amdgpu_dm_connector(connector);
1512 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1513 		    aconnector->mst_port)
1514 			continue;
1515 
1516 		mgr = &aconnector->mst_mgr;
1517 
1518 		if (suspend) {
1519 			drm_dp_mst_topology_mgr_suspend(mgr);
1520 		} else {
1521 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1522 			if (ret < 0) {
1523 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1524 				need_hotplug = true;
1525 			}
1526 		}
1527 	}
1528 	drm_connector_list_iter_end(&iter);
1529 
1530 	if (need_hotplug)
1531 		drm_kms_helper_hotplug_event(dev);
1532 }
1533 
1534 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1535 {
1536 	struct smu_context *smu = &adev->smu;
1537 	int ret = 0;
1538 
1539 	if (!is_support_sw_smu(adev))
1540 		return 0;
1541 
1542 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1543 	 * on window driver dc implementation.
1544 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1545 	 * should be passed to smu during boot up and resume from s3.
1546 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1547 	 * dcn20_resource_construct
1548 	 * then call pplib functions below to pass the settings to smu:
1549 	 * smu_set_watermarks_for_clock_ranges
1550 	 * smu_set_watermarks_table
1551 	 * navi10_set_watermarks_table
1552 	 * smu_write_watermarks_table
1553 	 *
1554 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1555 	 * dc has implemented different flow for window driver:
1556 	 * dc_hardware_init / dc_set_power_state
1557 	 * dcn10_init_hw
1558 	 * notify_wm_ranges
1559 	 * set_wm_ranges
1560 	 * -- Linux
1561 	 * smu_set_watermarks_for_clock_ranges
1562 	 * renoir_set_watermarks_table
1563 	 * smu_write_watermarks_table
1564 	 *
1565 	 * For Linux,
1566 	 * dc_hardware_init -> amdgpu_dm_init
1567 	 * dc_set_power_state --> dm_resume
1568 	 *
1569 	 * therefore, this function apply to navi10/12/14 but not Renoir
1570 	 * *
1571 	 */
1572 	switch(adev->asic_type) {
1573 	case CHIP_NAVI10:
1574 	case CHIP_NAVI14:
1575 	case CHIP_NAVI12:
1576 		break;
1577 	default:
1578 		return 0;
1579 	}
1580 
1581 	ret = smu_write_watermarks_table(smu);
1582 	if (ret) {
1583 		DRM_ERROR("Failed to update WMTABLE!\n");
1584 		return ret;
1585 	}
1586 
1587 	return 0;
1588 }
1589 
1590 /**
1591  * dm_hw_init() - Initialize DC device
1592  * @handle: The base driver device containing the amdgpu_dm device.
1593  *
1594  * Initialize the &struct amdgpu_display_manager device. This involves calling
1595  * the initializers of each DM component, then populating the struct with them.
1596  *
1597  * Although the function implies hardware initialization, both hardware and
1598  * software are initialized here. Splitting them out to their relevant init
1599  * hooks is a future TODO item.
1600  *
1601  * Some notable things that are initialized here:
1602  *
1603  * - Display Core, both software and hardware
1604  * - DC modules that we need (freesync and color management)
1605  * - DRM software states
1606  * - Interrupt sources and handlers
1607  * - Vblank support
1608  * - Debug FS entries, if enabled
1609  */
1610 static int dm_hw_init(void *handle)
1611 {
1612 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1613 	/* Create DAL display manager */
1614 	amdgpu_dm_init(adev);
1615 	amdgpu_dm_hpd_init(adev);
1616 
1617 	return 0;
1618 }
1619 
1620 /**
1621  * dm_hw_fini() - Teardown DC device
1622  * @handle: The base driver device containing the amdgpu_dm device.
1623  *
1624  * Teardown components within &struct amdgpu_display_manager that require
1625  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1626  * were loaded. Also flush IRQ workqueues and disable them.
1627  */
1628 static int dm_hw_fini(void *handle)
1629 {
1630 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1631 
1632 	amdgpu_dm_hpd_fini(adev);
1633 
1634 	amdgpu_dm_irq_fini(adev);
1635 	amdgpu_dm_fini(adev);
1636 	return 0;
1637 }
1638 
1639 
1640 static int dm_enable_vblank(struct drm_crtc *crtc);
1641 static void dm_disable_vblank(struct drm_crtc *crtc);
1642 
1643 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1644 				 struct dc_state *state, bool enable)
1645 {
1646 	enum dc_irq_source irq_source;
1647 	struct amdgpu_crtc *acrtc;
1648 	int rc = -EBUSY;
1649 	int i = 0;
1650 
1651 	for (i = 0; i < state->stream_count; i++) {
1652 		acrtc = get_crtc_by_otg_inst(
1653 				adev, state->stream_status[i].primary_otg_inst);
1654 
1655 		if (acrtc && state->stream_status[i].plane_count != 0) {
1656 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1657 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1658 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1659 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1660 			if (rc)
1661 				DRM_WARN("Failed to %s pflip interrupts\n",
1662 					 enable ? "enable" : "disable");
1663 
1664 			if (enable) {
1665 				rc = dm_enable_vblank(&acrtc->base);
1666 				if (rc)
1667 					DRM_WARN("Failed to enable vblank interrupts\n");
1668 			} else {
1669 				dm_disable_vblank(&acrtc->base);
1670 			}
1671 
1672 		}
1673 	}
1674 
1675 }
1676 
1677 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1678 {
1679 	struct dc_state *context = NULL;
1680 	enum dc_status res = DC_ERROR_UNEXPECTED;
1681 	int i;
1682 	struct dc_stream_state *del_streams[MAX_PIPES];
1683 	int del_streams_count = 0;
1684 
1685 	memset(del_streams, 0, sizeof(del_streams));
1686 
1687 	context = dc_create_state(dc);
1688 	if (context == NULL)
1689 		goto context_alloc_fail;
1690 
1691 	dc_resource_state_copy_construct_current(dc, context);
1692 
1693 	/* First remove from context all streams */
1694 	for (i = 0; i < context->stream_count; i++) {
1695 		struct dc_stream_state *stream = context->streams[i];
1696 
1697 		del_streams[del_streams_count++] = stream;
1698 	}
1699 
1700 	/* Remove all planes for removed streams and then remove the streams */
1701 	for (i = 0; i < del_streams_count; i++) {
1702 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1703 			res = DC_FAIL_DETACH_SURFACES;
1704 			goto fail;
1705 		}
1706 
1707 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1708 		if (res != DC_OK)
1709 			goto fail;
1710 	}
1711 
1712 
1713 	res = dc_validate_global_state(dc, context, false);
1714 
1715 	if (res != DC_OK) {
1716 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1717 		goto fail;
1718 	}
1719 
1720 	res = dc_commit_state(dc, context);
1721 
1722 fail:
1723 	dc_release_state(context);
1724 
1725 context_alloc_fail:
1726 	return res;
1727 }
1728 
1729 static int dm_suspend(void *handle)
1730 {
1731 	struct amdgpu_device *adev = handle;
1732 	struct amdgpu_display_manager *dm = &adev->dm;
1733 	int ret = 0;
1734 
1735 	if (amdgpu_in_reset(adev)) {
1736 		mutex_lock(&dm->dc_lock);
1737 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1738 
1739 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1740 
1741 		amdgpu_dm_commit_zero_streams(dm->dc);
1742 
1743 		amdgpu_dm_irq_suspend(adev);
1744 
1745 		return ret;
1746 	}
1747 
1748 	WARN_ON(adev->dm.cached_state);
1749 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1750 
1751 	s3_handle_mst(adev_to_drm(adev), true);
1752 
1753 	amdgpu_dm_irq_suspend(adev);
1754 
1755 
1756 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1757 
1758 	return 0;
1759 }
1760 
1761 static struct amdgpu_dm_connector *
1762 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1763 					     struct drm_crtc *crtc)
1764 {
1765 	uint32_t i;
1766 	struct drm_connector_state *new_con_state;
1767 	struct drm_connector *connector;
1768 	struct drm_crtc *crtc_from_state;
1769 
1770 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1771 		crtc_from_state = new_con_state->crtc;
1772 
1773 		if (crtc_from_state == crtc)
1774 			return to_amdgpu_dm_connector(connector);
1775 	}
1776 
1777 	return NULL;
1778 }
1779 
1780 static void emulated_link_detect(struct dc_link *link)
1781 {
1782 	struct dc_sink_init_data sink_init_data = { 0 };
1783 	struct display_sink_capability sink_caps = { 0 };
1784 	enum dc_edid_status edid_status;
1785 	struct dc_context *dc_ctx = link->ctx;
1786 	struct dc_sink *sink = NULL;
1787 	struct dc_sink *prev_sink = NULL;
1788 
1789 	link->type = dc_connection_none;
1790 	prev_sink = link->local_sink;
1791 
1792 	if (prev_sink != NULL)
1793 		dc_sink_retain(prev_sink);
1794 
1795 	switch (link->connector_signal) {
1796 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1797 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1798 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1799 		break;
1800 	}
1801 
1802 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1803 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1804 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1805 		break;
1806 	}
1807 
1808 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1809 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1810 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1811 		break;
1812 	}
1813 
1814 	case SIGNAL_TYPE_LVDS: {
1815 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1816 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1817 		break;
1818 	}
1819 
1820 	case SIGNAL_TYPE_EDP: {
1821 		sink_caps.transaction_type =
1822 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1823 		sink_caps.signal = SIGNAL_TYPE_EDP;
1824 		break;
1825 	}
1826 
1827 	case SIGNAL_TYPE_DISPLAY_PORT: {
1828 		sink_caps.transaction_type =
1829 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1830 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1831 		break;
1832 	}
1833 
1834 	default:
1835 		DC_ERROR("Invalid connector type! signal:%d\n",
1836 			link->connector_signal);
1837 		return;
1838 	}
1839 
1840 	sink_init_data.link = link;
1841 	sink_init_data.sink_signal = sink_caps.signal;
1842 
1843 	sink = dc_sink_create(&sink_init_data);
1844 	if (!sink) {
1845 		DC_ERROR("Failed to create sink!\n");
1846 		return;
1847 	}
1848 
1849 	/* dc_sink_create returns a new reference */
1850 	link->local_sink = sink;
1851 
1852 	edid_status = dm_helpers_read_local_edid(
1853 			link->ctx,
1854 			link,
1855 			sink);
1856 
1857 	if (edid_status != EDID_OK)
1858 		DC_ERROR("Failed to read EDID");
1859 
1860 }
1861 
1862 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1863 				     struct amdgpu_display_manager *dm)
1864 {
1865 	struct {
1866 		struct dc_surface_update surface_updates[MAX_SURFACES];
1867 		struct dc_plane_info plane_infos[MAX_SURFACES];
1868 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1869 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1870 		struct dc_stream_update stream_update;
1871 	} * bundle;
1872 	int k, m;
1873 
1874 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1875 
1876 	if (!bundle) {
1877 		dm_error("Failed to allocate update bundle\n");
1878 		goto cleanup;
1879 	}
1880 
1881 	for (k = 0; k < dc_state->stream_count; k++) {
1882 		bundle->stream_update.stream = dc_state->streams[k];
1883 
1884 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1885 			bundle->surface_updates[m].surface =
1886 				dc_state->stream_status->plane_states[m];
1887 			bundle->surface_updates[m].surface->force_full_update =
1888 				true;
1889 		}
1890 		dc_commit_updates_for_stream(
1891 			dm->dc, bundle->surface_updates,
1892 			dc_state->stream_status->plane_count,
1893 			dc_state->streams[k], &bundle->stream_update, dc_state);
1894 	}
1895 
1896 cleanup:
1897 	kfree(bundle);
1898 
1899 	return;
1900 }
1901 
1902 static int dm_resume(void *handle)
1903 {
1904 	struct amdgpu_device *adev = handle;
1905 	struct drm_device *ddev = adev_to_drm(adev);
1906 	struct amdgpu_display_manager *dm = &adev->dm;
1907 	struct amdgpu_dm_connector *aconnector;
1908 	struct drm_connector *connector;
1909 	struct drm_connector_list_iter iter;
1910 	struct drm_crtc *crtc;
1911 	struct drm_crtc_state *new_crtc_state;
1912 	struct dm_crtc_state *dm_new_crtc_state;
1913 	struct drm_plane *plane;
1914 	struct drm_plane_state *new_plane_state;
1915 	struct dm_plane_state *dm_new_plane_state;
1916 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1917 	enum dc_connection_type new_connection_type = dc_connection_none;
1918 	struct dc_state *dc_state;
1919 	int i, r, j;
1920 
1921 	if (amdgpu_in_reset(adev)) {
1922 		dc_state = dm->cached_dc_state;
1923 
1924 		r = dm_dmub_hw_init(adev);
1925 		if (r)
1926 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1927 
1928 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1929 		dc_resume(dm->dc);
1930 
1931 		amdgpu_dm_irq_resume_early(adev);
1932 
1933 		for (i = 0; i < dc_state->stream_count; i++) {
1934 			dc_state->streams[i]->mode_changed = true;
1935 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1936 				dc_state->stream_status->plane_states[j]->update_flags.raw
1937 					= 0xffffffff;
1938 			}
1939 		}
1940 
1941 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1942 
1943 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1944 
1945 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1946 
1947 		dc_release_state(dm->cached_dc_state);
1948 		dm->cached_dc_state = NULL;
1949 
1950 		amdgpu_dm_irq_resume_late(adev);
1951 
1952 		mutex_unlock(&dm->dc_lock);
1953 
1954 		return 0;
1955 	}
1956 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1957 	dc_release_state(dm_state->context);
1958 	dm_state->context = dc_create_state(dm->dc);
1959 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1960 	dc_resource_state_construct(dm->dc, dm_state->context);
1961 
1962 	/* Before powering on DC we need to re-initialize DMUB. */
1963 	r = dm_dmub_hw_init(adev);
1964 	if (r)
1965 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1966 
1967 	/* power on hardware */
1968 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1969 
1970 	/* program HPD filter */
1971 	dc_resume(dm->dc);
1972 
1973 	/*
1974 	 * early enable HPD Rx IRQ, should be done before set mode as short
1975 	 * pulse interrupts are used for MST
1976 	 */
1977 	amdgpu_dm_irq_resume_early(adev);
1978 
1979 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
1980 	s3_handle_mst(ddev, false);
1981 
1982 	/* Do detection*/
1983 	drm_connector_list_iter_begin(ddev, &iter);
1984 	drm_for_each_connector_iter(connector, &iter) {
1985 		aconnector = to_amdgpu_dm_connector(connector);
1986 
1987 		/*
1988 		 * this is the case when traversing through already created
1989 		 * MST connectors, should be skipped
1990 		 */
1991 		if (aconnector->mst_port)
1992 			continue;
1993 
1994 		mutex_lock(&aconnector->hpd_lock);
1995 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1996 			DRM_ERROR("KMS: Failed to detect connector\n");
1997 
1998 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1999 			emulated_link_detect(aconnector->dc_link);
2000 		else
2001 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2002 
2003 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2004 			aconnector->fake_enable = false;
2005 
2006 		if (aconnector->dc_sink)
2007 			dc_sink_release(aconnector->dc_sink);
2008 		aconnector->dc_sink = NULL;
2009 		amdgpu_dm_update_connector_after_detect(aconnector);
2010 		mutex_unlock(&aconnector->hpd_lock);
2011 	}
2012 	drm_connector_list_iter_end(&iter);
2013 
2014 	/* Force mode set in atomic commit */
2015 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2016 		new_crtc_state->active_changed = true;
2017 
2018 	/*
2019 	 * atomic_check is expected to create the dc states. We need to release
2020 	 * them here, since they were duplicated as part of the suspend
2021 	 * procedure.
2022 	 */
2023 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2024 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2025 		if (dm_new_crtc_state->stream) {
2026 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2027 			dc_stream_release(dm_new_crtc_state->stream);
2028 			dm_new_crtc_state->stream = NULL;
2029 		}
2030 	}
2031 
2032 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2033 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2034 		if (dm_new_plane_state->dc_state) {
2035 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2036 			dc_plane_state_release(dm_new_plane_state->dc_state);
2037 			dm_new_plane_state->dc_state = NULL;
2038 		}
2039 	}
2040 
2041 	drm_atomic_helper_resume(ddev, dm->cached_state);
2042 
2043 	dm->cached_state = NULL;
2044 
2045 	amdgpu_dm_irq_resume_late(adev);
2046 
2047 	amdgpu_dm_smu_write_watermarks_table(adev);
2048 
2049 	return 0;
2050 }
2051 
2052 /**
2053  * DOC: DM Lifecycle
2054  *
2055  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2056  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2057  * the base driver's device list to be initialized and torn down accordingly.
2058  *
2059  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2060  */
2061 
2062 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2063 	.name = "dm",
2064 	.early_init = dm_early_init,
2065 	.late_init = dm_late_init,
2066 	.sw_init = dm_sw_init,
2067 	.sw_fini = dm_sw_fini,
2068 	.hw_init = dm_hw_init,
2069 	.hw_fini = dm_hw_fini,
2070 	.suspend = dm_suspend,
2071 	.resume = dm_resume,
2072 	.is_idle = dm_is_idle,
2073 	.wait_for_idle = dm_wait_for_idle,
2074 	.check_soft_reset = dm_check_soft_reset,
2075 	.soft_reset = dm_soft_reset,
2076 	.set_clockgating_state = dm_set_clockgating_state,
2077 	.set_powergating_state = dm_set_powergating_state,
2078 };
2079 
2080 const struct amdgpu_ip_block_version dm_ip_block =
2081 {
2082 	.type = AMD_IP_BLOCK_TYPE_DCE,
2083 	.major = 1,
2084 	.minor = 0,
2085 	.rev = 0,
2086 	.funcs = &amdgpu_dm_funcs,
2087 };
2088 
2089 
2090 /**
2091  * DOC: atomic
2092  *
2093  * *WIP*
2094  */
2095 
2096 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2097 	.fb_create = amdgpu_display_user_framebuffer_create,
2098 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2099 	.atomic_check = amdgpu_dm_atomic_check,
2100 	.atomic_commit = amdgpu_dm_atomic_commit,
2101 };
2102 
2103 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2104 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2105 };
2106 
2107 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2108 {
2109 	u32 max_cll, min_cll, max, min, q, r;
2110 	struct amdgpu_dm_backlight_caps *caps;
2111 	struct amdgpu_display_manager *dm;
2112 	struct drm_connector *conn_base;
2113 	struct amdgpu_device *adev;
2114 	struct dc_link *link = NULL;
2115 	static const u8 pre_computed_values[] = {
2116 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2117 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2118 
2119 	if (!aconnector || !aconnector->dc_link)
2120 		return;
2121 
2122 	link = aconnector->dc_link;
2123 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2124 		return;
2125 
2126 	conn_base = &aconnector->base;
2127 	adev = drm_to_adev(conn_base->dev);
2128 	dm = &adev->dm;
2129 	caps = &dm->backlight_caps;
2130 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2131 	caps->aux_support = false;
2132 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2133 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2134 
2135 	if (caps->ext_caps->bits.oled == 1 ||
2136 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2137 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2138 		caps->aux_support = true;
2139 
2140 	/* From the specification (CTA-861-G), for calculating the maximum
2141 	 * luminance we need to use:
2142 	 *	Luminance = 50*2**(CV/32)
2143 	 * Where CV is a one-byte value.
2144 	 * For calculating this expression we may need float point precision;
2145 	 * to avoid this complexity level, we take advantage that CV is divided
2146 	 * by a constant. From the Euclids division algorithm, we know that CV
2147 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2148 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2149 	 * need to pre-compute the value of r/32. For pre-computing the values
2150 	 * We just used the following Ruby line:
2151 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2152 	 * The results of the above expressions can be verified at
2153 	 * pre_computed_values.
2154 	 */
2155 	q = max_cll >> 5;
2156 	r = max_cll % 32;
2157 	max = (1 << q) * pre_computed_values[r];
2158 
2159 	// min luminance: maxLum * (CV/255)^2 / 100
2160 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2161 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2162 
2163 	caps->aux_max_input_signal = max;
2164 	caps->aux_min_input_signal = min;
2165 }
2166 
2167 void amdgpu_dm_update_connector_after_detect(
2168 		struct amdgpu_dm_connector *aconnector)
2169 {
2170 	struct drm_connector *connector = &aconnector->base;
2171 	struct drm_device *dev = connector->dev;
2172 	struct dc_sink *sink;
2173 
2174 	/* MST handled by drm_mst framework */
2175 	if (aconnector->mst_mgr.mst_state == true)
2176 		return;
2177 
2178 	sink = aconnector->dc_link->local_sink;
2179 	if (sink)
2180 		dc_sink_retain(sink);
2181 
2182 	/*
2183 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2184 	 * the connector sink is set to either fake or physical sink depends on link status.
2185 	 * Skip if already done during boot.
2186 	 */
2187 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2188 			&& aconnector->dc_em_sink) {
2189 
2190 		/*
2191 		 * For S3 resume with headless use eml_sink to fake stream
2192 		 * because on resume connector->sink is set to NULL
2193 		 */
2194 		mutex_lock(&dev->mode_config.mutex);
2195 
2196 		if (sink) {
2197 			if (aconnector->dc_sink) {
2198 				amdgpu_dm_update_freesync_caps(connector, NULL);
2199 				/*
2200 				 * retain and release below are used to
2201 				 * bump up refcount for sink because the link doesn't point
2202 				 * to it anymore after disconnect, so on next crtc to connector
2203 				 * reshuffle by UMD we will get into unwanted dc_sink release
2204 				 */
2205 				dc_sink_release(aconnector->dc_sink);
2206 			}
2207 			aconnector->dc_sink = sink;
2208 			dc_sink_retain(aconnector->dc_sink);
2209 			amdgpu_dm_update_freesync_caps(connector,
2210 					aconnector->edid);
2211 		} else {
2212 			amdgpu_dm_update_freesync_caps(connector, NULL);
2213 			if (!aconnector->dc_sink) {
2214 				aconnector->dc_sink = aconnector->dc_em_sink;
2215 				dc_sink_retain(aconnector->dc_sink);
2216 			}
2217 		}
2218 
2219 		mutex_unlock(&dev->mode_config.mutex);
2220 
2221 		if (sink)
2222 			dc_sink_release(sink);
2223 		return;
2224 	}
2225 
2226 	/*
2227 	 * TODO: temporary guard to look for proper fix
2228 	 * if this sink is MST sink, we should not do anything
2229 	 */
2230 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2231 		dc_sink_release(sink);
2232 		return;
2233 	}
2234 
2235 	if (aconnector->dc_sink == sink) {
2236 		/*
2237 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2238 		 * Do nothing!!
2239 		 */
2240 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2241 				aconnector->connector_id);
2242 		if (sink)
2243 			dc_sink_release(sink);
2244 		return;
2245 	}
2246 
2247 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2248 		aconnector->connector_id, aconnector->dc_sink, sink);
2249 
2250 	mutex_lock(&dev->mode_config.mutex);
2251 
2252 	/*
2253 	 * 1. Update status of the drm connector
2254 	 * 2. Send an event and let userspace tell us what to do
2255 	 */
2256 	if (sink) {
2257 		/*
2258 		 * TODO: check if we still need the S3 mode update workaround.
2259 		 * If yes, put it here.
2260 		 */
2261 		if (aconnector->dc_sink)
2262 			amdgpu_dm_update_freesync_caps(connector, NULL);
2263 
2264 		aconnector->dc_sink = sink;
2265 		dc_sink_retain(aconnector->dc_sink);
2266 		if (sink->dc_edid.length == 0) {
2267 			aconnector->edid = NULL;
2268 			if (aconnector->dc_link->aux_mode) {
2269 				drm_dp_cec_unset_edid(
2270 					&aconnector->dm_dp_aux.aux);
2271 			}
2272 		} else {
2273 			aconnector->edid =
2274 				(struct edid *)sink->dc_edid.raw_edid;
2275 
2276 			drm_connector_update_edid_property(connector,
2277 							   aconnector->edid);
2278 			drm_add_edid_modes(connector, aconnector->edid);
2279 
2280 			if (aconnector->dc_link->aux_mode)
2281 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2282 						    aconnector->edid);
2283 		}
2284 
2285 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2286 		update_connector_ext_caps(aconnector);
2287 	} else {
2288 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2289 		amdgpu_dm_update_freesync_caps(connector, NULL);
2290 		drm_connector_update_edid_property(connector, NULL);
2291 		aconnector->num_modes = 0;
2292 		dc_sink_release(aconnector->dc_sink);
2293 		aconnector->dc_sink = NULL;
2294 		aconnector->edid = NULL;
2295 #ifdef CONFIG_DRM_AMD_DC_HDCP
2296 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2297 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2298 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2299 #endif
2300 	}
2301 
2302 	mutex_unlock(&dev->mode_config.mutex);
2303 
2304 	update_subconnector_property(aconnector);
2305 
2306 	if (sink)
2307 		dc_sink_release(sink);
2308 }
2309 
2310 static void handle_hpd_irq(void *param)
2311 {
2312 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2313 	struct drm_connector *connector = &aconnector->base;
2314 	struct drm_device *dev = connector->dev;
2315 	enum dc_connection_type new_connection_type = dc_connection_none;
2316 #ifdef CONFIG_DRM_AMD_DC_HDCP
2317 	struct amdgpu_device *adev = drm_to_adev(dev);
2318 #endif
2319 
2320 	/*
2321 	 * In case of failure or MST no need to update connector status or notify the OS
2322 	 * since (for MST case) MST does this in its own context.
2323 	 */
2324 	mutex_lock(&aconnector->hpd_lock);
2325 
2326 #ifdef CONFIG_DRM_AMD_DC_HDCP
2327 	if (adev->dm.hdcp_workqueue)
2328 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2329 #endif
2330 	if (aconnector->fake_enable)
2331 		aconnector->fake_enable = false;
2332 
2333 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2334 		DRM_ERROR("KMS: Failed to detect connector\n");
2335 
2336 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2337 		emulated_link_detect(aconnector->dc_link);
2338 
2339 
2340 		drm_modeset_lock_all(dev);
2341 		dm_restore_drm_connector_state(dev, connector);
2342 		drm_modeset_unlock_all(dev);
2343 
2344 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2345 			drm_kms_helper_hotplug_event(dev);
2346 
2347 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2348 		amdgpu_dm_update_connector_after_detect(aconnector);
2349 
2350 
2351 		drm_modeset_lock_all(dev);
2352 		dm_restore_drm_connector_state(dev, connector);
2353 		drm_modeset_unlock_all(dev);
2354 
2355 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2356 			drm_kms_helper_hotplug_event(dev);
2357 	}
2358 	mutex_unlock(&aconnector->hpd_lock);
2359 
2360 }
2361 
2362 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2363 {
2364 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2365 	uint8_t dret;
2366 	bool new_irq_handled = false;
2367 	int dpcd_addr;
2368 	int dpcd_bytes_to_read;
2369 
2370 	const int max_process_count = 30;
2371 	int process_count = 0;
2372 
2373 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2374 
2375 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2376 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2377 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2378 		dpcd_addr = DP_SINK_COUNT;
2379 	} else {
2380 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2381 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2382 		dpcd_addr = DP_SINK_COUNT_ESI;
2383 	}
2384 
2385 	dret = drm_dp_dpcd_read(
2386 		&aconnector->dm_dp_aux.aux,
2387 		dpcd_addr,
2388 		esi,
2389 		dpcd_bytes_to_read);
2390 
2391 	while (dret == dpcd_bytes_to_read &&
2392 		process_count < max_process_count) {
2393 		uint8_t retry;
2394 		dret = 0;
2395 
2396 		process_count++;
2397 
2398 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2399 		/* handle HPD short pulse irq */
2400 		if (aconnector->mst_mgr.mst_state)
2401 			drm_dp_mst_hpd_irq(
2402 				&aconnector->mst_mgr,
2403 				esi,
2404 				&new_irq_handled);
2405 
2406 		if (new_irq_handled) {
2407 			/* ACK at DPCD to notify down stream */
2408 			const int ack_dpcd_bytes_to_write =
2409 				dpcd_bytes_to_read - 1;
2410 
2411 			for (retry = 0; retry < 3; retry++) {
2412 				uint8_t wret;
2413 
2414 				wret = drm_dp_dpcd_write(
2415 					&aconnector->dm_dp_aux.aux,
2416 					dpcd_addr + 1,
2417 					&esi[1],
2418 					ack_dpcd_bytes_to_write);
2419 				if (wret == ack_dpcd_bytes_to_write)
2420 					break;
2421 			}
2422 
2423 			/* check if there is new irq to be handled */
2424 			dret = drm_dp_dpcd_read(
2425 				&aconnector->dm_dp_aux.aux,
2426 				dpcd_addr,
2427 				esi,
2428 				dpcd_bytes_to_read);
2429 
2430 			new_irq_handled = false;
2431 		} else {
2432 			break;
2433 		}
2434 	}
2435 
2436 	if (process_count == max_process_count)
2437 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2438 }
2439 
2440 static void handle_hpd_rx_irq(void *param)
2441 {
2442 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2443 	struct drm_connector *connector = &aconnector->base;
2444 	struct drm_device *dev = connector->dev;
2445 	struct dc_link *dc_link = aconnector->dc_link;
2446 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2447 	enum dc_connection_type new_connection_type = dc_connection_none;
2448 #ifdef CONFIG_DRM_AMD_DC_HDCP
2449 	union hpd_irq_data hpd_irq_data;
2450 	struct amdgpu_device *adev = drm_to_adev(dev);
2451 
2452 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2453 #endif
2454 
2455 	/*
2456 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2457 	 * conflict, after implement i2c helper, this mutex should be
2458 	 * retired.
2459 	 */
2460 	if (dc_link->type != dc_connection_mst_branch)
2461 		mutex_lock(&aconnector->hpd_lock);
2462 
2463 
2464 #ifdef CONFIG_DRM_AMD_DC_HDCP
2465 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2466 #else
2467 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2468 #endif
2469 			!is_mst_root_connector) {
2470 		/* Downstream Port status changed. */
2471 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2472 			DRM_ERROR("KMS: Failed to detect connector\n");
2473 
2474 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2475 			emulated_link_detect(dc_link);
2476 
2477 			if (aconnector->fake_enable)
2478 				aconnector->fake_enable = false;
2479 
2480 			amdgpu_dm_update_connector_after_detect(aconnector);
2481 
2482 
2483 			drm_modeset_lock_all(dev);
2484 			dm_restore_drm_connector_state(dev, connector);
2485 			drm_modeset_unlock_all(dev);
2486 
2487 			drm_kms_helper_hotplug_event(dev);
2488 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2489 
2490 			if (aconnector->fake_enable)
2491 				aconnector->fake_enable = false;
2492 
2493 			amdgpu_dm_update_connector_after_detect(aconnector);
2494 
2495 
2496 			drm_modeset_lock_all(dev);
2497 			dm_restore_drm_connector_state(dev, connector);
2498 			drm_modeset_unlock_all(dev);
2499 
2500 			drm_kms_helper_hotplug_event(dev);
2501 		}
2502 	}
2503 #ifdef CONFIG_DRM_AMD_DC_HDCP
2504 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2505 		if (adev->dm.hdcp_workqueue)
2506 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2507 	}
2508 #endif
2509 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2510 	    (dc_link->type == dc_connection_mst_branch))
2511 		dm_handle_hpd_rx_irq(aconnector);
2512 
2513 	if (dc_link->type != dc_connection_mst_branch) {
2514 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2515 		mutex_unlock(&aconnector->hpd_lock);
2516 	}
2517 }
2518 
2519 static void register_hpd_handlers(struct amdgpu_device *adev)
2520 {
2521 	struct drm_device *dev = adev_to_drm(adev);
2522 	struct drm_connector *connector;
2523 	struct amdgpu_dm_connector *aconnector;
2524 	const struct dc_link *dc_link;
2525 	struct dc_interrupt_params int_params = {0};
2526 
2527 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2528 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2529 
2530 	list_for_each_entry(connector,
2531 			&dev->mode_config.connector_list, head)	{
2532 
2533 		aconnector = to_amdgpu_dm_connector(connector);
2534 		dc_link = aconnector->dc_link;
2535 
2536 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2537 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2538 			int_params.irq_source = dc_link->irq_source_hpd;
2539 
2540 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2541 					handle_hpd_irq,
2542 					(void *) aconnector);
2543 		}
2544 
2545 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2546 
2547 			/* Also register for DP short pulse (hpd_rx). */
2548 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2549 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2550 
2551 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2552 					handle_hpd_rx_irq,
2553 					(void *) aconnector);
2554 		}
2555 	}
2556 }
2557 
2558 #if defined(CONFIG_DRM_AMD_DC_SI)
2559 /* Register IRQ sources and initialize IRQ callbacks */
2560 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2561 {
2562 	struct dc *dc = adev->dm.dc;
2563 	struct common_irq_params *c_irq_params;
2564 	struct dc_interrupt_params int_params = {0};
2565 	int r;
2566 	int i;
2567 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2568 
2569 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2570 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2571 
2572 	/*
2573 	 * Actions of amdgpu_irq_add_id():
2574 	 * 1. Register a set() function with base driver.
2575 	 *    Base driver will call set() function to enable/disable an
2576 	 *    interrupt in DC hardware.
2577 	 * 2. Register amdgpu_dm_irq_handler().
2578 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2579 	 *    coming from DC hardware.
2580 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2581 	 *    for acknowledging and handling. */
2582 
2583 	/* Use VBLANK interrupt */
2584 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2585 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2586 		if (r) {
2587 			DRM_ERROR("Failed to add crtc irq id!\n");
2588 			return r;
2589 		}
2590 
2591 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2592 		int_params.irq_source =
2593 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2594 
2595 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2596 
2597 		c_irq_params->adev = adev;
2598 		c_irq_params->irq_src = int_params.irq_source;
2599 
2600 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2601 				dm_crtc_high_irq, c_irq_params);
2602 	}
2603 
2604 	/* Use GRPH_PFLIP interrupt */
2605 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2606 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2607 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2608 		if (r) {
2609 			DRM_ERROR("Failed to add page flip irq id!\n");
2610 			return r;
2611 		}
2612 
2613 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2614 		int_params.irq_source =
2615 			dc_interrupt_to_irq_source(dc, i, 0);
2616 
2617 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2618 
2619 		c_irq_params->adev = adev;
2620 		c_irq_params->irq_src = int_params.irq_source;
2621 
2622 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2623 				dm_pflip_high_irq, c_irq_params);
2624 
2625 	}
2626 
2627 	/* HPD */
2628 	r = amdgpu_irq_add_id(adev, client_id,
2629 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2630 	if (r) {
2631 		DRM_ERROR("Failed to add hpd irq id!\n");
2632 		return r;
2633 	}
2634 
2635 	register_hpd_handlers(adev);
2636 
2637 	return 0;
2638 }
2639 #endif
2640 
2641 /* Register IRQ sources and initialize IRQ callbacks */
2642 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2643 {
2644 	struct dc *dc = adev->dm.dc;
2645 	struct common_irq_params *c_irq_params;
2646 	struct dc_interrupt_params int_params = {0};
2647 	int r;
2648 	int i;
2649 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2650 
2651 	if (adev->asic_type >= CHIP_VEGA10)
2652 		client_id = SOC15_IH_CLIENTID_DCE;
2653 
2654 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2655 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2656 
2657 	/*
2658 	 * Actions of amdgpu_irq_add_id():
2659 	 * 1. Register a set() function with base driver.
2660 	 *    Base driver will call set() function to enable/disable an
2661 	 *    interrupt in DC hardware.
2662 	 * 2. Register amdgpu_dm_irq_handler().
2663 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2664 	 *    coming from DC hardware.
2665 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2666 	 *    for acknowledging and handling. */
2667 
2668 	/* Use VBLANK interrupt */
2669 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2670 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2671 		if (r) {
2672 			DRM_ERROR("Failed to add crtc irq id!\n");
2673 			return r;
2674 		}
2675 
2676 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2677 		int_params.irq_source =
2678 			dc_interrupt_to_irq_source(dc, i, 0);
2679 
2680 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2681 
2682 		c_irq_params->adev = adev;
2683 		c_irq_params->irq_src = int_params.irq_source;
2684 
2685 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2686 				dm_crtc_high_irq, c_irq_params);
2687 	}
2688 
2689 	/* Use VUPDATE interrupt */
2690 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2691 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2692 		if (r) {
2693 			DRM_ERROR("Failed to add vupdate irq id!\n");
2694 			return r;
2695 		}
2696 
2697 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2698 		int_params.irq_source =
2699 			dc_interrupt_to_irq_source(dc, i, 0);
2700 
2701 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2702 
2703 		c_irq_params->adev = adev;
2704 		c_irq_params->irq_src = int_params.irq_source;
2705 
2706 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2707 				dm_vupdate_high_irq, c_irq_params);
2708 	}
2709 
2710 	/* Use GRPH_PFLIP interrupt */
2711 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2712 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2713 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2714 		if (r) {
2715 			DRM_ERROR("Failed to add page flip irq id!\n");
2716 			return r;
2717 		}
2718 
2719 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2720 		int_params.irq_source =
2721 			dc_interrupt_to_irq_source(dc, i, 0);
2722 
2723 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2724 
2725 		c_irq_params->adev = adev;
2726 		c_irq_params->irq_src = int_params.irq_source;
2727 
2728 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2729 				dm_pflip_high_irq, c_irq_params);
2730 
2731 	}
2732 
2733 	/* HPD */
2734 	r = amdgpu_irq_add_id(adev, client_id,
2735 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2736 	if (r) {
2737 		DRM_ERROR("Failed to add hpd irq id!\n");
2738 		return r;
2739 	}
2740 
2741 	register_hpd_handlers(adev);
2742 
2743 	return 0;
2744 }
2745 
2746 #if defined(CONFIG_DRM_AMD_DC_DCN)
2747 /* Register IRQ sources and initialize IRQ callbacks */
2748 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2749 {
2750 	struct dc *dc = adev->dm.dc;
2751 	struct common_irq_params *c_irq_params;
2752 	struct dc_interrupt_params int_params = {0};
2753 	int r;
2754 	int i;
2755 
2756 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2757 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2758 
2759 	/*
2760 	 * Actions of amdgpu_irq_add_id():
2761 	 * 1. Register a set() function with base driver.
2762 	 *    Base driver will call set() function to enable/disable an
2763 	 *    interrupt in DC hardware.
2764 	 * 2. Register amdgpu_dm_irq_handler().
2765 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2766 	 *    coming from DC hardware.
2767 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2768 	 *    for acknowledging and handling.
2769 	 */
2770 
2771 	/* Use VSTARTUP interrupt */
2772 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2773 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2774 			i++) {
2775 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2776 
2777 		if (r) {
2778 			DRM_ERROR("Failed to add crtc irq id!\n");
2779 			return r;
2780 		}
2781 
2782 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2783 		int_params.irq_source =
2784 			dc_interrupt_to_irq_source(dc, i, 0);
2785 
2786 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2787 
2788 		c_irq_params->adev = adev;
2789 		c_irq_params->irq_src = int_params.irq_source;
2790 
2791 		amdgpu_dm_irq_register_interrupt(
2792 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2793 	}
2794 
2795 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2796 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2797 	 * to trigger at end of each vblank, regardless of state of the lock,
2798 	 * matching DCE behaviour.
2799 	 */
2800 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2801 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2802 	     i++) {
2803 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2804 
2805 		if (r) {
2806 			DRM_ERROR("Failed to add vupdate irq id!\n");
2807 			return r;
2808 		}
2809 
2810 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2811 		int_params.irq_source =
2812 			dc_interrupt_to_irq_source(dc, i, 0);
2813 
2814 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2815 
2816 		c_irq_params->adev = adev;
2817 		c_irq_params->irq_src = int_params.irq_source;
2818 
2819 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2820 				dm_vupdate_high_irq, c_irq_params);
2821 	}
2822 
2823 	/* Use GRPH_PFLIP interrupt */
2824 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2825 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2826 			i++) {
2827 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2828 		if (r) {
2829 			DRM_ERROR("Failed to add page flip irq id!\n");
2830 			return r;
2831 		}
2832 
2833 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2834 		int_params.irq_source =
2835 			dc_interrupt_to_irq_source(dc, i, 0);
2836 
2837 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2838 
2839 		c_irq_params->adev = adev;
2840 		c_irq_params->irq_src = int_params.irq_source;
2841 
2842 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2843 				dm_pflip_high_irq, c_irq_params);
2844 
2845 	}
2846 
2847 	/* HPD */
2848 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2849 			&adev->hpd_irq);
2850 	if (r) {
2851 		DRM_ERROR("Failed to add hpd irq id!\n");
2852 		return r;
2853 	}
2854 
2855 	register_hpd_handlers(adev);
2856 
2857 	return 0;
2858 }
2859 #endif
2860 
2861 /*
2862  * Acquires the lock for the atomic state object and returns
2863  * the new atomic state.
2864  *
2865  * This should only be called during atomic check.
2866  */
2867 static int dm_atomic_get_state(struct drm_atomic_state *state,
2868 			       struct dm_atomic_state **dm_state)
2869 {
2870 	struct drm_device *dev = state->dev;
2871 	struct amdgpu_device *adev = drm_to_adev(dev);
2872 	struct amdgpu_display_manager *dm = &adev->dm;
2873 	struct drm_private_state *priv_state;
2874 
2875 	if (*dm_state)
2876 		return 0;
2877 
2878 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2879 	if (IS_ERR(priv_state))
2880 		return PTR_ERR(priv_state);
2881 
2882 	*dm_state = to_dm_atomic_state(priv_state);
2883 
2884 	return 0;
2885 }
2886 
2887 static struct dm_atomic_state *
2888 dm_atomic_get_new_state(struct drm_atomic_state *state)
2889 {
2890 	struct drm_device *dev = state->dev;
2891 	struct amdgpu_device *adev = drm_to_adev(dev);
2892 	struct amdgpu_display_manager *dm = &adev->dm;
2893 	struct drm_private_obj *obj;
2894 	struct drm_private_state *new_obj_state;
2895 	int i;
2896 
2897 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2898 		if (obj->funcs == dm->atomic_obj.funcs)
2899 			return to_dm_atomic_state(new_obj_state);
2900 	}
2901 
2902 	return NULL;
2903 }
2904 
2905 static struct drm_private_state *
2906 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2907 {
2908 	struct dm_atomic_state *old_state, *new_state;
2909 
2910 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2911 	if (!new_state)
2912 		return NULL;
2913 
2914 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2915 
2916 	old_state = to_dm_atomic_state(obj->state);
2917 
2918 	if (old_state && old_state->context)
2919 		new_state->context = dc_copy_state(old_state->context);
2920 
2921 	if (!new_state->context) {
2922 		kfree(new_state);
2923 		return NULL;
2924 	}
2925 
2926 	return &new_state->base;
2927 }
2928 
2929 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2930 				    struct drm_private_state *state)
2931 {
2932 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2933 
2934 	if (dm_state && dm_state->context)
2935 		dc_release_state(dm_state->context);
2936 
2937 	kfree(dm_state);
2938 }
2939 
2940 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2941 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2942 	.atomic_destroy_state = dm_atomic_destroy_state,
2943 };
2944 
2945 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2946 {
2947 	struct dm_atomic_state *state;
2948 	int r;
2949 
2950 	adev->mode_info.mode_config_initialized = true;
2951 
2952 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2953 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2954 
2955 	adev_to_drm(adev)->mode_config.max_width = 16384;
2956 	adev_to_drm(adev)->mode_config.max_height = 16384;
2957 
2958 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2959 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2960 	/* indicates support for immediate flip */
2961 	adev_to_drm(adev)->mode_config.async_page_flip = true;
2962 
2963 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2964 
2965 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2966 	if (!state)
2967 		return -ENOMEM;
2968 
2969 	state->context = dc_create_state(adev->dm.dc);
2970 	if (!state->context) {
2971 		kfree(state);
2972 		return -ENOMEM;
2973 	}
2974 
2975 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2976 
2977 	drm_atomic_private_obj_init(adev_to_drm(adev),
2978 				    &adev->dm.atomic_obj,
2979 				    &state->base,
2980 				    &dm_atomic_state_funcs);
2981 
2982 	r = amdgpu_display_modeset_create_props(adev);
2983 	if (r) {
2984 		dc_release_state(state->context);
2985 		kfree(state);
2986 		return r;
2987 	}
2988 
2989 	r = amdgpu_dm_audio_init(adev);
2990 	if (r) {
2991 		dc_release_state(state->context);
2992 		kfree(state);
2993 		return r;
2994 	}
2995 
2996 	return 0;
2997 }
2998 
2999 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3000 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3001 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3002 
3003 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3004 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3005 
3006 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3007 {
3008 #if defined(CONFIG_ACPI)
3009 	struct amdgpu_dm_backlight_caps caps;
3010 
3011 	memset(&caps, 0, sizeof(caps));
3012 
3013 	if (dm->backlight_caps.caps_valid)
3014 		return;
3015 
3016 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3017 	if (caps.caps_valid) {
3018 		dm->backlight_caps.caps_valid = true;
3019 		if (caps.aux_support)
3020 			return;
3021 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3022 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3023 	} else {
3024 		dm->backlight_caps.min_input_signal =
3025 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3026 		dm->backlight_caps.max_input_signal =
3027 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3028 	}
3029 #else
3030 	if (dm->backlight_caps.aux_support)
3031 		return;
3032 
3033 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3034 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3035 #endif
3036 }
3037 
3038 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3039 {
3040 	bool rc;
3041 
3042 	if (!link)
3043 		return 1;
3044 
3045 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3046 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3047 
3048 	return rc ? 0 : 1;
3049 }
3050 
3051 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3052 				unsigned *min, unsigned *max)
3053 {
3054 	if (!caps)
3055 		return 0;
3056 
3057 	if (caps->aux_support) {
3058 		// Firmware limits are in nits, DC API wants millinits.
3059 		*max = 1000 * caps->aux_max_input_signal;
3060 		*min = 1000 * caps->aux_min_input_signal;
3061 	} else {
3062 		// Firmware limits are 8-bit, PWM control is 16-bit.
3063 		*max = 0x101 * caps->max_input_signal;
3064 		*min = 0x101 * caps->min_input_signal;
3065 	}
3066 	return 1;
3067 }
3068 
3069 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3070 					uint32_t brightness)
3071 {
3072 	unsigned min, max;
3073 
3074 	if (!get_brightness_range(caps, &min, &max))
3075 		return brightness;
3076 
3077 	// Rescale 0..255 to min..max
3078 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3079 				       AMDGPU_MAX_BL_LEVEL);
3080 }
3081 
3082 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3083 				      uint32_t brightness)
3084 {
3085 	unsigned min, max;
3086 
3087 	if (!get_brightness_range(caps, &min, &max))
3088 		return brightness;
3089 
3090 	if (brightness < min)
3091 		return 0;
3092 	// Rescale min..max to 0..255
3093 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3094 				 max - min);
3095 }
3096 
3097 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3098 {
3099 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3100 	struct amdgpu_dm_backlight_caps caps;
3101 	struct dc_link *link = NULL;
3102 	u32 brightness;
3103 	bool rc;
3104 
3105 	amdgpu_dm_update_backlight_caps(dm);
3106 	caps = dm->backlight_caps;
3107 
3108 	link = (struct dc_link *)dm->backlight_link;
3109 
3110 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3111 	// Change brightness based on AUX property
3112 	if (caps.aux_support)
3113 		return set_backlight_via_aux(link, brightness);
3114 
3115 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3116 
3117 	return rc ? 0 : 1;
3118 }
3119 
3120 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3121 {
3122 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3123 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3124 
3125 	if (ret == DC_ERROR_UNEXPECTED)
3126 		return bd->props.brightness;
3127 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3128 }
3129 
3130 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3131 	.options = BL_CORE_SUSPENDRESUME,
3132 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3133 	.update_status	= amdgpu_dm_backlight_update_status,
3134 };
3135 
3136 static void
3137 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3138 {
3139 	char bl_name[16];
3140 	struct backlight_properties props = { 0 };
3141 
3142 	amdgpu_dm_update_backlight_caps(dm);
3143 
3144 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3145 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3146 	props.type = BACKLIGHT_RAW;
3147 
3148 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3149 		 adev_to_drm(dm->adev)->primary->index);
3150 
3151 	dm->backlight_dev = backlight_device_register(bl_name,
3152 						      adev_to_drm(dm->adev)->dev,
3153 						      dm,
3154 						      &amdgpu_dm_backlight_ops,
3155 						      &props);
3156 
3157 	if (IS_ERR(dm->backlight_dev))
3158 		DRM_ERROR("DM: Backlight registration failed!\n");
3159 	else
3160 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3161 }
3162 
3163 #endif
3164 
3165 static int initialize_plane(struct amdgpu_display_manager *dm,
3166 			    struct amdgpu_mode_info *mode_info, int plane_id,
3167 			    enum drm_plane_type plane_type,
3168 			    const struct dc_plane_cap *plane_cap)
3169 {
3170 	struct drm_plane *plane;
3171 	unsigned long possible_crtcs;
3172 	int ret = 0;
3173 
3174 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3175 	if (!plane) {
3176 		DRM_ERROR("KMS: Failed to allocate plane\n");
3177 		return -ENOMEM;
3178 	}
3179 	plane->type = plane_type;
3180 
3181 	/*
3182 	 * HACK: IGT tests expect that the primary plane for a CRTC
3183 	 * can only have one possible CRTC. Only expose support for
3184 	 * any CRTC if they're not going to be used as a primary plane
3185 	 * for a CRTC - like overlay or underlay planes.
3186 	 */
3187 	possible_crtcs = 1 << plane_id;
3188 	if (plane_id >= dm->dc->caps.max_streams)
3189 		possible_crtcs = 0xff;
3190 
3191 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3192 
3193 	if (ret) {
3194 		DRM_ERROR("KMS: Failed to initialize plane\n");
3195 		kfree(plane);
3196 		return ret;
3197 	}
3198 
3199 	if (mode_info)
3200 		mode_info->planes[plane_id] = plane;
3201 
3202 	return ret;
3203 }
3204 
3205 
3206 static void register_backlight_device(struct amdgpu_display_manager *dm,
3207 				      struct dc_link *link)
3208 {
3209 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3210 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3211 
3212 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3213 	    link->type != dc_connection_none) {
3214 		/*
3215 		 * Event if registration failed, we should continue with
3216 		 * DM initialization because not having a backlight control
3217 		 * is better then a black screen.
3218 		 */
3219 		amdgpu_dm_register_backlight_device(dm);
3220 
3221 		if (dm->backlight_dev)
3222 			dm->backlight_link = link;
3223 	}
3224 #endif
3225 }
3226 
3227 
3228 /*
3229  * In this architecture, the association
3230  * connector -> encoder -> crtc
3231  * id not really requried. The crtc and connector will hold the
3232  * display_index as an abstraction to use with DAL component
3233  *
3234  * Returns 0 on success
3235  */
3236 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3237 {
3238 	struct amdgpu_display_manager *dm = &adev->dm;
3239 	int32_t i;
3240 	struct amdgpu_dm_connector *aconnector = NULL;
3241 	struct amdgpu_encoder *aencoder = NULL;
3242 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3243 	uint32_t link_cnt;
3244 	int32_t primary_planes;
3245 	enum dc_connection_type new_connection_type = dc_connection_none;
3246 	const struct dc_plane_cap *plane;
3247 
3248 	link_cnt = dm->dc->caps.max_links;
3249 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3250 		DRM_ERROR("DM: Failed to initialize mode config\n");
3251 		return -EINVAL;
3252 	}
3253 
3254 	/* There is one primary plane per CRTC */
3255 	primary_planes = dm->dc->caps.max_streams;
3256 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3257 
3258 	/*
3259 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3260 	 * Order is reversed to match iteration order in atomic check.
3261 	 */
3262 	for (i = (primary_planes - 1); i >= 0; i--) {
3263 		plane = &dm->dc->caps.planes[i];
3264 
3265 		if (initialize_plane(dm, mode_info, i,
3266 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3267 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3268 			goto fail;
3269 		}
3270 	}
3271 
3272 	/*
3273 	 * Initialize overlay planes, index starting after primary planes.
3274 	 * These planes have a higher DRM index than the primary planes since
3275 	 * they should be considered as having a higher z-order.
3276 	 * Order is reversed to match iteration order in atomic check.
3277 	 *
3278 	 * Only support DCN for now, and only expose one so we don't encourage
3279 	 * userspace to use up all the pipes.
3280 	 */
3281 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3282 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3283 
3284 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3285 			continue;
3286 
3287 		if (!plane->blends_with_above || !plane->blends_with_below)
3288 			continue;
3289 
3290 		if (!plane->pixel_format_support.argb8888)
3291 			continue;
3292 
3293 		if (initialize_plane(dm, NULL, primary_planes + i,
3294 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3295 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3296 			goto fail;
3297 		}
3298 
3299 		/* Only create one overlay plane. */
3300 		break;
3301 	}
3302 
3303 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3304 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3305 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3306 			goto fail;
3307 		}
3308 
3309 	dm->display_indexes_num = dm->dc->caps.max_streams;
3310 
3311 	/* loops over all connectors on the board */
3312 	for (i = 0; i < link_cnt; i++) {
3313 		struct dc_link *link = NULL;
3314 
3315 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3316 			DRM_ERROR(
3317 				"KMS: Cannot support more than %d display indexes\n",
3318 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3319 			continue;
3320 		}
3321 
3322 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3323 		if (!aconnector)
3324 			goto fail;
3325 
3326 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3327 		if (!aencoder)
3328 			goto fail;
3329 
3330 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3331 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3332 			goto fail;
3333 		}
3334 
3335 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3336 			DRM_ERROR("KMS: Failed to initialize connector\n");
3337 			goto fail;
3338 		}
3339 
3340 		link = dc_get_link_at_index(dm->dc, i);
3341 
3342 		if (!dc_link_detect_sink(link, &new_connection_type))
3343 			DRM_ERROR("KMS: Failed to detect connector\n");
3344 
3345 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3346 			emulated_link_detect(link);
3347 			amdgpu_dm_update_connector_after_detect(aconnector);
3348 
3349 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3350 			amdgpu_dm_update_connector_after_detect(aconnector);
3351 			register_backlight_device(dm, link);
3352 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3353 				amdgpu_dm_set_psr_caps(link);
3354 		}
3355 
3356 
3357 	}
3358 
3359 	/* Software is initialized. Now we can register interrupt handlers. */
3360 	switch (adev->asic_type) {
3361 #if defined(CONFIG_DRM_AMD_DC_SI)
3362 	case CHIP_TAHITI:
3363 	case CHIP_PITCAIRN:
3364 	case CHIP_VERDE:
3365 	case CHIP_OLAND:
3366 		if (dce60_register_irq_handlers(dm->adev)) {
3367 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3368 			goto fail;
3369 		}
3370 		break;
3371 #endif
3372 	case CHIP_BONAIRE:
3373 	case CHIP_HAWAII:
3374 	case CHIP_KAVERI:
3375 	case CHIP_KABINI:
3376 	case CHIP_MULLINS:
3377 	case CHIP_TONGA:
3378 	case CHIP_FIJI:
3379 	case CHIP_CARRIZO:
3380 	case CHIP_STONEY:
3381 	case CHIP_POLARIS11:
3382 	case CHIP_POLARIS10:
3383 	case CHIP_POLARIS12:
3384 	case CHIP_VEGAM:
3385 	case CHIP_VEGA10:
3386 	case CHIP_VEGA12:
3387 	case CHIP_VEGA20:
3388 		if (dce110_register_irq_handlers(dm->adev)) {
3389 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3390 			goto fail;
3391 		}
3392 		break;
3393 #if defined(CONFIG_DRM_AMD_DC_DCN)
3394 	case CHIP_RAVEN:
3395 	case CHIP_NAVI12:
3396 	case CHIP_NAVI10:
3397 	case CHIP_NAVI14:
3398 	case CHIP_RENOIR:
3399 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3400 	case CHIP_SIENNA_CICHLID:
3401 	case CHIP_NAVY_FLOUNDER:
3402 #endif
3403 		if (dcn10_register_irq_handlers(dm->adev)) {
3404 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3405 			goto fail;
3406 		}
3407 		break;
3408 #endif
3409 	default:
3410 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3411 		goto fail;
3412 	}
3413 
3414 	return 0;
3415 fail:
3416 	kfree(aencoder);
3417 	kfree(aconnector);
3418 
3419 	return -EINVAL;
3420 }
3421 
3422 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3423 {
3424 	drm_mode_config_cleanup(dm->ddev);
3425 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3426 	return;
3427 }
3428 
3429 /******************************************************************************
3430  * amdgpu_display_funcs functions
3431  *****************************************************************************/
3432 
3433 /*
3434  * dm_bandwidth_update - program display watermarks
3435  *
3436  * @adev: amdgpu_device pointer
3437  *
3438  * Calculate and program the display watermarks and line buffer allocation.
3439  */
3440 static void dm_bandwidth_update(struct amdgpu_device *adev)
3441 {
3442 	/* TODO: implement later */
3443 }
3444 
3445 static const struct amdgpu_display_funcs dm_display_funcs = {
3446 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3447 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3448 	.backlight_set_level = NULL, /* never called for DC */
3449 	.backlight_get_level = NULL, /* never called for DC */
3450 	.hpd_sense = NULL,/* called unconditionally */
3451 	.hpd_set_polarity = NULL, /* called unconditionally */
3452 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3453 	.page_flip_get_scanoutpos =
3454 		dm_crtc_get_scanoutpos,/* called unconditionally */
3455 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3456 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3457 };
3458 
3459 #if defined(CONFIG_DEBUG_KERNEL_DC)
3460 
3461 static ssize_t s3_debug_store(struct device *device,
3462 			      struct device_attribute *attr,
3463 			      const char *buf,
3464 			      size_t count)
3465 {
3466 	int ret;
3467 	int s3_state;
3468 	struct drm_device *drm_dev = dev_get_drvdata(device);
3469 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3470 
3471 	ret = kstrtoint(buf, 0, &s3_state);
3472 
3473 	if (ret == 0) {
3474 		if (s3_state) {
3475 			dm_resume(adev);
3476 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3477 		} else
3478 			dm_suspend(adev);
3479 	}
3480 
3481 	return ret == 0 ? count : 0;
3482 }
3483 
3484 DEVICE_ATTR_WO(s3_debug);
3485 
3486 #endif
3487 
3488 static int dm_early_init(void *handle)
3489 {
3490 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3491 
3492 	switch (adev->asic_type) {
3493 #if defined(CONFIG_DRM_AMD_DC_SI)
3494 	case CHIP_TAHITI:
3495 	case CHIP_PITCAIRN:
3496 	case CHIP_VERDE:
3497 		adev->mode_info.num_crtc = 6;
3498 		adev->mode_info.num_hpd = 6;
3499 		adev->mode_info.num_dig = 6;
3500 		break;
3501 	case CHIP_OLAND:
3502 		adev->mode_info.num_crtc = 2;
3503 		adev->mode_info.num_hpd = 2;
3504 		adev->mode_info.num_dig = 2;
3505 		break;
3506 #endif
3507 	case CHIP_BONAIRE:
3508 	case CHIP_HAWAII:
3509 		adev->mode_info.num_crtc = 6;
3510 		adev->mode_info.num_hpd = 6;
3511 		adev->mode_info.num_dig = 6;
3512 		break;
3513 	case CHIP_KAVERI:
3514 		adev->mode_info.num_crtc = 4;
3515 		adev->mode_info.num_hpd = 6;
3516 		adev->mode_info.num_dig = 7;
3517 		break;
3518 	case CHIP_KABINI:
3519 	case CHIP_MULLINS:
3520 		adev->mode_info.num_crtc = 2;
3521 		adev->mode_info.num_hpd = 6;
3522 		adev->mode_info.num_dig = 6;
3523 		break;
3524 	case CHIP_FIJI:
3525 	case CHIP_TONGA:
3526 		adev->mode_info.num_crtc = 6;
3527 		adev->mode_info.num_hpd = 6;
3528 		adev->mode_info.num_dig = 7;
3529 		break;
3530 	case CHIP_CARRIZO:
3531 		adev->mode_info.num_crtc = 3;
3532 		adev->mode_info.num_hpd = 6;
3533 		adev->mode_info.num_dig = 9;
3534 		break;
3535 	case CHIP_STONEY:
3536 		adev->mode_info.num_crtc = 2;
3537 		adev->mode_info.num_hpd = 6;
3538 		adev->mode_info.num_dig = 9;
3539 		break;
3540 	case CHIP_POLARIS11:
3541 	case CHIP_POLARIS12:
3542 		adev->mode_info.num_crtc = 5;
3543 		adev->mode_info.num_hpd = 5;
3544 		adev->mode_info.num_dig = 5;
3545 		break;
3546 	case CHIP_POLARIS10:
3547 	case CHIP_VEGAM:
3548 		adev->mode_info.num_crtc = 6;
3549 		adev->mode_info.num_hpd = 6;
3550 		adev->mode_info.num_dig = 6;
3551 		break;
3552 	case CHIP_VEGA10:
3553 	case CHIP_VEGA12:
3554 	case CHIP_VEGA20:
3555 		adev->mode_info.num_crtc = 6;
3556 		adev->mode_info.num_hpd = 6;
3557 		adev->mode_info.num_dig = 6;
3558 		break;
3559 #if defined(CONFIG_DRM_AMD_DC_DCN)
3560 	case CHIP_RAVEN:
3561 		adev->mode_info.num_crtc = 4;
3562 		adev->mode_info.num_hpd = 4;
3563 		adev->mode_info.num_dig = 4;
3564 		break;
3565 #endif
3566 	case CHIP_NAVI10:
3567 	case CHIP_NAVI12:
3568 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3569 	case CHIP_SIENNA_CICHLID:
3570 	case CHIP_NAVY_FLOUNDER:
3571 #endif
3572 		adev->mode_info.num_crtc = 6;
3573 		adev->mode_info.num_hpd = 6;
3574 		adev->mode_info.num_dig = 6;
3575 		break;
3576 	case CHIP_NAVI14:
3577 		adev->mode_info.num_crtc = 5;
3578 		adev->mode_info.num_hpd = 5;
3579 		adev->mode_info.num_dig = 5;
3580 		break;
3581 	case CHIP_RENOIR:
3582 		adev->mode_info.num_crtc = 4;
3583 		adev->mode_info.num_hpd = 4;
3584 		adev->mode_info.num_dig = 4;
3585 		break;
3586 	default:
3587 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3588 		return -EINVAL;
3589 	}
3590 
3591 	amdgpu_dm_set_irq_funcs(adev);
3592 
3593 	if (adev->mode_info.funcs == NULL)
3594 		adev->mode_info.funcs = &dm_display_funcs;
3595 
3596 	/*
3597 	 * Note: Do NOT change adev->audio_endpt_rreg and
3598 	 * adev->audio_endpt_wreg because they are initialised in
3599 	 * amdgpu_device_init()
3600 	 */
3601 #if defined(CONFIG_DEBUG_KERNEL_DC)
3602 	device_create_file(
3603 		adev_to_drm(adev)->dev,
3604 		&dev_attr_s3_debug);
3605 #endif
3606 
3607 	return 0;
3608 }
3609 
3610 static bool modeset_required(struct drm_crtc_state *crtc_state,
3611 			     struct dc_stream_state *new_stream,
3612 			     struct dc_stream_state *old_stream)
3613 {
3614 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3615 }
3616 
3617 static bool modereset_required(struct drm_crtc_state *crtc_state)
3618 {
3619 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3620 }
3621 
3622 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3623 {
3624 	drm_encoder_cleanup(encoder);
3625 	kfree(encoder);
3626 }
3627 
3628 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3629 	.destroy = amdgpu_dm_encoder_destroy,
3630 };
3631 
3632 
3633 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3634 				struct dc_scaling_info *scaling_info)
3635 {
3636 	int scale_w, scale_h;
3637 
3638 	memset(scaling_info, 0, sizeof(*scaling_info));
3639 
3640 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3641 	scaling_info->src_rect.x = state->src_x >> 16;
3642 	scaling_info->src_rect.y = state->src_y >> 16;
3643 
3644 	scaling_info->src_rect.width = state->src_w >> 16;
3645 	if (scaling_info->src_rect.width == 0)
3646 		return -EINVAL;
3647 
3648 	scaling_info->src_rect.height = state->src_h >> 16;
3649 	if (scaling_info->src_rect.height == 0)
3650 		return -EINVAL;
3651 
3652 	scaling_info->dst_rect.x = state->crtc_x;
3653 	scaling_info->dst_rect.y = state->crtc_y;
3654 
3655 	if (state->crtc_w == 0)
3656 		return -EINVAL;
3657 
3658 	scaling_info->dst_rect.width = state->crtc_w;
3659 
3660 	if (state->crtc_h == 0)
3661 		return -EINVAL;
3662 
3663 	scaling_info->dst_rect.height = state->crtc_h;
3664 
3665 	/* DRM doesn't specify clipping on destination output. */
3666 	scaling_info->clip_rect = scaling_info->dst_rect;
3667 
3668 	/* TODO: Validate scaling per-format with DC plane caps */
3669 	scale_w = scaling_info->dst_rect.width * 1000 /
3670 		  scaling_info->src_rect.width;
3671 
3672 	if (scale_w < 250 || scale_w > 16000)
3673 		return -EINVAL;
3674 
3675 	scale_h = scaling_info->dst_rect.height * 1000 /
3676 		  scaling_info->src_rect.height;
3677 
3678 	if (scale_h < 250 || scale_h > 16000)
3679 		return -EINVAL;
3680 
3681 	/*
3682 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3683 	 * assume reasonable defaults based on the format.
3684 	 */
3685 
3686 	return 0;
3687 }
3688 
3689 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3690 		       uint64_t *tiling_flags, bool *tmz_surface)
3691 {
3692 	struct amdgpu_bo *rbo;
3693 	int r;
3694 
3695 	if (!amdgpu_fb) {
3696 		*tiling_flags = 0;
3697 		*tmz_surface = false;
3698 		return 0;
3699 	}
3700 
3701 	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3702 	r = amdgpu_bo_reserve(rbo, false);
3703 
3704 	if (unlikely(r)) {
3705 		/* Don't show error message when returning -ERESTARTSYS */
3706 		if (r != -ERESTARTSYS)
3707 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3708 		return r;
3709 	}
3710 
3711 	if (tiling_flags)
3712 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3713 
3714 	if (tmz_surface)
3715 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3716 
3717 	amdgpu_bo_unreserve(rbo);
3718 
3719 	return r;
3720 }
3721 
3722 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3723 {
3724 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3725 
3726 	return offset ? (address + offset * 256) : 0;
3727 }
3728 
3729 static int
3730 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3731 			  const struct amdgpu_framebuffer *afb,
3732 			  const enum surface_pixel_format format,
3733 			  const enum dc_rotation_angle rotation,
3734 			  const struct plane_size *plane_size,
3735 			  const union dc_tiling_info *tiling_info,
3736 			  const uint64_t info,
3737 			  struct dc_plane_dcc_param *dcc,
3738 			  struct dc_plane_address *address,
3739 			  bool force_disable_dcc)
3740 {
3741 	struct dc *dc = adev->dm.dc;
3742 	struct dc_dcc_surface_param input;
3743 	struct dc_surface_dcc_cap output;
3744 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3745 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3746 	uint64_t dcc_address;
3747 
3748 	memset(&input, 0, sizeof(input));
3749 	memset(&output, 0, sizeof(output));
3750 
3751 	if (force_disable_dcc)
3752 		return 0;
3753 
3754 	if (!offset)
3755 		return 0;
3756 
3757 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3758 		return 0;
3759 
3760 	if (!dc->cap_funcs.get_dcc_compression_cap)
3761 		return -EINVAL;
3762 
3763 	input.format = format;
3764 	input.surface_size.width = plane_size->surface_size.width;
3765 	input.surface_size.height = plane_size->surface_size.height;
3766 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3767 
3768 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3769 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3770 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3771 		input.scan = SCAN_DIRECTION_VERTICAL;
3772 
3773 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3774 		return -EINVAL;
3775 
3776 	if (!output.capable)
3777 		return -EINVAL;
3778 
3779 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3780 		return -EINVAL;
3781 
3782 	dcc->enable = 1;
3783 	dcc->meta_pitch =
3784 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3785 	dcc->independent_64b_blks = i64b;
3786 
3787 	dcc_address = get_dcc_address(afb->address, info);
3788 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3789 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3790 
3791 	return 0;
3792 }
3793 
3794 static int
3795 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3796 			     const struct amdgpu_framebuffer *afb,
3797 			     const enum surface_pixel_format format,
3798 			     const enum dc_rotation_angle rotation,
3799 			     const uint64_t tiling_flags,
3800 			     union dc_tiling_info *tiling_info,
3801 			     struct plane_size *plane_size,
3802 			     struct dc_plane_dcc_param *dcc,
3803 			     struct dc_plane_address *address,
3804 			     bool tmz_surface,
3805 			     bool force_disable_dcc)
3806 {
3807 	const struct drm_framebuffer *fb = &afb->base;
3808 	int ret;
3809 
3810 	memset(tiling_info, 0, sizeof(*tiling_info));
3811 	memset(plane_size, 0, sizeof(*plane_size));
3812 	memset(dcc, 0, sizeof(*dcc));
3813 	memset(address, 0, sizeof(*address));
3814 
3815 	address->tmz_surface = tmz_surface;
3816 
3817 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3818 		plane_size->surface_size.x = 0;
3819 		plane_size->surface_size.y = 0;
3820 		plane_size->surface_size.width = fb->width;
3821 		plane_size->surface_size.height = fb->height;
3822 		plane_size->surface_pitch =
3823 			fb->pitches[0] / fb->format->cpp[0];
3824 
3825 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3826 		address->grph.addr.low_part = lower_32_bits(afb->address);
3827 		address->grph.addr.high_part = upper_32_bits(afb->address);
3828 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3829 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3830 
3831 		plane_size->surface_size.x = 0;
3832 		plane_size->surface_size.y = 0;
3833 		plane_size->surface_size.width = fb->width;
3834 		plane_size->surface_size.height = fb->height;
3835 		plane_size->surface_pitch =
3836 			fb->pitches[0] / fb->format->cpp[0];
3837 
3838 		plane_size->chroma_size.x = 0;
3839 		plane_size->chroma_size.y = 0;
3840 		/* TODO: set these based on surface format */
3841 		plane_size->chroma_size.width = fb->width / 2;
3842 		plane_size->chroma_size.height = fb->height / 2;
3843 
3844 		plane_size->chroma_pitch =
3845 			fb->pitches[1] / fb->format->cpp[1];
3846 
3847 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3848 		address->video_progressive.luma_addr.low_part =
3849 			lower_32_bits(afb->address);
3850 		address->video_progressive.luma_addr.high_part =
3851 			upper_32_bits(afb->address);
3852 		address->video_progressive.chroma_addr.low_part =
3853 			lower_32_bits(chroma_addr);
3854 		address->video_progressive.chroma_addr.high_part =
3855 			upper_32_bits(chroma_addr);
3856 	}
3857 
3858 	/* Fill GFX8 params */
3859 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3860 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3861 
3862 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3863 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3864 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3865 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3866 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3867 
3868 		/* XXX fix me for VI */
3869 		tiling_info->gfx8.num_banks = num_banks;
3870 		tiling_info->gfx8.array_mode =
3871 				DC_ARRAY_2D_TILED_THIN1;
3872 		tiling_info->gfx8.tile_split = tile_split;
3873 		tiling_info->gfx8.bank_width = bankw;
3874 		tiling_info->gfx8.bank_height = bankh;
3875 		tiling_info->gfx8.tile_aspect = mtaspect;
3876 		tiling_info->gfx8.tile_mode =
3877 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3878 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3879 			== DC_ARRAY_1D_TILED_THIN1) {
3880 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3881 	}
3882 
3883 	tiling_info->gfx8.pipe_config =
3884 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3885 
3886 	if (adev->asic_type == CHIP_VEGA10 ||
3887 	    adev->asic_type == CHIP_VEGA12 ||
3888 	    adev->asic_type == CHIP_VEGA20 ||
3889 	    adev->asic_type == CHIP_NAVI10 ||
3890 	    adev->asic_type == CHIP_NAVI14 ||
3891 	    adev->asic_type == CHIP_NAVI12 ||
3892 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3893 		adev->asic_type == CHIP_SIENNA_CICHLID ||
3894 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
3895 #endif
3896 	    adev->asic_type == CHIP_RENOIR ||
3897 	    adev->asic_type == CHIP_RAVEN) {
3898 		/* Fill GFX9 params */
3899 		tiling_info->gfx9.num_pipes =
3900 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3901 		tiling_info->gfx9.num_banks =
3902 			adev->gfx.config.gb_addr_config_fields.num_banks;
3903 		tiling_info->gfx9.pipe_interleave =
3904 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3905 		tiling_info->gfx9.num_shader_engines =
3906 			adev->gfx.config.gb_addr_config_fields.num_se;
3907 		tiling_info->gfx9.max_compressed_frags =
3908 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3909 		tiling_info->gfx9.num_rb_per_se =
3910 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3911 		tiling_info->gfx9.swizzle =
3912 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3913 		tiling_info->gfx9.shaderEnable = 1;
3914 
3915 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3916 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3917 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
3918 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3919 #endif
3920 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3921 						plane_size, tiling_info,
3922 						tiling_flags, dcc, address,
3923 						force_disable_dcc);
3924 		if (ret)
3925 			return ret;
3926 	}
3927 
3928 	return 0;
3929 }
3930 
3931 static void
3932 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3933 			       bool *per_pixel_alpha, bool *global_alpha,
3934 			       int *global_alpha_value)
3935 {
3936 	*per_pixel_alpha = false;
3937 	*global_alpha = false;
3938 	*global_alpha_value = 0xff;
3939 
3940 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3941 		return;
3942 
3943 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3944 		static const uint32_t alpha_formats[] = {
3945 			DRM_FORMAT_ARGB8888,
3946 			DRM_FORMAT_RGBA8888,
3947 			DRM_FORMAT_ABGR8888,
3948 		};
3949 		uint32_t format = plane_state->fb->format->format;
3950 		unsigned int i;
3951 
3952 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3953 			if (format == alpha_formats[i]) {
3954 				*per_pixel_alpha = true;
3955 				break;
3956 			}
3957 		}
3958 	}
3959 
3960 	if (plane_state->alpha < 0xffff) {
3961 		*global_alpha = true;
3962 		*global_alpha_value = plane_state->alpha >> 8;
3963 	}
3964 }
3965 
3966 static int
3967 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3968 			    const enum surface_pixel_format format,
3969 			    enum dc_color_space *color_space)
3970 {
3971 	bool full_range;
3972 
3973 	*color_space = COLOR_SPACE_SRGB;
3974 
3975 	/* DRM color properties only affect non-RGB formats. */
3976 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3977 		return 0;
3978 
3979 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3980 
3981 	switch (plane_state->color_encoding) {
3982 	case DRM_COLOR_YCBCR_BT601:
3983 		if (full_range)
3984 			*color_space = COLOR_SPACE_YCBCR601;
3985 		else
3986 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3987 		break;
3988 
3989 	case DRM_COLOR_YCBCR_BT709:
3990 		if (full_range)
3991 			*color_space = COLOR_SPACE_YCBCR709;
3992 		else
3993 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3994 		break;
3995 
3996 	case DRM_COLOR_YCBCR_BT2020:
3997 		if (full_range)
3998 			*color_space = COLOR_SPACE_2020_YCBCR;
3999 		else
4000 			return -EINVAL;
4001 		break;
4002 
4003 	default:
4004 		return -EINVAL;
4005 	}
4006 
4007 	return 0;
4008 }
4009 
4010 static int
4011 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4012 			    const struct drm_plane_state *plane_state,
4013 			    const uint64_t tiling_flags,
4014 			    struct dc_plane_info *plane_info,
4015 			    struct dc_plane_address *address,
4016 			    bool tmz_surface,
4017 			    bool force_disable_dcc)
4018 {
4019 	const struct drm_framebuffer *fb = plane_state->fb;
4020 	const struct amdgpu_framebuffer *afb =
4021 		to_amdgpu_framebuffer(plane_state->fb);
4022 	struct drm_format_name_buf format_name;
4023 	int ret;
4024 
4025 	memset(plane_info, 0, sizeof(*plane_info));
4026 
4027 	switch (fb->format->format) {
4028 	case DRM_FORMAT_C8:
4029 		plane_info->format =
4030 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4031 		break;
4032 	case DRM_FORMAT_RGB565:
4033 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4034 		break;
4035 	case DRM_FORMAT_XRGB8888:
4036 	case DRM_FORMAT_ARGB8888:
4037 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4038 		break;
4039 	case DRM_FORMAT_XRGB2101010:
4040 	case DRM_FORMAT_ARGB2101010:
4041 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4042 		break;
4043 	case DRM_FORMAT_XBGR2101010:
4044 	case DRM_FORMAT_ABGR2101010:
4045 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4046 		break;
4047 	case DRM_FORMAT_XBGR8888:
4048 	case DRM_FORMAT_ABGR8888:
4049 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4050 		break;
4051 	case DRM_FORMAT_NV21:
4052 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4053 		break;
4054 	case DRM_FORMAT_NV12:
4055 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4056 		break;
4057 	case DRM_FORMAT_P010:
4058 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4059 		break;
4060 	case DRM_FORMAT_XRGB16161616F:
4061 	case DRM_FORMAT_ARGB16161616F:
4062 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4063 		break;
4064 	case DRM_FORMAT_XBGR16161616F:
4065 	case DRM_FORMAT_ABGR16161616F:
4066 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4067 		break;
4068 	default:
4069 		DRM_ERROR(
4070 			"Unsupported screen format %s\n",
4071 			drm_get_format_name(fb->format->format, &format_name));
4072 		return -EINVAL;
4073 	}
4074 
4075 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4076 	case DRM_MODE_ROTATE_0:
4077 		plane_info->rotation = ROTATION_ANGLE_0;
4078 		break;
4079 	case DRM_MODE_ROTATE_90:
4080 		plane_info->rotation = ROTATION_ANGLE_90;
4081 		break;
4082 	case DRM_MODE_ROTATE_180:
4083 		plane_info->rotation = ROTATION_ANGLE_180;
4084 		break;
4085 	case DRM_MODE_ROTATE_270:
4086 		plane_info->rotation = ROTATION_ANGLE_270;
4087 		break;
4088 	default:
4089 		plane_info->rotation = ROTATION_ANGLE_0;
4090 		break;
4091 	}
4092 
4093 	plane_info->visible = true;
4094 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4095 
4096 	plane_info->layer_index = 0;
4097 
4098 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4099 					  &plane_info->color_space);
4100 	if (ret)
4101 		return ret;
4102 
4103 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4104 					   plane_info->rotation, tiling_flags,
4105 					   &plane_info->tiling_info,
4106 					   &plane_info->plane_size,
4107 					   &plane_info->dcc, address, tmz_surface,
4108 					   force_disable_dcc);
4109 	if (ret)
4110 		return ret;
4111 
4112 	fill_blending_from_plane_state(
4113 		plane_state, &plane_info->per_pixel_alpha,
4114 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4115 
4116 	return 0;
4117 }
4118 
4119 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4120 				    struct dc_plane_state *dc_plane_state,
4121 				    struct drm_plane_state *plane_state,
4122 				    struct drm_crtc_state *crtc_state)
4123 {
4124 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4125 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4126 	struct dc_scaling_info scaling_info;
4127 	struct dc_plane_info plane_info;
4128 	int ret;
4129 	bool force_disable_dcc = false;
4130 
4131 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4132 	if (ret)
4133 		return ret;
4134 
4135 	dc_plane_state->src_rect = scaling_info.src_rect;
4136 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4137 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4138 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4139 
4140 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4141 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4142 					  dm_plane_state->tiling_flags,
4143 					  &plane_info,
4144 					  &dc_plane_state->address,
4145 					  dm_plane_state->tmz_surface,
4146 					  force_disable_dcc);
4147 	if (ret)
4148 		return ret;
4149 
4150 	dc_plane_state->format = plane_info.format;
4151 	dc_plane_state->color_space = plane_info.color_space;
4152 	dc_plane_state->format = plane_info.format;
4153 	dc_plane_state->plane_size = plane_info.plane_size;
4154 	dc_plane_state->rotation = plane_info.rotation;
4155 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4156 	dc_plane_state->stereo_format = plane_info.stereo_format;
4157 	dc_plane_state->tiling_info = plane_info.tiling_info;
4158 	dc_plane_state->visible = plane_info.visible;
4159 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4160 	dc_plane_state->global_alpha = plane_info.global_alpha;
4161 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4162 	dc_plane_state->dcc = plane_info.dcc;
4163 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4164 
4165 	/*
4166 	 * Always set input transfer function, since plane state is refreshed
4167 	 * every time.
4168 	 */
4169 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4170 	if (ret)
4171 		return ret;
4172 
4173 	return 0;
4174 }
4175 
4176 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4177 					   const struct dm_connector_state *dm_state,
4178 					   struct dc_stream_state *stream)
4179 {
4180 	enum amdgpu_rmx_type rmx_type;
4181 
4182 	struct rect src = { 0 }; /* viewport in composition space*/
4183 	struct rect dst = { 0 }; /* stream addressable area */
4184 
4185 	/* no mode. nothing to be done */
4186 	if (!mode)
4187 		return;
4188 
4189 	/* Full screen scaling by default */
4190 	src.width = mode->hdisplay;
4191 	src.height = mode->vdisplay;
4192 	dst.width = stream->timing.h_addressable;
4193 	dst.height = stream->timing.v_addressable;
4194 
4195 	if (dm_state) {
4196 		rmx_type = dm_state->scaling;
4197 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4198 			if (src.width * dst.height <
4199 					src.height * dst.width) {
4200 				/* height needs less upscaling/more downscaling */
4201 				dst.width = src.width *
4202 						dst.height / src.height;
4203 			} else {
4204 				/* width needs less upscaling/more downscaling */
4205 				dst.height = src.height *
4206 						dst.width / src.width;
4207 			}
4208 		} else if (rmx_type == RMX_CENTER) {
4209 			dst = src;
4210 		}
4211 
4212 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4213 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4214 
4215 		if (dm_state->underscan_enable) {
4216 			dst.x += dm_state->underscan_hborder / 2;
4217 			dst.y += dm_state->underscan_vborder / 2;
4218 			dst.width -= dm_state->underscan_hborder;
4219 			dst.height -= dm_state->underscan_vborder;
4220 		}
4221 	}
4222 
4223 	stream->src = src;
4224 	stream->dst = dst;
4225 
4226 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4227 			dst.x, dst.y, dst.width, dst.height);
4228 
4229 }
4230 
4231 static enum dc_color_depth
4232 convert_color_depth_from_display_info(const struct drm_connector *connector,
4233 				      bool is_y420, int requested_bpc)
4234 {
4235 	uint8_t bpc;
4236 
4237 	if (is_y420) {
4238 		bpc = 8;
4239 
4240 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4241 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4242 			bpc = 16;
4243 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4244 			bpc = 12;
4245 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4246 			bpc = 10;
4247 	} else {
4248 		bpc = (uint8_t)connector->display_info.bpc;
4249 		/* Assume 8 bpc by default if no bpc is specified. */
4250 		bpc = bpc ? bpc : 8;
4251 	}
4252 
4253 	if (requested_bpc > 0) {
4254 		/*
4255 		 * Cap display bpc based on the user requested value.
4256 		 *
4257 		 * The value for state->max_bpc may not correctly updated
4258 		 * depending on when the connector gets added to the state
4259 		 * or if this was called outside of atomic check, so it
4260 		 * can't be used directly.
4261 		 */
4262 		bpc = min_t(u8, bpc, requested_bpc);
4263 
4264 		/* Round down to the nearest even number. */
4265 		bpc = bpc - (bpc & 1);
4266 	}
4267 
4268 	switch (bpc) {
4269 	case 0:
4270 		/*
4271 		 * Temporary Work around, DRM doesn't parse color depth for
4272 		 * EDID revision before 1.4
4273 		 * TODO: Fix edid parsing
4274 		 */
4275 		return COLOR_DEPTH_888;
4276 	case 6:
4277 		return COLOR_DEPTH_666;
4278 	case 8:
4279 		return COLOR_DEPTH_888;
4280 	case 10:
4281 		return COLOR_DEPTH_101010;
4282 	case 12:
4283 		return COLOR_DEPTH_121212;
4284 	case 14:
4285 		return COLOR_DEPTH_141414;
4286 	case 16:
4287 		return COLOR_DEPTH_161616;
4288 	default:
4289 		return COLOR_DEPTH_UNDEFINED;
4290 	}
4291 }
4292 
4293 static enum dc_aspect_ratio
4294 get_aspect_ratio(const struct drm_display_mode *mode_in)
4295 {
4296 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4297 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4298 }
4299 
4300 static enum dc_color_space
4301 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4302 {
4303 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4304 
4305 	switch (dc_crtc_timing->pixel_encoding)	{
4306 	case PIXEL_ENCODING_YCBCR422:
4307 	case PIXEL_ENCODING_YCBCR444:
4308 	case PIXEL_ENCODING_YCBCR420:
4309 	{
4310 		/*
4311 		 * 27030khz is the separation point between HDTV and SDTV
4312 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4313 		 * respectively
4314 		 */
4315 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4316 			if (dc_crtc_timing->flags.Y_ONLY)
4317 				color_space =
4318 					COLOR_SPACE_YCBCR709_LIMITED;
4319 			else
4320 				color_space = COLOR_SPACE_YCBCR709;
4321 		} else {
4322 			if (dc_crtc_timing->flags.Y_ONLY)
4323 				color_space =
4324 					COLOR_SPACE_YCBCR601_LIMITED;
4325 			else
4326 				color_space = COLOR_SPACE_YCBCR601;
4327 		}
4328 
4329 	}
4330 	break;
4331 	case PIXEL_ENCODING_RGB:
4332 		color_space = COLOR_SPACE_SRGB;
4333 		break;
4334 
4335 	default:
4336 		WARN_ON(1);
4337 		break;
4338 	}
4339 
4340 	return color_space;
4341 }
4342 
4343 static bool adjust_colour_depth_from_display_info(
4344 	struct dc_crtc_timing *timing_out,
4345 	const struct drm_display_info *info)
4346 {
4347 	enum dc_color_depth depth = timing_out->display_color_depth;
4348 	int normalized_clk;
4349 	do {
4350 		normalized_clk = timing_out->pix_clk_100hz / 10;
4351 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4352 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4353 			normalized_clk /= 2;
4354 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4355 		switch (depth) {
4356 		case COLOR_DEPTH_888:
4357 			break;
4358 		case COLOR_DEPTH_101010:
4359 			normalized_clk = (normalized_clk * 30) / 24;
4360 			break;
4361 		case COLOR_DEPTH_121212:
4362 			normalized_clk = (normalized_clk * 36) / 24;
4363 			break;
4364 		case COLOR_DEPTH_161616:
4365 			normalized_clk = (normalized_clk * 48) / 24;
4366 			break;
4367 		default:
4368 			/* The above depths are the only ones valid for HDMI. */
4369 			return false;
4370 		}
4371 		if (normalized_clk <= info->max_tmds_clock) {
4372 			timing_out->display_color_depth = depth;
4373 			return true;
4374 		}
4375 	} while (--depth > COLOR_DEPTH_666);
4376 	return false;
4377 }
4378 
4379 static void fill_stream_properties_from_drm_display_mode(
4380 	struct dc_stream_state *stream,
4381 	const struct drm_display_mode *mode_in,
4382 	const struct drm_connector *connector,
4383 	const struct drm_connector_state *connector_state,
4384 	const struct dc_stream_state *old_stream,
4385 	int requested_bpc)
4386 {
4387 	struct dc_crtc_timing *timing_out = &stream->timing;
4388 	const struct drm_display_info *info = &connector->display_info;
4389 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4390 	struct hdmi_vendor_infoframe hv_frame;
4391 	struct hdmi_avi_infoframe avi_frame;
4392 
4393 	memset(&hv_frame, 0, sizeof(hv_frame));
4394 	memset(&avi_frame, 0, sizeof(avi_frame));
4395 
4396 	timing_out->h_border_left = 0;
4397 	timing_out->h_border_right = 0;
4398 	timing_out->v_border_top = 0;
4399 	timing_out->v_border_bottom = 0;
4400 	/* TODO: un-hardcode */
4401 	if (drm_mode_is_420_only(info, mode_in)
4402 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4403 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4404 	else if (drm_mode_is_420_also(info, mode_in)
4405 			&& aconnector->force_yuv420_output)
4406 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4407 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4408 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4409 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4410 	else
4411 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4412 
4413 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4414 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4415 		connector,
4416 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4417 		requested_bpc);
4418 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4419 	timing_out->hdmi_vic = 0;
4420 
4421 	if(old_stream) {
4422 		timing_out->vic = old_stream->timing.vic;
4423 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4424 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4425 	} else {
4426 		timing_out->vic = drm_match_cea_mode(mode_in);
4427 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4428 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4429 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4430 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4431 	}
4432 
4433 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4434 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4435 		timing_out->vic = avi_frame.video_code;
4436 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4437 		timing_out->hdmi_vic = hv_frame.vic;
4438 	}
4439 
4440 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4441 	timing_out->h_total = mode_in->crtc_htotal;
4442 	timing_out->h_sync_width =
4443 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4444 	timing_out->h_front_porch =
4445 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4446 	timing_out->v_total = mode_in->crtc_vtotal;
4447 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4448 	timing_out->v_front_porch =
4449 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4450 	timing_out->v_sync_width =
4451 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4452 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4453 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4454 
4455 	stream->output_color_space = get_output_color_space(timing_out);
4456 
4457 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4458 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4459 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4460 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4461 		    drm_mode_is_420_also(info, mode_in) &&
4462 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4463 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4464 			adjust_colour_depth_from_display_info(timing_out, info);
4465 		}
4466 	}
4467 }
4468 
4469 static void fill_audio_info(struct audio_info *audio_info,
4470 			    const struct drm_connector *drm_connector,
4471 			    const struct dc_sink *dc_sink)
4472 {
4473 	int i = 0;
4474 	int cea_revision = 0;
4475 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4476 
4477 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4478 	audio_info->product_id = edid_caps->product_id;
4479 
4480 	cea_revision = drm_connector->display_info.cea_rev;
4481 
4482 	strscpy(audio_info->display_name,
4483 		edid_caps->display_name,
4484 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4485 
4486 	if (cea_revision >= 3) {
4487 		audio_info->mode_count = edid_caps->audio_mode_count;
4488 
4489 		for (i = 0; i < audio_info->mode_count; ++i) {
4490 			audio_info->modes[i].format_code =
4491 					(enum audio_format_code)
4492 					(edid_caps->audio_modes[i].format_code);
4493 			audio_info->modes[i].channel_count =
4494 					edid_caps->audio_modes[i].channel_count;
4495 			audio_info->modes[i].sample_rates.all =
4496 					edid_caps->audio_modes[i].sample_rate;
4497 			audio_info->modes[i].sample_size =
4498 					edid_caps->audio_modes[i].sample_size;
4499 		}
4500 	}
4501 
4502 	audio_info->flags.all = edid_caps->speaker_flags;
4503 
4504 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4505 	if (drm_connector->latency_present[0]) {
4506 		audio_info->video_latency = drm_connector->video_latency[0];
4507 		audio_info->audio_latency = drm_connector->audio_latency[0];
4508 	}
4509 
4510 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4511 
4512 }
4513 
4514 static void
4515 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4516 				      struct drm_display_mode *dst_mode)
4517 {
4518 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4519 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4520 	dst_mode->crtc_clock = src_mode->crtc_clock;
4521 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4522 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4523 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4524 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4525 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4526 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4527 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4528 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4529 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4530 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4531 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4532 }
4533 
4534 static void
4535 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4536 					const struct drm_display_mode *native_mode,
4537 					bool scale_enabled)
4538 {
4539 	if (scale_enabled) {
4540 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4541 	} else if (native_mode->clock == drm_mode->clock &&
4542 			native_mode->htotal == drm_mode->htotal &&
4543 			native_mode->vtotal == drm_mode->vtotal) {
4544 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4545 	} else {
4546 		/* no scaling nor amdgpu inserted, no need to patch */
4547 	}
4548 }
4549 
4550 static struct dc_sink *
4551 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4552 {
4553 	struct dc_sink_init_data sink_init_data = { 0 };
4554 	struct dc_sink *sink = NULL;
4555 	sink_init_data.link = aconnector->dc_link;
4556 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4557 
4558 	sink = dc_sink_create(&sink_init_data);
4559 	if (!sink) {
4560 		DRM_ERROR("Failed to create sink!\n");
4561 		return NULL;
4562 	}
4563 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4564 
4565 	return sink;
4566 }
4567 
4568 static void set_multisync_trigger_params(
4569 		struct dc_stream_state *stream)
4570 {
4571 	if (stream->triggered_crtc_reset.enabled) {
4572 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4573 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4574 	}
4575 }
4576 
4577 static void set_master_stream(struct dc_stream_state *stream_set[],
4578 			      int stream_count)
4579 {
4580 	int j, highest_rfr = 0, master_stream = 0;
4581 
4582 	for (j = 0;  j < stream_count; j++) {
4583 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4584 			int refresh_rate = 0;
4585 
4586 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4587 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4588 			if (refresh_rate > highest_rfr) {
4589 				highest_rfr = refresh_rate;
4590 				master_stream = j;
4591 			}
4592 		}
4593 	}
4594 	for (j = 0;  j < stream_count; j++) {
4595 		if (stream_set[j])
4596 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4597 	}
4598 }
4599 
4600 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4601 {
4602 	int i = 0;
4603 
4604 	if (context->stream_count < 2)
4605 		return;
4606 	for (i = 0; i < context->stream_count ; i++) {
4607 		if (!context->streams[i])
4608 			continue;
4609 		/*
4610 		 * TODO: add a function to read AMD VSDB bits and set
4611 		 * crtc_sync_master.multi_sync_enabled flag
4612 		 * For now it's set to false
4613 		 */
4614 		set_multisync_trigger_params(context->streams[i]);
4615 	}
4616 	set_master_stream(context->streams, context->stream_count);
4617 }
4618 
4619 static struct dc_stream_state *
4620 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4621 		       const struct drm_display_mode *drm_mode,
4622 		       const struct dm_connector_state *dm_state,
4623 		       const struct dc_stream_state *old_stream,
4624 		       int requested_bpc)
4625 {
4626 	struct drm_display_mode *preferred_mode = NULL;
4627 	struct drm_connector *drm_connector;
4628 	const struct drm_connector_state *con_state =
4629 		dm_state ? &dm_state->base : NULL;
4630 	struct dc_stream_state *stream = NULL;
4631 	struct drm_display_mode mode = *drm_mode;
4632 	bool native_mode_found = false;
4633 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4634 	int mode_refresh;
4635 	int preferred_refresh = 0;
4636 #if defined(CONFIG_DRM_AMD_DC_DCN)
4637 	struct dsc_dec_dpcd_caps dsc_caps;
4638 #endif
4639 	uint32_t link_bandwidth_kbps;
4640 
4641 	struct dc_sink *sink = NULL;
4642 	if (aconnector == NULL) {
4643 		DRM_ERROR("aconnector is NULL!\n");
4644 		return stream;
4645 	}
4646 
4647 	drm_connector = &aconnector->base;
4648 
4649 	if (!aconnector->dc_sink) {
4650 		sink = create_fake_sink(aconnector);
4651 		if (!sink)
4652 			return stream;
4653 	} else {
4654 		sink = aconnector->dc_sink;
4655 		dc_sink_retain(sink);
4656 	}
4657 
4658 	stream = dc_create_stream_for_sink(sink);
4659 
4660 	if (stream == NULL) {
4661 		DRM_ERROR("Failed to create stream for sink!\n");
4662 		goto finish;
4663 	}
4664 
4665 	stream->dm_stream_context = aconnector;
4666 
4667 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4668 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4669 
4670 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4671 		/* Search for preferred mode */
4672 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4673 			native_mode_found = true;
4674 			break;
4675 		}
4676 	}
4677 	if (!native_mode_found)
4678 		preferred_mode = list_first_entry_or_null(
4679 				&aconnector->base.modes,
4680 				struct drm_display_mode,
4681 				head);
4682 
4683 	mode_refresh = drm_mode_vrefresh(&mode);
4684 
4685 	if (preferred_mode == NULL) {
4686 		/*
4687 		 * This may not be an error, the use case is when we have no
4688 		 * usermode calls to reset and set mode upon hotplug. In this
4689 		 * case, we call set mode ourselves to restore the previous mode
4690 		 * and the modelist may not be filled in in time.
4691 		 */
4692 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4693 	} else {
4694 		decide_crtc_timing_for_drm_display_mode(
4695 				&mode, preferred_mode,
4696 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4697 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4698 	}
4699 
4700 	if (!dm_state)
4701 		drm_mode_set_crtcinfo(&mode, 0);
4702 
4703 	/*
4704 	* If scaling is enabled and refresh rate didn't change
4705 	* we copy the vic and polarities of the old timings
4706 	*/
4707 	if (!scale || mode_refresh != preferred_refresh)
4708 		fill_stream_properties_from_drm_display_mode(stream,
4709 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4710 	else
4711 		fill_stream_properties_from_drm_display_mode(stream,
4712 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4713 
4714 	stream->timing.flags.DSC = 0;
4715 
4716 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4717 #if defined(CONFIG_DRM_AMD_DC_DCN)
4718 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4719 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4720 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4721 				      &dsc_caps);
4722 #endif
4723 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4724 							     dc_link_get_link_cap(aconnector->dc_link));
4725 
4726 #if defined(CONFIG_DRM_AMD_DC_DCN)
4727 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4728 			/* Set DSC policy according to dsc_clock_en */
4729 			dc_dsc_policy_set_enable_dsc_when_not_needed(
4730 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4731 
4732 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4733 						  &dsc_caps,
4734 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4735 						  link_bandwidth_kbps,
4736 						  &stream->timing,
4737 						  &stream->timing.dsc_cfg))
4738 				stream->timing.flags.DSC = 1;
4739 			/* Overwrite the stream flag if DSC is enabled through debugfs */
4740 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4741 				stream->timing.flags.DSC = 1;
4742 
4743 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4744 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4745 
4746 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4747 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4748 
4749 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4750 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4751 		}
4752 #endif
4753 	}
4754 
4755 	update_stream_scaling_settings(&mode, dm_state, stream);
4756 
4757 	fill_audio_info(
4758 		&stream->audio_info,
4759 		drm_connector,
4760 		sink);
4761 
4762 	update_stream_signal(stream, sink);
4763 
4764 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4765 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4766 
4767 	if (stream->link->psr_settings.psr_feature_enabled) {
4768 		//
4769 		// should decide stream support vsc sdp colorimetry capability
4770 		// before building vsc info packet
4771 		//
4772 		stream->use_vsc_sdp_for_colorimetry = false;
4773 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4774 			stream->use_vsc_sdp_for_colorimetry =
4775 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4776 		} else {
4777 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4778 				stream->use_vsc_sdp_for_colorimetry = true;
4779 		}
4780 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4781 	}
4782 finish:
4783 	dc_sink_release(sink);
4784 
4785 	return stream;
4786 }
4787 
4788 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4789 {
4790 	drm_crtc_cleanup(crtc);
4791 	kfree(crtc);
4792 }
4793 
4794 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4795 				  struct drm_crtc_state *state)
4796 {
4797 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4798 
4799 	/* TODO Destroy dc_stream objects are stream object is flattened */
4800 	if (cur->stream)
4801 		dc_stream_release(cur->stream);
4802 
4803 
4804 	__drm_atomic_helper_crtc_destroy_state(state);
4805 
4806 
4807 	kfree(state);
4808 }
4809 
4810 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4811 {
4812 	struct dm_crtc_state *state;
4813 
4814 	if (crtc->state)
4815 		dm_crtc_destroy_state(crtc, crtc->state);
4816 
4817 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4818 	if (WARN_ON(!state))
4819 		return;
4820 
4821 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4822 }
4823 
4824 static struct drm_crtc_state *
4825 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4826 {
4827 	struct dm_crtc_state *state, *cur;
4828 
4829 	cur = to_dm_crtc_state(crtc->state);
4830 
4831 	if (WARN_ON(!crtc->state))
4832 		return NULL;
4833 
4834 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4835 	if (!state)
4836 		return NULL;
4837 
4838 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4839 
4840 	if (cur->stream) {
4841 		state->stream = cur->stream;
4842 		dc_stream_retain(state->stream);
4843 	}
4844 
4845 	state->active_planes = cur->active_planes;
4846 	state->vrr_infopacket = cur->vrr_infopacket;
4847 	state->abm_level = cur->abm_level;
4848 	state->vrr_supported = cur->vrr_supported;
4849 	state->freesync_config = cur->freesync_config;
4850 	state->crc_src = cur->crc_src;
4851 	state->cm_has_degamma = cur->cm_has_degamma;
4852 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4853 
4854 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
4855 
4856 	return &state->base;
4857 }
4858 
4859 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4860 {
4861 	enum dc_irq_source irq_source;
4862 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4863 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4864 	int rc;
4865 
4866 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4867 
4868 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4869 
4870 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4871 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4872 	return rc;
4873 }
4874 
4875 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4876 {
4877 	enum dc_irq_source irq_source;
4878 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4879 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4880 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4881 	int rc = 0;
4882 
4883 	if (enable) {
4884 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4885 		if (amdgpu_dm_vrr_active(acrtc_state))
4886 			rc = dm_set_vupdate_irq(crtc, true);
4887 	} else {
4888 		/* vblank irq off -> vupdate irq off */
4889 		rc = dm_set_vupdate_irq(crtc, false);
4890 	}
4891 
4892 	if (rc)
4893 		return rc;
4894 
4895 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4896 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4897 }
4898 
4899 static int dm_enable_vblank(struct drm_crtc *crtc)
4900 {
4901 	return dm_set_vblank(crtc, true);
4902 }
4903 
4904 static void dm_disable_vblank(struct drm_crtc *crtc)
4905 {
4906 	dm_set_vblank(crtc, false);
4907 }
4908 
4909 /* Implemented only the options currently availible for the driver */
4910 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4911 	.reset = dm_crtc_reset_state,
4912 	.destroy = amdgpu_dm_crtc_destroy,
4913 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4914 	.set_config = drm_atomic_helper_set_config,
4915 	.page_flip = drm_atomic_helper_page_flip,
4916 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4917 	.atomic_destroy_state = dm_crtc_destroy_state,
4918 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4919 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4920 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4921 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4922 	.enable_vblank = dm_enable_vblank,
4923 	.disable_vblank = dm_disable_vblank,
4924 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4925 };
4926 
4927 static enum drm_connector_status
4928 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4929 {
4930 	bool connected;
4931 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4932 
4933 	/*
4934 	 * Notes:
4935 	 * 1. This interface is NOT called in context of HPD irq.
4936 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4937 	 * makes it a bad place for *any* MST-related activity.
4938 	 */
4939 
4940 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4941 	    !aconnector->fake_enable)
4942 		connected = (aconnector->dc_sink != NULL);
4943 	else
4944 		connected = (aconnector->base.force == DRM_FORCE_ON);
4945 
4946 	update_subconnector_property(aconnector);
4947 
4948 	return (connected ? connector_status_connected :
4949 			connector_status_disconnected);
4950 }
4951 
4952 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4953 					    struct drm_connector_state *connector_state,
4954 					    struct drm_property *property,
4955 					    uint64_t val)
4956 {
4957 	struct drm_device *dev = connector->dev;
4958 	struct amdgpu_device *adev = drm_to_adev(dev);
4959 	struct dm_connector_state *dm_old_state =
4960 		to_dm_connector_state(connector->state);
4961 	struct dm_connector_state *dm_new_state =
4962 		to_dm_connector_state(connector_state);
4963 
4964 	int ret = -EINVAL;
4965 
4966 	if (property == dev->mode_config.scaling_mode_property) {
4967 		enum amdgpu_rmx_type rmx_type;
4968 
4969 		switch (val) {
4970 		case DRM_MODE_SCALE_CENTER:
4971 			rmx_type = RMX_CENTER;
4972 			break;
4973 		case DRM_MODE_SCALE_ASPECT:
4974 			rmx_type = RMX_ASPECT;
4975 			break;
4976 		case DRM_MODE_SCALE_FULLSCREEN:
4977 			rmx_type = RMX_FULL;
4978 			break;
4979 		case DRM_MODE_SCALE_NONE:
4980 		default:
4981 			rmx_type = RMX_OFF;
4982 			break;
4983 		}
4984 
4985 		if (dm_old_state->scaling == rmx_type)
4986 			return 0;
4987 
4988 		dm_new_state->scaling = rmx_type;
4989 		ret = 0;
4990 	} else if (property == adev->mode_info.underscan_hborder_property) {
4991 		dm_new_state->underscan_hborder = val;
4992 		ret = 0;
4993 	} else if (property == adev->mode_info.underscan_vborder_property) {
4994 		dm_new_state->underscan_vborder = val;
4995 		ret = 0;
4996 	} else if (property == adev->mode_info.underscan_property) {
4997 		dm_new_state->underscan_enable = val;
4998 		ret = 0;
4999 	} else if (property == adev->mode_info.abm_level_property) {
5000 		dm_new_state->abm_level = val;
5001 		ret = 0;
5002 	}
5003 
5004 	return ret;
5005 }
5006 
5007 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5008 					    const struct drm_connector_state *state,
5009 					    struct drm_property *property,
5010 					    uint64_t *val)
5011 {
5012 	struct drm_device *dev = connector->dev;
5013 	struct amdgpu_device *adev = drm_to_adev(dev);
5014 	struct dm_connector_state *dm_state =
5015 		to_dm_connector_state(state);
5016 	int ret = -EINVAL;
5017 
5018 	if (property == dev->mode_config.scaling_mode_property) {
5019 		switch (dm_state->scaling) {
5020 		case RMX_CENTER:
5021 			*val = DRM_MODE_SCALE_CENTER;
5022 			break;
5023 		case RMX_ASPECT:
5024 			*val = DRM_MODE_SCALE_ASPECT;
5025 			break;
5026 		case RMX_FULL:
5027 			*val = DRM_MODE_SCALE_FULLSCREEN;
5028 			break;
5029 		case RMX_OFF:
5030 		default:
5031 			*val = DRM_MODE_SCALE_NONE;
5032 			break;
5033 		}
5034 		ret = 0;
5035 	} else if (property == adev->mode_info.underscan_hborder_property) {
5036 		*val = dm_state->underscan_hborder;
5037 		ret = 0;
5038 	} else if (property == adev->mode_info.underscan_vborder_property) {
5039 		*val = dm_state->underscan_vborder;
5040 		ret = 0;
5041 	} else if (property == adev->mode_info.underscan_property) {
5042 		*val = dm_state->underscan_enable;
5043 		ret = 0;
5044 	} else if (property == adev->mode_info.abm_level_property) {
5045 		*val = dm_state->abm_level;
5046 		ret = 0;
5047 	}
5048 
5049 	return ret;
5050 }
5051 
5052 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5053 {
5054 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5055 
5056 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5057 }
5058 
5059 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5060 {
5061 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5062 	const struct dc_link *link = aconnector->dc_link;
5063 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5064 	struct amdgpu_display_manager *dm = &adev->dm;
5065 
5066 	/*
5067 	 * Call only if mst_mgr was iniitalized before since it's not done
5068 	 * for all connector types.
5069 	 */
5070 	if (aconnector->mst_mgr.dev)
5071 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5072 
5073 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5074 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5075 
5076 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5077 	    link->type != dc_connection_none &&
5078 	    dm->backlight_dev) {
5079 		backlight_device_unregister(dm->backlight_dev);
5080 		dm->backlight_dev = NULL;
5081 	}
5082 #endif
5083 
5084 	if (aconnector->dc_em_sink)
5085 		dc_sink_release(aconnector->dc_em_sink);
5086 	aconnector->dc_em_sink = NULL;
5087 	if (aconnector->dc_sink)
5088 		dc_sink_release(aconnector->dc_sink);
5089 	aconnector->dc_sink = NULL;
5090 
5091 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5092 	drm_connector_unregister(connector);
5093 	drm_connector_cleanup(connector);
5094 	if (aconnector->i2c) {
5095 		i2c_del_adapter(&aconnector->i2c->base);
5096 		kfree(aconnector->i2c);
5097 	}
5098 	kfree(aconnector->dm_dp_aux.aux.name);
5099 
5100 	kfree(connector);
5101 }
5102 
5103 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5104 {
5105 	struct dm_connector_state *state =
5106 		to_dm_connector_state(connector->state);
5107 
5108 	if (connector->state)
5109 		__drm_atomic_helper_connector_destroy_state(connector->state);
5110 
5111 	kfree(state);
5112 
5113 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5114 
5115 	if (state) {
5116 		state->scaling = RMX_OFF;
5117 		state->underscan_enable = false;
5118 		state->underscan_hborder = 0;
5119 		state->underscan_vborder = 0;
5120 		state->base.max_requested_bpc = 8;
5121 		state->vcpi_slots = 0;
5122 		state->pbn = 0;
5123 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5124 			state->abm_level = amdgpu_dm_abm_level;
5125 
5126 		__drm_atomic_helper_connector_reset(connector, &state->base);
5127 	}
5128 }
5129 
5130 struct drm_connector_state *
5131 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5132 {
5133 	struct dm_connector_state *state =
5134 		to_dm_connector_state(connector->state);
5135 
5136 	struct dm_connector_state *new_state =
5137 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5138 
5139 	if (!new_state)
5140 		return NULL;
5141 
5142 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5143 
5144 	new_state->freesync_capable = state->freesync_capable;
5145 	new_state->abm_level = state->abm_level;
5146 	new_state->scaling = state->scaling;
5147 	new_state->underscan_enable = state->underscan_enable;
5148 	new_state->underscan_hborder = state->underscan_hborder;
5149 	new_state->underscan_vborder = state->underscan_vborder;
5150 	new_state->vcpi_slots = state->vcpi_slots;
5151 	new_state->pbn = state->pbn;
5152 	return &new_state->base;
5153 }
5154 
5155 static int
5156 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5157 {
5158 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5159 		to_amdgpu_dm_connector(connector);
5160 	int r;
5161 
5162 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5163 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5164 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5165 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5166 		if (r)
5167 			return r;
5168 	}
5169 
5170 #if defined(CONFIG_DEBUG_FS)
5171 	connector_debugfs_init(amdgpu_dm_connector);
5172 #endif
5173 
5174 	return 0;
5175 }
5176 
5177 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5178 	.reset = amdgpu_dm_connector_funcs_reset,
5179 	.detect = amdgpu_dm_connector_detect,
5180 	.fill_modes = drm_helper_probe_single_connector_modes,
5181 	.destroy = amdgpu_dm_connector_destroy,
5182 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5183 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5184 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5185 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5186 	.late_register = amdgpu_dm_connector_late_register,
5187 	.early_unregister = amdgpu_dm_connector_unregister
5188 };
5189 
5190 static int get_modes(struct drm_connector *connector)
5191 {
5192 	return amdgpu_dm_connector_get_modes(connector);
5193 }
5194 
5195 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5196 {
5197 	struct dc_sink_init_data init_params = {
5198 			.link = aconnector->dc_link,
5199 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5200 	};
5201 	struct edid *edid;
5202 
5203 	if (!aconnector->base.edid_blob_ptr) {
5204 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5205 				aconnector->base.name);
5206 
5207 		aconnector->base.force = DRM_FORCE_OFF;
5208 		aconnector->base.override_edid = false;
5209 		return;
5210 	}
5211 
5212 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5213 
5214 	aconnector->edid = edid;
5215 
5216 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5217 		aconnector->dc_link,
5218 		(uint8_t *)edid,
5219 		(edid->extensions + 1) * EDID_LENGTH,
5220 		&init_params);
5221 
5222 	if (aconnector->base.force == DRM_FORCE_ON) {
5223 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5224 		aconnector->dc_link->local_sink :
5225 		aconnector->dc_em_sink;
5226 		dc_sink_retain(aconnector->dc_sink);
5227 	}
5228 }
5229 
5230 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5231 {
5232 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5233 
5234 	/*
5235 	 * In case of headless boot with force on for DP managed connector
5236 	 * Those settings have to be != 0 to get initial modeset
5237 	 */
5238 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5239 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5240 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5241 	}
5242 
5243 
5244 	aconnector->base.override_edid = true;
5245 	create_eml_sink(aconnector);
5246 }
5247 
5248 static struct dc_stream_state *
5249 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5250 				const struct drm_display_mode *drm_mode,
5251 				const struct dm_connector_state *dm_state,
5252 				const struct dc_stream_state *old_stream)
5253 {
5254 	struct drm_connector *connector = &aconnector->base;
5255 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5256 	struct dc_stream_state *stream;
5257 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5258 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5259 	enum dc_status dc_result = DC_OK;
5260 
5261 	do {
5262 		stream = create_stream_for_sink(aconnector, drm_mode,
5263 						dm_state, old_stream,
5264 						requested_bpc);
5265 		if (stream == NULL) {
5266 			DRM_ERROR("Failed to create stream for sink!\n");
5267 			break;
5268 		}
5269 
5270 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5271 
5272 		if (dc_result != DC_OK) {
5273 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5274 				      drm_mode->hdisplay,
5275 				      drm_mode->vdisplay,
5276 				      drm_mode->clock,
5277 				      dc_result,
5278 				      dc_status_to_str(dc_result));
5279 
5280 			dc_stream_release(stream);
5281 			stream = NULL;
5282 			requested_bpc -= 2; /* lower bpc to retry validation */
5283 		}
5284 
5285 	} while (stream == NULL && requested_bpc >= 6);
5286 
5287 	return stream;
5288 }
5289 
5290 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5291 				   struct drm_display_mode *mode)
5292 {
5293 	int result = MODE_ERROR;
5294 	struct dc_sink *dc_sink;
5295 	/* TODO: Unhardcode stream count */
5296 	struct dc_stream_state *stream;
5297 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5298 
5299 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5300 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5301 		return result;
5302 
5303 	/*
5304 	 * Only run this the first time mode_valid is called to initilialize
5305 	 * EDID mgmt
5306 	 */
5307 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5308 		!aconnector->dc_em_sink)
5309 		handle_edid_mgmt(aconnector);
5310 
5311 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5312 
5313 	if (dc_sink == NULL) {
5314 		DRM_ERROR("dc_sink is NULL!\n");
5315 		goto fail;
5316 	}
5317 
5318 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5319 	if (stream) {
5320 		dc_stream_release(stream);
5321 		result = MODE_OK;
5322 	}
5323 
5324 fail:
5325 	/* TODO: error handling*/
5326 	return result;
5327 }
5328 
5329 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5330 				struct dc_info_packet *out)
5331 {
5332 	struct hdmi_drm_infoframe frame;
5333 	unsigned char buf[30]; /* 26 + 4 */
5334 	ssize_t len;
5335 	int ret, i;
5336 
5337 	memset(out, 0, sizeof(*out));
5338 
5339 	if (!state->hdr_output_metadata)
5340 		return 0;
5341 
5342 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5343 	if (ret)
5344 		return ret;
5345 
5346 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5347 	if (len < 0)
5348 		return (int)len;
5349 
5350 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5351 	if (len != 30)
5352 		return -EINVAL;
5353 
5354 	/* Prepare the infopacket for DC. */
5355 	switch (state->connector->connector_type) {
5356 	case DRM_MODE_CONNECTOR_HDMIA:
5357 		out->hb0 = 0x87; /* type */
5358 		out->hb1 = 0x01; /* version */
5359 		out->hb2 = 0x1A; /* length */
5360 		out->sb[0] = buf[3]; /* checksum */
5361 		i = 1;
5362 		break;
5363 
5364 	case DRM_MODE_CONNECTOR_DisplayPort:
5365 	case DRM_MODE_CONNECTOR_eDP:
5366 		out->hb0 = 0x00; /* sdp id, zero */
5367 		out->hb1 = 0x87; /* type */
5368 		out->hb2 = 0x1D; /* payload len - 1 */
5369 		out->hb3 = (0x13 << 2); /* sdp version */
5370 		out->sb[0] = 0x01; /* version */
5371 		out->sb[1] = 0x1A; /* length */
5372 		i = 2;
5373 		break;
5374 
5375 	default:
5376 		return -EINVAL;
5377 	}
5378 
5379 	memcpy(&out->sb[i], &buf[4], 26);
5380 	out->valid = true;
5381 
5382 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5383 		       sizeof(out->sb), false);
5384 
5385 	return 0;
5386 }
5387 
5388 static bool
5389 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5390 			  const struct drm_connector_state *new_state)
5391 {
5392 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5393 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5394 
5395 	if (old_blob != new_blob) {
5396 		if (old_blob && new_blob &&
5397 		    old_blob->length == new_blob->length)
5398 			return memcmp(old_blob->data, new_blob->data,
5399 				      old_blob->length);
5400 
5401 		return true;
5402 	}
5403 
5404 	return false;
5405 }
5406 
5407 static int
5408 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5409 				 struct drm_atomic_state *state)
5410 {
5411 	struct drm_connector_state *new_con_state =
5412 		drm_atomic_get_new_connector_state(state, conn);
5413 	struct drm_connector_state *old_con_state =
5414 		drm_atomic_get_old_connector_state(state, conn);
5415 	struct drm_crtc *crtc = new_con_state->crtc;
5416 	struct drm_crtc_state *new_crtc_state;
5417 	int ret;
5418 
5419 	if (!crtc)
5420 		return 0;
5421 
5422 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5423 		struct dc_info_packet hdr_infopacket;
5424 
5425 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5426 		if (ret)
5427 			return ret;
5428 
5429 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5430 		if (IS_ERR(new_crtc_state))
5431 			return PTR_ERR(new_crtc_state);
5432 
5433 		/*
5434 		 * DC considers the stream backends changed if the
5435 		 * static metadata changes. Forcing the modeset also
5436 		 * gives a simple way for userspace to switch from
5437 		 * 8bpc to 10bpc when setting the metadata to enter
5438 		 * or exit HDR.
5439 		 *
5440 		 * Changing the static metadata after it's been
5441 		 * set is permissible, however. So only force a
5442 		 * modeset if we're entering or exiting HDR.
5443 		 */
5444 		new_crtc_state->mode_changed =
5445 			!old_con_state->hdr_output_metadata ||
5446 			!new_con_state->hdr_output_metadata;
5447 	}
5448 
5449 	return 0;
5450 }
5451 
5452 static const struct drm_connector_helper_funcs
5453 amdgpu_dm_connector_helper_funcs = {
5454 	/*
5455 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5456 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5457 	 * are missing after user start lightdm. So we need to renew modes list.
5458 	 * in get_modes call back, not just return the modes count
5459 	 */
5460 	.get_modes = get_modes,
5461 	.mode_valid = amdgpu_dm_connector_mode_valid,
5462 	.atomic_check = amdgpu_dm_connector_atomic_check,
5463 };
5464 
5465 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5466 {
5467 }
5468 
5469 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5470 {
5471 	struct drm_atomic_state *state = new_crtc_state->state;
5472 	struct drm_plane *plane;
5473 	int num_active = 0;
5474 
5475 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5476 		struct drm_plane_state *new_plane_state;
5477 
5478 		/* Cursor planes are "fake". */
5479 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5480 			continue;
5481 
5482 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5483 
5484 		if (!new_plane_state) {
5485 			/*
5486 			 * The plane is enable on the CRTC and hasn't changed
5487 			 * state. This means that it previously passed
5488 			 * validation and is therefore enabled.
5489 			 */
5490 			num_active += 1;
5491 			continue;
5492 		}
5493 
5494 		/* We need a framebuffer to be considered enabled. */
5495 		num_active += (new_plane_state->fb != NULL);
5496 	}
5497 
5498 	return num_active;
5499 }
5500 
5501 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5502 					 struct drm_crtc_state *new_crtc_state)
5503 {
5504 	struct dm_crtc_state *dm_new_crtc_state =
5505 		to_dm_crtc_state(new_crtc_state);
5506 
5507 	dm_new_crtc_state->active_planes = 0;
5508 
5509 	if (!dm_new_crtc_state->stream)
5510 		return;
5511 
5512 	dm_new_crtc_state->active_planes =
5513 		count_crtc_active_planes(new_crtc_state);
5514 }
5515 
5516 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5517 				       struct drm_atomic_state *state)
5518 {
5519 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
5520 									  crtc);
5521 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5522 	struct dc *dc = adev->dm.dc;
5523 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5524 	int ret = -EINVAL;
5525 
5526 	dm_update_crtc_active_planes(crtc, crtc_state);
5527 
5528 	if (unlikely(!dm_crtc_state->stream &&
5529 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
5530 		WARN_ON(1);
5531 		return ret;
5532 	}
5533 
5534 	/*
5535 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5536 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5537 	 * planes are disabled, which is not supported by the hardware. And there is legacy
5538 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5539 	 */
5540 	if (crtc_state->enable &&
5541 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary)))
5542 		return -EINVAL;
5543 
5544 	/* In some use cases, like reset, no stream is attached */
5545 	if (!dm_crtc_state->stream)
5546 		return 0;
5547 
5548 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5549 		return 0;
5550 
5551 	return ret;
5552 }
5553 
5554 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5555 				      const struct drm_display_mode *mode,
5556 				      struct drm_display_mode *adjusted_mode)
5557 {
5558 	return true;
5559 }
5560 
5561 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5562 	.disable = dm_crtc_helper_disable,
5563 	.atomic_check = dm_crtc_helper_atomic_check,
5564 	.mode_fixup = dm_crtc_helper_mode_fixup,
5565 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5566 };
5567 
5568 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5569 {
5570 
5571 }
5572 
5573 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5574 {
5575 	switch (display_color_depth) {
5576 		case COLOR_DEPTH_666:
5577 			return 6;
5578 		case COLOR_DEPTH_888:
5579 			return 8;
5580 		case COLOR_DEPTH_101010:
5581 			return 10;
5582 		case COLOR_DEPTH_121212:
5583 			return 12;
5584 		case COLOR_DEPTH_141414:
5585 			return 14;
5586 		case COLOR_DEPTH_161616:
5587 			return 16;
5588 		default:
5589 			break;
5590 		}
5591 	return 0;
5592 }
5593 
5594 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5595 					  struct drm_crtc_state *crtc_state,
5596 					  struct drm_connector_state *conn_state)
5597 {
5598 	struct drm_atomic_state *state = crtc_state->state;
5599 	struct drm_connector *connector = conn_state->connector;
5600 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5601 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5602 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5603 	struct drm_dp_mst_topology_mgr *mst_mgr;
5604 	struct drm_dp_mst_port *mst_port;
5605 	enum dc_color_depth color_depth;
5606 	int clock, bpp = 0;
5607 	bool is_y420 = false;
5608 
5609 	if (!aconnector->port || !aconnector->dc_sink)
5610 		return 0;
5611 
5612 	mst_port = aconnector->port;
5613 	mst_mgr = &aconnector->mst_port->mst_mgr;
5614 
5615 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5616 		return 0;
5617 
5618 	if (!state->duplicated) {
5619 		int max_bpc = conn_state->max_requested_bpc;
5620 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5621 				aconnector->force_yuv420_output;
5622 		color_depth = convert_color_depth_from_display_info(connector,
5623 								    is_y420,
5624 								    max_bpc);
5625 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5626 		clock = adjusted_mode->clock;
5627 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5628 	}
5629 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5630 									   mst_mgr,
5631 									   mst_port,
5632 									   dm_new_connector_state->pbn,
5633 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5634 	if (dm_new_connector_state->vcpi_slots < 0) {
5635 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5636 		return dm_new_connector_state->vcpi_slots;
5637 	}
5638 	return 0;
5639 }
5640 
5641 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5642 	.disable = dm_encoder_helper_disable,
5643 	.atomic_check = dm_encoder_helper_atomic_check
5644 };
5645 
5646 #if defined(CONFIG_DRM_AMD_DC_DCN)
5647 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5648 					    struct dc_state *dc_state)
5649 {
5650 	struct dc_stream_state *stream = NULL;
5651 	struct drm_connector *connector;
5652 	struct drm_connector_state *new_con_state, *old_con_state;
5653 	struct amdgpu_dm_connector *aconnector;
5654 	struct dm_connector_state *dm_conn_state;
5655 	int i, j, clock, bpp;
5656 	int vcpi, pbn_div, pbn = 0;
5657 
5658 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5659 
5660 		aconnector = to_amdgpu_dm_connector(connector);
5661 
5662 		if (!aconnector->port)
5663 			continue;
5664 
5665 		if (!new_con_state || !new_con_state->crtc)
5666 			continue;
5667 
5668 		dm_conn_state = to_dm_connector_state(new_con_state);
5669 
5670 		for (j = 0; j < dc_state->stream_count; j++) {
5671 			stream = dc_state->streams[j];
5672 			if (!stream)
5673 				continue;
5674 
5675 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5676 				break;
5677 
5678 			stream = NULL;
5679 		}
5680 
5681 		if (!stream)
5682 			continue;
5683 
5684 		if (stream->timing.flags.DSC != 1) {
5685 			drm_dp_mst_atomic_enable_dsc(state,
5686 						     aconnector->port,
5687 						     dm_conn_state->pbn,
5688 						     0,
5689 						     false);
5690 			continue;
5691 		}
5692 
5693 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5694 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5695 		clock = stream->timing.pix_clk_100hz / 10;
5696 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5697 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5698 						    aconnector->port,
5699 						    pbn, pbn_div,
5700 						    true);
5701 		if (vcpi < 0)
5702 			return vcpi;
5703 
5704 		dm_conn_state->pbn = pbn;
5705 		dm_conn_state->vcpi_slots = vcpi;
5706 	}
5707 	return 0;
5708 }
5709 #endif
5710 
5711 static void dm_drm_plane_reset(struct drm_plane *plane)
5712 {
5713 	struct dm_plane_state *amdgpu_state = NULL;
5714 
5715 	if (plane->state)
5716 		plane->funcs->atomic_destroy_state(plane, plane->state);
5717 
5718 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5719 	WARN_ON(amdgpu_state == NULL);
5720 
5721 	if (amdgpu_state)
5722 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5723 }
5724 
5725 static struct drm_plane_state *
5726 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5727 {
5728 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5729 
5730 	old_dm_plane_state = to_dm_plane_state(plane->state);
5731 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5732 	if (!dm_plane_state)
5733 		return NULL;
5734 
5735 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5736 
5737 	if (old_dm_plane_state->dc_state) {
5738 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5739 		dc_plane_state_retain(dm_plane_state->dc_state);
5740 	}
5741 
5742 	/* Framebuffer hasn't been updated yet, so retain old flags. */
5743 	dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5744 	dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5745 
5746 	return &dm_plane_state->base;
5747 }
5748 
5749 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5750 				struct drm_plane_state *state)
5751 {
5752 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5753 
5754 	if (dm_plane_state->dc_state)
5755 		dc_plane_state_release(dm_plane_state->dc_state);
5756 
5757 	drm_atomic_helper_plane_destroy_state(plane, state);
5758 }
5759 
5760 static const struct drm_plane_funcs dm_plane_funcs = {
5761 	.update_plane	= drm_atomic_helper_update_plane,
5762 	.disable_plane	= drm_atomic_helper_disable_plane,
5763 	.destroy	= drm_primary_helper_destroy,
5764 	.reset = dm_drm_plane_reset,
5765 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5766 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5767 };
5768 
5769 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5770 				      struct drm_plane_state *new_state)
5771 {
5772 	struct amdgpu_framebuffer *afb;
5773 	struct drm_gem_object *obj;
5774 	struct amdgpu_device *adev;
5775 	struct amdgpu_bo *rbo;
5776 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5777 	struct list_head list;
5778 	struct ttm_validate_buffer tv;
5779 	struct ww_acquire_ctx ticket;
5780 	uint32_t domain;
5781 	int r;
5782 
5783 	if (!new_state->fb) {
5784 		DRM_DEBUG_DRIVER("No FB bound\n");
5785 		return 0;
5786 	}
5787 
5788 	afb = to_amdgpu_framebuffer(new_state->fb);
5789 	obj = new_state->fb->obj[0];
5790 	rbo = gem_to_amdgpu_bo(obj);
5791 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5792 	INIT_LIST_HEAD(&list);
5793 
5794 	tv.bo = &rbo->tbo;
5795 	tv.num_shared = 1;
5796 	list_add(&tv.head, &list);
5797 
5798 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5799 	if (r) {
5800 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5801 		return r;
5802 	}
5803 
5804 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5805 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5806 	else
5807 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5808 
5809 	r = amdgpu_bo_pin(rbo, domain);
5810 	if (unlikely(r != 0)) {
5811 		if (r != -ERESTARTSYS)
5812 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5813 		ttm_eu_backoff_reservation(&ticket, &list);
5814 		return r;
5815 	}
5816 
5817 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5818 	if (unlikely(r != 0)) {
5819 		amdgpu_bo_unpin(rbo);
5820 		ttm_eu_backoff_reservation(&ticket, &list);
5821 		DRM_ERROR("%p bind failed\n", rbo);
5822 		return r;
5823 	}
5824 
5825 	ttm_eu_backoff_reservation(&ticket, &list);
5826 
5827 	afb->address = amdgpu_bo_gpu_offset(rbo);
5828 
5829 	amdgpu_bo_ref(rbo);
5830 
5831 	/**
5832 	 * We don't do surface updates on planes that have been newly created,
5833 	 * but we also don't have the afb->address during atomic check.
5834 	 *
5835 	 * Fill in buffer attributes depending on the address here, but only on
5836 	 * newly created planes since they're not being used by DC yet and this
5837 	 * won't modify global state.
5838 	 */
5839 	dm_plane_state_old = to_dm_plane_state(plane->state);
5840 	dm_plane_state_new = to_dm_plane_state(new_state);
5841 
5842 	if (dm_plane_state_new->dc_state &&
5843 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5844 		struct dc_plane_state *plane_state =
5845 			dm_plane_state_new->dc_state;
5846 		bool force_disable_dcc = !plane_state->dcc.enable;
5847 
5848 		fill_plane_buffer_attributes(
5849 			adev, afb, plane_state->format, plane_state->rotation,
5850 			dm_plane_state_new->tiling_flags,
5851 			&plane_state->tiling_info, &plane_state->plane_size,
5852 			&plane_state->dcc, &plane_state->address,
5853 			dm_plane_state_new->tmz_surface, force_disable_dcc);
5854 	}
5855 
5856 	return 0;
5857 }
5858 
5859 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5860 				       struct drm_plane_state *old_state)
5861 {
5862 	struct amdgpu_bo *rbo;
5863 	int r;
5864 
5865 	if (!old_state->fb)
5866 		return;
5867 
5868 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5869 	r = amdgpu_bo_reserve(rbo, false);
5870 	if (unlikely(r)) {
5871 		DRM_ERROR("failed to reserve rbo before unpin\n");
5872 		return;
5873 	}
5874 
5875 	amdgpu_bo_unpin(rbo);
5876 	amdgpu_bo_unreserve(rbo);
5877 	amdgpu_bo_unref(&rbo);
5878 }
5879 
5880 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5881 				       struct drm_crtc_state *new_crtc_state)
5882 {
5883 	int max_downscale = 0;
5884 	int max_upscale = INT_MAX;
5885 
5886 	/* TODO: These should be checked against DC plane caps */
5887 	return drm_atomic_helper_check_plane_state(
5888 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5889 }
5890 
5891 static int dm_plane_atomic_check(struct drm_plane *plane,
5892 				 struct drm_plane_state *state)
5893 {
5894 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
5895 	struct dc *dc = adev->dm.dc;
5896 	struct dm_plane_state *dm_plane_state;
5897 	struct dc_scaling_info scaling_info;
5898 	struct drm_crtc_state *new_crtc_state;
5899 	int ret;
5900 
5901 	dm_plane_state = to_dm_plane_state(state);
5902 
5903 	if (!dm_plane_state->dc_state)
5904 		return 0;
5905 
5906 	new_crtc_state =
5907 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
5908 	if (!new_crtc_state)
5909 		return -EINVAL;
5910 
5911 	ret = dm_plane_helper_check_state(state, new_crtc_state);
5912 	if (ret)
5913 		return ret;
5914 
5915 	ret = fill_dc_scaling_info(state, &scaling_info);
5916 	if (ret)
5917 		return ret;
5918 
5919 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5920 		return 0;
5921 
5922 	return -EINVAL;
5923 }
5924 
5925 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5926 				       struct drm_plane_state *new_plane_state)
5927 {
5928 	/* Only support async updates on cursor planes. */
5929 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5930 		return -EINVAL;
5931 
5932 	return 0;
5933 }
5934 
5935 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5936 					 struct drm_plane_state *new_state)
5937 {
5938 	struct drm_plane_state *old_state =
5939 		drm_atomic_get_old_plane_state(new_state->state, plane);
5940 
5941 	swap(plane->state->fb, new_state->fb);
5942 
5943 	plane->state->src_x = new_state->src_x;
5944 	plane->state->src_y = new_state->src_y;
5945 	plane->state->src_w = new_state->src_w;
5946 	plane->state->src_h = new_state->src_h;
5947 	plane->state->crtc_x = new_state->crtc_x;
5948 	plane->state->crtc_y = new_state->crtc_y;
5949 	plane->state->crtc_w = new_state->crtc_w;
5950 	plane->state->crtc_h = new_state->crtc_h;
5951 
5952 	handle_cursor_update(plane, old_state);
5953 }
5954 
5955 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5956 	.prepare_fb = dm_plane_helper_prepare_fb,
5957 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5958 	.atomic_check = dm_plane_atomic_check,
5959 	.atomic_async_check = dm_plane_atomic_async_check,
5960 	.atomic_async_update = dm_plane_atomic_async_update
5961 };
5962 
5963 /*
5964  * TODO: these are currently initialized to rgb formats only.
5965  * For future use cases we should either initialize them dynamically based on
5966  * plane capabilities, or initialize this array to all formats, so internal drm
5967  * check will succeed, and let DC implement proper check
5968  */
5969 static const uint32_t rgb_formats[] = {
5970 	DRM_FORMAT_XRGB8888,
5971 	DRM_FORMAT_ARGB8888,
5972 	DRM_FORMAT_RGBA8888,
5973 	DRM_FORMAT_XRGB2101010,
5974 	DRM_FORMAT_XBGR2101010,
5975 	DRM_FORMAT_ARGB2101010,
5976 	DRM_FORMAT_ABGR2101010,
5977 	DRM_FORMAT_XBGR8888,
5978 	DRM_FORMAT_ABGR8888,
5979 	DRM_FORMAT_RGB565,
5980 };
5981 
5982 static const uint32_t overlay_formats[] = {
5983 	DRM_FORMAT_XRGB8888,
5984 	DRM_FORMAT_ARGB8888,
5985 	DRM_FORMAT_RGBA8888,
5986 	DRM_FORMAT_XBGR8888,
5987 	DRM_FORMAT_ABGR8888,
5988 	DRM_FORMAT_RGB565
5989 };
5990 
5991 static const u32 cursor_formats[] = {
5992 	DRM_FORMAT_ARGB8888
5993 };
5994 
5995 static int get_plane_formats(const struct drm_plane *plane,
5996 			     const struct dc_plane_cap *plane_cap,
5997 			     uint32_t *formats, int max_formats)
5998 {
5999 	int i, num_formats = 0;
6000 
6001 	/*
6002 	 * TODO: Query support for each group of formats directly from
6003 	 * DC plane caps. This will require adding more formats to the
6004 	 * caps list.
6005 	 */
6006 
6007 	switch (plane->type) {
6008 	case DRM_PLANE_TYPE_PRIMARY:
6009 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6010 			if (num_formats >= max_formats)
6011 				break;
6012 
6013 			formats[num_formats++] = rgb_formats[i];
6014 		}
6015 
6016 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6017 			formats[num_formats++] = DRM_FORMAT_NV12;
6018 		if (plane_cap && plane_cap->pixel_format_support.p010)
6019 			formats[num_formats++] = DRM_FORMAT_P010;
6020 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6021 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6022 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6023 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6024 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6025 		}
6026 		break;
6027 
6028 	case DRM_PLANE_TYPE_OVERLAY:
6029 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6030 			if (num_formats >= max_formats)
6031 				break;
6032 
6033 			formats[num_formats++] = overlay_formats[i];
6034 		}
6035 		break;
6036 
6037 	case DRM_PLANE_TYPE_CURSOR:
6038 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6039 			if (num_formats >= max_formats)
6040 				break;
6041 
6042 			formats[num_formats++] = cursor_formats[i];
6043 		}
6044 		break;
6045 	}
6046 
6047 	return num_formats;
6048 }
6049 
6050 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6051 				struct drm_plane *plane,
6052 				unsigned long possible_crtcs,
6053 				const struct dc_plane_cap *plane_cap)
6054 {
6055 	uint32_t formats[32];
6056 	int num_formats;
6057 	int res = -EPERM;
6058 	unsigned int supported_rotations;
6059 
6060 	num_formats = get_plane_formats(plane, plane_cap, formats,
6061 					ARRAY_SIZE(formats));
6062 
6063 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6064 				       &dm_plane_funcs, formats, num_formats,
6065 				       NULL, plane->type, NULL);
6066 	if (res)
6067 		return res;
6068 
6069 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6070 	    plane_cap && plane_cap->per_pixel_alpha) {
6071 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6072 					  BIT(DRM_MODE_BLEND_PREMULTI);
6073 
6074 		drm_plane_create_alpha_property(plane);
6075 		drm_plane_create_blend_mode_property(plane, blend_caps);
6076 	}
6077 
6078 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6079 	    plane_cap &&
6080 	    (plane_cap->pixel_format_support.nv12 ||
6081 	     plane_cap->pixel_format_support.p010)) {
6082 		/* This only affects YUV formats. */
6083 		drm_plane_create_color_properties(
6084 			plane,
6085 			BIT(DRM_COLOR_YCBCR_BT601) |
6086 			BIT(DRM_COLOR_YCBCR_BT709) |
6087 			BIT(DRM_COLOR_YCBCR_BT2020),
6088 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6089 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6090 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6091 	}
6092 
6093 	supported_rotations =
6094 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6095 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6096 
6097 	if (dm->adev->asic_type >= CHIP_BONAIRE)
6098 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6099 						   supported_rotations);
6100 
6101 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6102 
6103 	/* Create (reset) the plane state */
6104 	if (plane->funcs->reset)
6105 		plane->funcs->reset(plane);
6106 
6107 	return 0;
6108 }
6109 
6110 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6111 			       struct drm_plane *plane,
6112 			       uint32_t crtc_index)
6113 {
6114 	struct amdgpu_crtc *acrtc = NULL;
6115 	struct drm_plane *cursor_plane;
6116 
6117 	int res = -ENOMEM;
6118 
6119 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6120 	if (!cursor_plane)
6121 		goto fail;
6122 
6123 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6124 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6125 
6126 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6127 	if (!acrtc)
6128 		goto fail;
6129 
6130 	res = drm_crtc_init_with_planes(
6131 			dm->ddev,
6132 			&acrtc->base,
6133 			plane,
6134 			cursor_plane,
6135 			&amdgpu_dm_crtc_funcs, NULL);
6136 
6137 	if (res)
6138 		goto fail;
6139 
6140 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6141 
6142 	/* Create (reset) the plane state */
6143 	if (acrtc->base.funcs->reset)
6144 		acrtc->base.funcs->reset(&acrtc->base);
6145 
6146 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6147 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6148 
6149 	acrtc->crtc_id = crtc_index;
6150 	acrtc->base.enabled = false;
6151 	acrtc->otg_inst = -1;
6152 
6153 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6154 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6155 				   true, MAX_COLOR_LUT_ENTRIES);
6156 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6157 
6158 	return 0;
6159 
6160 fail:
6161 	kfree(acrtc);
6162 	kfree(cursor_plane);
6163 	return res;
6164 }
6165 
6166 
6167 static int to_drm_connector_type(enum signal_type st)
6168 {
6169 	switch (st) {
6170 	case SIGNAL_TYPE_HDMI_TYPE_A:
6171 		return DRM_MODE_CONNECTOR_HDMIA;
6172 	case SIGNAL_TYPE_EDP:
6173 		return DRM_MODE_CONNECTOR_eDP;
6174 	case SIGNAL_TYPE_LVDS:
6175 		return DRM_MODE_CONNECTOR_LVDS;
6176 	case SIGNAL_TYPE_RGB:
6177 		return DRM_MODE_CONNECTOR_VGA;
6178 	case SIGNAL_TYPE_DISPLAY_PORT:
6179 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6180 		return DRM_MODE_CONNECTOR_DisplayPort;
6181 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6182 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6183 		return DRM_MODE_CONNECTOR_DVID;
6184 	case SIGNAL_TYPE_VIRTUAL:
6185 		return DRM_MODE_CONNECTOR_VIRTUAL;
6186 
6187 	default:
6188 		return DRM_MODE_CONNECTOR_Unknown;
6189 	}
6190 }
6191 
6192 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6193 {
6194 	struct drm_encoder *encoder;
6195 
6196 	/* There is only one encoder per connector */
6197 	drm_connector_for_each_possible_encoder(connector, encoder)
6198 		return encoder;
6199 
6200 	return NULL;
6201 }
6202 
6203 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6204 {
6205 	struct drm_encoder *encoder;
6206 	struct amdgpu_encoder *amdgpu_encoder;
6207 
6208 	encoder = amdgpu_dm_connector_to_encoder(connector);
6209 
6210 	if (encoder == NULL)
6211 		return;
6212 
6213 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6214 
6215 	amdgpu_encoder->native_mode.clock = 0;
6216 
6217 	if (!list_empty(&connector->probed_modes)) {
6218 		struct drm_display_mode *preferred_mode = NULL;
6219 
6220 		list_for_each_entry(preferred_mode,
6221 				    &connector->probed_modes,
6222 				    head) {
6223 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6224 				amdgpu_encoder->native_mode = *preferred_mode;
6225 
6226 			break;
6227 		}
6228 
6229 	}
6230 }
6231 
6232 static struct drm_display_mode *
6233 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6234 			     char *name,
6235 			     int hdisplay, int vdisplay)
6236 {
6237 	struct drm_device *dev = encoder->dev;
6238 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6239 	struct drm_display_mode *mode = NULL;
6240 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6241 
6242 	mode = drm_mode_duplicate(dev, native_mode);
6243 
6244 	if (mode == NULL)
6245 		return NULL;
6246 
6247 	mode->hdisplay = hdisplay;
6248 	mode->vdisplay = vdisplay;
6249 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6250 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6251 
6252 	return mode;
6253 
6254 }
6255 
6256 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6257 						 struct drm_connector *connector)
6258 {
6259 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6260 	struct drm_display_mode *mode = NULL;
6261 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6262 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6263 				to_amdgpu_dm_connector(connector);
6264 	int i;
6265 	int n;
6266 	struct mode_size {
6267 		char name[DRM_DISPLAY_MODE_LEN];
6268 		int w;
6269 		int h;
6270 	} common_modes[] = {
6271 		{  "640x480",  640,  480},
6272 		{  "800x600",  800,  600},
6273 		{ "1024x768", 1024,  768},
6274 		{ "1280x720", 1280,  720},
6275 		{ "1280x800", 1280,  800},
6276 		{"1280x1024", 1280, 1024},
6277 		{ "1440x900", 1440,  900},
6278 		{"1680x1050", 1680, 1050},
6279 		{"1600x1200", 1600, 1200},
6280 		{"1920x1080", 1920, 1080},
6281 		{"1920x1200", 1920, 1200}
6282 	};
6283 
6284 	n = ARRAY_SIZE(common_modes);
6285 
6286 	for (i = 0; i < n; i++) {
6287 		struct drm_display_mode *curmode = NULL;
6288 		bool mode_existed = false;
6289 
6290 		if (common_modes[i].w > native_mode->hdisplay ||
6291 		    common_modes[i].h > native_mode->vdisplay ||
6292 		   (common_modes[i].w == native_mode->hdisplay &&
6293 		    common_modes[i].h == native_mode->vdisplay))
6294 			continue;
6295 
6296 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6297 			if (common_modes[i].w == curmode->hdisplay &&
6298 			    common_modes[i].h == curmode->vdisplay) {
6299 				mode_existed = true;
6300 				break;
6301 			}
6302 		}
6303 
6304 		if (mode_existed)
6305 			continue;
6306 
6307 		mode = amdgpu_dm_create_common_mode(encoder,
6308 				common_modes[i].name, common_modes[i].w,
6309 				common_modes[i].h);
6310 		drm_mode_probed_add(connector, mode);
6311 		amdgpu_dm_connector->num_modes++;
6312 	}
6313 }
6314 
6315 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6316 					      struct edid *edid)
6317 {
6318 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6319 			to_amdgpu_dm_connector(connector);
6320 
6321 	if (edid) {
6322 		/* empty probed_modes */
6323 		INIT_LIST_HEAD(&connector->probed_modes);
6324 		amdgpu_dm_connector->num_modes =
6325 				drm_add_edid_modes(connector, edid);
6326 
6327 		/* sorting the probed modes before calling function
6328 		 * amdgpu_dm_get_native_mode() since EDID can have
6329 		 * more than one preferred mode. The modes that are
6330 		 * later in the probed mode list could be of higher
6331 		 * and preferred resolution. For example, 3840x2160
6332 		 * resolution in base EDID preferred timing and 4096x2160
6333 		 * preferred resolution in DID extension block later.
6334 		 */
6335 		drm_mode_sort(&connector->probed_modes);
6336 		amdgpu_dm_get_native_mode(connector);
6337 	} else {
6338 		amdgpu_dm_connector->num_modes = 0;
6339 	}
6340 }
6341 
6342 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6343 {
6344 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6345 			to_amdgpu_dm_connector(connector);
6346 	struct drm_encoder *encoder;
6347 	struct edid *edid = amdgpu_dm_connector->edid;
6348 
6349 	encoder = amdgpu_dm_connector_to_encoder(connector);
6350 
6351 	if (!edid || !drm_edid_is_valid(edid)) {
6352 		amdgpu_dm_connector->num_modes =
6353 				drm_add_modes_noedid(connector, 640, 480);
6354 	} else {
6355 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6356 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6357 	}
6358 	amdgpu_dm_fbc_init(connector);
6359 
6360 	return amdgpu_dm_connector->num_modes;
6361 }
6362 
6363 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6364 				     struct amdgpu_dm_connector *aconnector,
6365 				     int connector_type,
6366 				     struct dc_link *link,
6367 				     int link_index)
6368 {
6369 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6370 
6371 	/*
6372 	 * Some of the properties below require access to state, like bpc.
6373 	 * Allocate some default initial connector state with our reset helper.
6374 	 */
6375 	if (aconnector->base.funcs->reset)
6376 		aconnector->base.funcs->reset(&aconnector->base);
6377 
6378 	aconnector->connector_id = link_index;
6379 	aconnector->dc_link = link;
6380 	aconnector->base.interlace_allowed = false;
6381 	aconnector->base.doublescan_allowed = false;
6382 	aconnector->base.stereo_allowed = false;
6383 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6384 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6385 	aconnector->audio_inst = -1;
6386 	mutex_init(&aconnector->hpd_lock);
6387 
6388 	/*
6389 	 * configure support HPD hot plug connector_>polled default value is 0
6390 	 * which means HPD hot plug not supported
6391 	 */
6392 	switch (connector_type) {
6393 	case DRM_MODE_CONNECTOR_HDMIA:
6394 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6395 		aconnector->base.ycbcr_420_allowed =
6396 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6397 		break;
6398 	case DRM_MODE_CONNECTOR_DisplayPort:
6399 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6400 		aconnector->base.ycbcr_420_allowed =
6401 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6402 		break;
6403 	case DRM_MODE_CONNECTOR_DVID:
6404 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6405 		break;
6406 	default:
6407 		break;
6408 	}
6409 
6410 	drm_object_attach_property(&aconnector->base.base,
6411 				dm->ddev->mode_config.scaling_mode_property,
6412 				DRM_MODE_SCALE_NONE);
6413 
6414 	drm_object_attach_property(&aconnector->base.base,
6415 				adev->mode_info.underscan_property,
6416 				UNDERSCAN_OFF);
6417 	drm_object_attach_property(&aconnector->base.base,
6418 				adev->mode_info.underscan_hborder_property,
6419 				0);
6420 	drm_object_attach_property(&aconnector->base.base,
6421 				adev->mode_info.underscan_vborder_property,
6422 				0);
6423 
6424 	if (!aconnector->mst_port)
6425 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6426 
6427 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6428 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6429 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6430 
6431 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6432 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6433 		drm_object_attach_property(&aconnector->base.base,
6434 				adev->mode_info.abm_level_property, 0);
6435 	}
6436 
6437 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6438 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6439 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6440 		drm_object_attach_property(
6441 			&aconnector->base.base,
6442 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6443 
6444 		if (!aconnector->mst_port)
6445 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6446 
6447 #ifdef CONFIG_DRM_AMD_DC_HDCP
6448 		if (adev->dm.hdcp_workqueue)
6449 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6450 #endif
6451 	}
6452 }
6453 
6454 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6455 			      struct i2c_msg *msgs, int num)
6456 {
6457 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6458 	struct ddc_service *ddc_service = i2c->ddc_service;
6459 	struct i2c_command cmd;
6460 	int i;
6461 	int result = -EIO;
6462 
6463 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6464 
6465 	if (!cmd.payloads)
6466 		return result;
6467 
6468 	cmd.number_of_payloads = num;
6469 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6470 	cmd.speed = 100;
6471 
6472 	for (i = 0; i < num; i++) {
6473 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6474 		cmd.payloads[i].address = msgs[i].addr;
6475 		cmd.payloads[i].length = msgs[i].len;
6476 		cmd.payloads[i].data = msgs[i].buf;
6477 	}
6478 
6479 	if (dc_submit_i2c(
6480 			ddc_service->ctx->dc,
6481 			ddc_service->ddc_pin->hw_info.ddc_channel,
6482 			&cmd))
6483 		result = num;
6484 
6485 	kfree(cmd.payloads);
6486 	return result;
6487 }
6488 
6489 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6490 {
6491 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6492 }
6493 
6494 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6495 	.master_xfer = amdgpu_dm_i2c_xfer,
6496 	.functionality = amdgpu_dm_i2c_func,
6497 };
6498 
6499 static struct amdgpu_i2c_adapter *
6500 create_i2c(struct ddc_service *ddc_service,
6501 	   int link_index,
6502 	   int *res)
6503 {
6504 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6505 	struct amdgpu_i2c_adapter *i2c;
6506 
6507 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6508 	if (!i2c)
6509 		return NULL;
6510 	i2c->base.owner = THIS_MODULE;
6511 	i2c->base.class = I2C_CLASS_DDC;
6512 	i2c->base.dev.parent = &adev->pdev->dev;
6513 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6514 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6515 	i2c_set_adapdata(&i2c->base, i2c);
6516 	i2c->ddc_service = ddc_service;
6517 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6518 
6519 	return i2c;
6520 }
6521 
6522 
6523 /*
6524  * Note: this function assumes that dc_link_detect() was called for the
6525  * dc_link which will be represented by this aconnector.
6526  */
6527 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6528 				    struct amdgpu_dm_connector *aconnector,
6529 				    uint32_t link_index,
6530 				    struct amdgpu_encoder *aencoder)
6531 {
6532 	int res = 0;
6533 	int connector_type;
6534 	struct dc *dc = dm->dc;
6535 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6536 	struct amdgpu_i2c_adapter *i2c;
6537 
6538 	link->priv = aconnector;
6539 
6540 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6541 
6542 	i2c = create_i2c(link->ddc, link->link_index, &res);
6543 	if (!i2c) {
6544 		DRM_ERROR("Failed to create i2c adapter data\n");
6545 		return -ENOMEM;
6546 	}
6547 
6548 	aconnector->i2c = i2c;
6549 	res = i2c_add_adapter(&i2c->base);
6550 
6551 	if (res) {
6552 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6553 		goto out_free;
6554 	}
6555 
6556 	connector_type = to_drm_connector_type(link->connector_signal);
6557 
6558 	res = drm_connector_init_with_ddc(
6559 			dm->ddev,
6560 			&aconnector->base,
6561 			&amdgpu_dm_connector_funcs,
6562 			connector_type,
6563 			&i2c->base);
6564 
6565 	if (res) {
6566 		DRM_ERROR("connector_init failed\n");
6567 		aconnector->connector_id = -1;
6568 		goto out_free;
6569 	}
6570 
6571 	drm_connector_helper_add(
6572 			&aconnector->base,
6573 			&amdgpu_dm_connector_helper_funcs);
6574 
6575 	amdgpu_dm_connector_init_helper(
6576 		dm,
6577 		aconnector,
6578 		connector_type,
6579 		link,
6580 		link_index);
6581 
6582 	drm_connector_attach_encoder(
6583 		&aconnector->base, &aencoder->base);
6584 
6585 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6586 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6587 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6588 
6589 out_free:
6590 	if (res) {
6591 		kfree(i2c);
6592 		aconnector->i2c = NULL;
6593 	}
6594 	return res;
6595 }
6596 
6597 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6598 {
6599 	switch (adev->mode_info.num_crtc) {
6600 	case 1:
6601 		return 0x1;
6602 	case 2:
6603 		return 0x3;
6604 	case 3:
6605 		return 0x7;
6606 	case 4:
6607 		return 0xf;
6608 	case 5:
6609 		return 0x1f;
6610 	case 6:
6611 	default:
6612 		return 0x3f;
6613 	}
6614 }
6615 
6616 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6617 				  struct amdgpu_encoder *aencoder,
6618 				  uint32_t link_index)
6619 {
6620 	struct amdgpu_device *adev = drm_to_adev(dev);
6621 
6622 	int res = drm_encoder_init(dev,
6623 				   &aencoder->base,
6624 				   &amdgpu_dm_encoder_funcs,
6625 				   DRM_MODE_ENCODER_TMDS,
6626 				   NULL);
6627 
6628 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6629 
6630 	if (!res)
6631 		aencoder->encoder_id = link_index;
6632 	else
6633 		aencoder->encoder_id = -1;
6634 
6635 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6636 
6637 	return res;
6638 }
6639 
6640 static void manage_dm_interrupts(struct amdgpu_device *adev,
6641 				 struct amdgpu_crtc *acrtc,
6642 				 bool enable)
6643 {
6644 	/*
6645 	 * We have no guarantee that the frontend index maps to the same
6646 	 * backend index - some even map to more than one.
6647 	 *
6648 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6649 	 */
6650 	int irq_type =
6651 		amdgpu_display_crtc_idx_to_irq_type(
6652 			adev,
6653 			acrtc->crtc_id);
6654 
6655 	if (enable) {
6656 		drm_crtc_vblank_on(&acrtc->base);
6657 		amdgpu_irq_get(
6658 			adev,
6659 			&adev->pageflip_irq,
6660 			irq_type);
6661 	} else {
6662 
6663 		amdgpu_irq_put(
6664 			adev,
6665 			&adev->pageflip_irq,
6666 			irq_type);
6667 		drm_crtc_vblank_off(&acrtc->base);
6668 	}
6669 }
6670 
6671 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6672 				      struct amdgpu_crtc *acrtc)
6673 {
6674 	int irq_type =
6675 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6676 
6677 	/**
6678 	 * This reads the current state for the IRQ and force reapplies
6679 	 * the setting to hardware.
6680 	 */
6681 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6682 }
6683 
6684 static bool
6685 is_scaling_state_different(const struct dm_connector_state *dm_state,
6686 			   const struct dm_connector_state *old_dm_state)
6687 {
6688 	if (dm_state->scaling != old_dm_state->scaling)
6689 		return true;
6690 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6691 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6692 			return true;
6693 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6694 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6695 			return true;
6696 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6697 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6698 		return true;
6699 	return false;
6700 }
6701 
6702 #ifdef CONFIG_DRM_AMD_DC_HDCP
6703 static bool is_content_protection_different(struct drm_connector_state *state,
6704 					    const struct drm_connector_state *old_state,
6705 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6706 {
6707 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6708 
6709 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6710 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6711 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6712 		return true;
6713 	}
6714 
6715 	/* CP is being re enabled, ignore this */
6716 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6717 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6718 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6719 		return false;
6720 	}
6721 
6722 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6723 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6724 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6725 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6726 
6727 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
6728 	 * hot-plug, headless s3, dpms
6729 	 */
6730 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6731 	    aconnector->dc_sink != NULL)
6732 		return true;
6733 
6734 	if (old_state->content_protection == state->content_protection)
6735 		return false;
6736 
6737 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6738 		return true;
6739 
6740 	return false;
6741 }
6742 
6743 #endif
6744 static void remove_stream(struct amdgpu_device *adev,
6745 			  struct amdgpu_crtc *acrtc,
6746 			  struct dc_stream_state *stream)
6747 {
6748 	/* this is the update mode case */
6749 
6750 	acrtc->otg_inst = -1;
6751 	acrtc->enabled = false;
6752 }
6753 
6754 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6755 			       struct dc_cursor_position *position)
6756 {
6757 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6758 	int x, y;
6759 	int xorigin = 0, yorigin = 0;
6760 
6761 	position->enable = false;
6762 	position->x = 0;
6763 	position->y = 0;
6764 
6765 	if (!crtc || !plane->state->fb)
6766 		return 0;
6767 
6768 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6769 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6770 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6771 			  __func__,
6772 			  plane->state->crtc_w,
6773 			  plane->state->crtc_h);
6774 		return -EINVAL;
6775 	}
6776 
6777 	x = plane->state->crtc_x;
6778 	y = plane->state->crtc_y;
6779 
6780 	if (x <= -amdgpu_crtc->max_cursor_width ||
6781 	    y <= -amdgpu_crtc->max_cursor_height)
6782 		return 0;
6783 
6784 	if (x < 0) {
6785 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6786 		x = 0;
6787 	}
6788 	if (y < 0) {
6789 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6790 		y = 0;
6791 	}
6792 	position->enable = true;
6793 	position->translate_by_source = true;
6794 	position->x = x;
6795 	position->y = y;
6796 	position->x_hotspot = xorigin;
6797 	position->y_hotspot = yorigin;
6798 
6799 	return 0;
6800 }
6801 
6802 static void handle_cursor_update(struct drm_plane *plane,
6803 				 struct drm_plane_state *old_plane_state)
6804 {
6805 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6806 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6807 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6808 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6809 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6810 	uint64_t address = afb ? afb->address : 0;
6811 	struct dc_cursor_position position;
6812 	struct dc_cursor_attributes attributes;
6813 	int ret;
6814 
6815 	if (!plane->state->fb && !old_plane_state->fb)
6816 		return;
6817 
6818 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6819 			 __func__,
6820 			 amdgpu_crtc->crtc_id,
6821 			 plane->state->crtc_w,
6822 			 plane->state->crtc_h);
6823 
6824 	ret = get_cursor_position(plane, crtc, &position);
6825 	if (ret)
6826 		return;
6827 
6828 	if (!position.enable) {
6829 		/* turn off cursor */
6830 		if (crtc_state && crtc_state->stream) {
6831 			mutex_lock(&adev->dm.dc_lock);
6832 			dc_stream_set_cursor_position(crtc_state->stream,
6833 						      &position);
6834 			mutex_unlock(&adev->dm.dc_lock);
6835 		}
6836 		return;
6837 	}
6838 
6839 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6840 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6841 
6842 	memset(&attributes, 0, sizeof(attributes));
6843 	attributes.address.high_part = upper_32_bits(address);
6844 	attributes.address.low_part  = lower_32_bits(address);
6845 	attributes.width             = plane->state->crtc_w;
6846 	attributes.height            = plane->state->crtc_h;
6847 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6848 	attributes.rotation_angle    = 0;
6849 	attributes.attribute_flags.value = 0;
6850 
6851 	attributes.pitch = attributes.width;
6852 
6853 	if (crtc_state->stream) {
6854 		mutex_lock(&adev->dm.dc_lock);
6855 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6856 							 &attributes))
6857 			DRM_ERROR("DC failed to set cursor attributes\n");
6858 
6859 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6860 						   &position))
6861 			DRM_ERROR("DC failed to set cursor position\n");
6862 		mutex_unlock(&adev->dm.dc_lock);
6863 	}
6864 }
6865 
6866 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6867 {
6868 
6869 	assert_spin_locked(&acrtc->base.dev->event_lock);
6870 	WARN_ON(acrtc->event);
6871 
6872 	acrtc->event = acrtc->base.state->event;
6873 
6874 	/* Set the flip status */
6875 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6876 
6877 	/* Mark this event as consumed */
6878 	acrtc->base.state->event = NULL;
6879 
6880 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6881 						 acrtc->crtc_id);
6882 }
6883 
6884 static void update_freesync_state_on_stream(
6885 	struct amdgpu_display_manager *dm,
6886 	struct dm_crtc_state *new_crtc_state,
6887 	struct dc_stream_state *new_stream,
6888 	struct dc_plane_state *surface,
6889 	u32 flip_timestamp_in_us)
6890 {
6891 	struct mod_vrr_params vrr_params;
6892 	struct dc_info_packet vrr_infopacket = {0};
6893 	struct amdgpu_device *adev = dm->adev;
6894 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6895 	unsigned long flags;
6896 
6897 	if (!new_stream)
6898 		return;
6899 
6900 	/*
6901 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6902 	 * For now it's sufficient to just guard against these conditions.
6903 	 */
6904 
6905 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6906 		return;
6907 
6908 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6909         vrr_params = acrtc->dm_irq_params.vrr_params;
6910 
6911 	if (surface) {
6912 		mod_freesync_handle_preflip(
6913 			dm->freesync_module,
6914 			surface,
6915 			new_stream,
6916 			flip_timestamp_in_us,
6917 			&vrr_params);
6918 
6919 		if (adev->family < AMDGPU_FAMILY_AI &&
6920 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6921 			mod_freesync_handle_v_update(dm->freesync_module,
6922 						     new_stream, &vrr_params);
6923 
6924 			/* Need to call this before the frame ends. */
6925 			dc_stream_adjust_vmin_vmax(dm->dc,
6926 						   new_crtc_state->stream,
6927 						   &vrr_params.adjust);
6928 		}
6929 	}
6930 
6931 	mod_freesync_build_vrr_infopacket(
6932 		dm->freesync_module,
6933 		new_stream,
6934 		&vrr_params,
6935 		PACKET_TYPE_VRR,
6936 		TRANSFER_FUNC_UNKNOWN,
6937 		&vrr_infopacket);
6938 
6939 	new_crtc_state->freesync_timing_changed |=
6940 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
6941 			&vrr_params.adjust,
6942 			sizeof(vrr_params.adjust)) != 0);
6943 
6944 	new_crtc_state->freesync_vrr_info_changed |=
6945 		(memcmp(&new_crtc_state->vrr_infopacket,
6946 			&vrr_infopacket,
6947 			sizeof(vrr_infopacket)) != 0);
6948 
6949 	acrtc->dm_irq_params.vrr_params = vrr_params;
6950 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6951 
6952 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
6953 	new_stream->vrr_infopacket = vrr_infopacket;
6954 
6955 	if (new_crtc_state->freesync_vrr_info_changed)
6956 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6957 			      new_crtc_state->base.crtc->base.id,
6958 			      (int)new_crtc_state->base.vrr_enabled,
6959 			      (int)vrr_params.state);
6960 
6961 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
6962 }
6963 
6964 static void update_stream_irq_parameters(
6965 	struct amdgpu_display_manager *dm,
6966 	struct dm_crtc_state *new_crtc_state)
6967 {
6968 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6969 	struct mod_vrr_params vrr_params;
6970 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6971 	struct amdgpu_device *adev = dm->adev;
6972 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6973 	unsigned long flags;
6974 
6975 	if (!new_stream)
6976 		return;
6977 
6978 	/*
6979 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6980 	 * For now it's sufficient to just guard against these conditions.
6981 	 */
6982 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6983 		return;
6984 
6985 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6986 	vrr_params = acrtc->dm_irq_params.vrr_params;
6987 
6988 	if (new_crtc_state->vrr_supported &&
6989 	    config.min_refresh_in_uhz &&
6990 	    config.max_refresh_in_uhz) {
6991 		config.state = new_crtc_state->base.vrr_enabled ?
6992 			VRR_STATE_ACTIVE_VARIABLE :
6993 			VRR_STATE_INACTIVE;
6994 	} else {
6995 		config.state = VRR_STATE_UNSUPPORTED;
6996 	}
6997 
6998 	mod_freesync_build_vrr_params(dm->freesync_module,
6999 				      new_stream,
7000 				      &config, &vrr_params);
7001 
7002 	new_crtc_state->freesync_timing_changed |=
7003 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7004 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7005 
7006 	new_crtc_state->freesync_config = config;
7007 	/* Copy state for access from DM IRQ handler */
7008 	acrtc->dm_irq_params.freesync_config = config;
7009 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7010 	acrtc->dm_irq_params.vrr_params = vrr_params;
7011 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7012 }
7013 
7014 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7015 					    struct dm_crtc_state *new_state)
7016 {
7017 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7018 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7019 
7020 	if (!old_vrr_active && new_vrr_active) {
7021 		/* Transition VRR inactive -> active:
7022 		 * While VRR is active, we must not disable vblank irq, as a
7023 		 * reenable after disable would compute bogus vblank/pflip
7024 		 * timestamps if it likely happened inside display front-porch.
7025 		 *
7026 		 * We also need vupdate irq for the actual core vblank handling
7027 		 * at end of vblank.
7028 		 */
7029 		dm_set_vupdate_irq(new_state->base.crtc, true);
7030 		drm_crtc_vblank_get(new_state->base.crtc);
7031 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7032 				 __func__, new_state->base.crtc->base.id);
7033 	} else if (old_vrr_active && !new_vrr_active) {
7034 		/* Transition VRR active -> inactive:
7035 		 * Allow vblank irq disable again for fixed refresh rate.
7036 		 */
7037 		dm_set_vupdate_irq(new_state->base.crtc, false);
7038 		drm_crtc_vblank_put(new_state->base.crtc);
7039 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7040 				 __func__, new_state->base.crtc->base.id);
7041 	}
7042 }
7043 
7044 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7045 {
7046 	struct drm_plane *plane;
7047 	struct drm_plane_state *old_plane_state, *new_plane_state;
7048 	int i;
7049 
7050 	/*
7051 	 * TODO: Make this per-stream so we don't issue redundant updates for
7052 	 * commits with multiple streams.
7053 	 */
7054 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7055 				       new_plane_state, i)
7056 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7057 			handle_cursor_update(plane, old_plane_state);
7058 }
7059 
7060 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7061 				    struct dc_state *dc_state,
7062 				    struct drm_device *dev,
7063 				    struct amdgpu_display_manager *dm,
7064 				    struct drm_crtc *pcrtc,
7065 				    bool wait_for_vblank)
7066 {
7067 	uint32_t i;
7068 	uint64_t timestamp_ns;
7069 	struct drm_plane *plane;
7070 	struct drm_plane_state *old_plane_state, *new_plane_state;
7071 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7072 	struct drm_crtc_state *new_pcrtc_state =
7073 			drm_atomic_get_new_crtc_state(state, pcrtc);
7074 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7075 	struct dm_crtc_state *dm_old_crtc_state =
7076 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7077 	int planes_count = 0, vpos, hpos;
7078 	long r;
7079 	unsigned long flags;
7080 	struct amdgpu_bo *abo;
7081 	uint32_t target_vblank, last_flip_vblank;
7082 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7083 	bool pflip_present = false;
7084 	struct {
7085 		struct dc_surface_update surface_updates[MAX_SURFACES];
7086 		struct dc_plane_info plane_infos[MAX_SURFACES];
7087 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7088 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7089 		struct dc_stream_update stream_update;
7090 	} *bundle;
7091 
7092 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7093 
7094 	if (!bundle) {
7095 		dm_error("Failed to allocate update bundle\n");
7096 		goto cleanup;
7097 	}
7098 
7099 	/*
7100 	 * Disable the cursor first if we're disabling all the planes.
7101 	 * It'll remain on the screen after the planes are re-enabled
7102 	 * if we don't.
7103 	 */
7104 	if (acrtc_state->active_planes == 0)
7105 		amdgpu_dm_commit_cursors(state);
7106 
7107 	/* update planes when needed */
7108 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7109 		struct drm_crtc *crtc = new_plane_state->crtc;
7110 		struct drm_crtc_state *new_crtc_state;
7111 		struct drm_framebuffer *fb = new_plane_state->fb;
7112 		bool plane_needs_flip;
7113 		struct dc_plane_state *dc_plane;
7114 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7115 
7116 		/* Cursor plane is handled after stream updates */
7117 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7118 			continue;
7119 
7120 		if (!fb || !crtc || pcrtc != crtc)
7121 			continue;
7122 
7123 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7124 		if (!new_crtc_state->active)
7125 			continue;
7126 
7127 		dc_plane = dm_new_plane_state->dc_state;
7128 
7129 		bundle->surface_updates[planes_count].surface = dc_plane;
7130 		if (new_pcrtc_state->color_mgmt_changed) {
7131 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7132 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7133 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7134 		}
7135 
7136 		fill_dc_scaling_info(new_plane_state,
7137 				     &bundle->scaling_infos[planes_count]);
7138 
7139 		bundle->surface_updates[planes_count].scaling_info =
7140 			&bundle->scaling_infos[planes_count];
7141 
7142 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7143 
7144 		pflip_present = pflip_present || plane_needs_flip;
7145 
7146 		if (!plane_needs_flip) {
7147 			planes_count += 1;
7148 			continue;
7149 		}
7150 
7151 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7152 
7153 		/*
7154 		 * Wait for all fences on this FB. Do limited wait to avoid
7155 		 * deadlock during GPU reset when this fence will not signal
7156 		 * but we hold reservation lock for the BO.
7157 		 */
7158 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7159 							false,
7160 							msecs_to_jiffies(5000));
7161 		if (unlikely(r <= 0))
7162 			DRM_ERROR("Waiting for fences timed out!");
7163 
7164 		fill_dc_plane_info_and_addr(
7165 			dm->adev, new_plane_state,
7166 			dm_new_plane_state->tiling_flags,
7167 			&bundle->plane_infos[planes_count],
7168 			&bundle->flip_addrs[planes_count].address,
7169 			dm_new_plane_state->tmz_surface, false);
7170 
7171 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7172 				 new_plane_state->plane->index,
7173 				 bundle->plane_infos[planes_count].dcc.enable);
7174 
7175 		bundle->surface_updates[planes_count].plane_info =
7176 			&bundle->plane_infos[planes_count];
7177 
7178 		/*
7179 		 * Only allow immediate flips for fast updates that don't
7180 		 * change FB pitch, DCC state, rotation or mirroing.
7181 		 */
7182 		bundle->flip_addrs[planes_count].flip_immediate =
7183 			crtc->state->async_flip &&
7184 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7185 
7186 		timestamp_ns = ktime_get_ns();
7187 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7188 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7189 		bundle->surface_updates[planes_count].surface = dc_plane;
7190 
7191 		if (!bundle->surface_updates[planes_count].surface) {
7192 			DRM_ERROR("No surface for CRTC: id=%d\n",
7193 					acrtc_attach->crtc_id);
7194 			continue;
7195 		}
7196 
7197 		if (plane == pcrtc->primary)
7198 			update_freesync_state_on_stream(
7199 				dm,
7200 				acrtc_state,
7201 				acrtc_state->stream,
7202 				dc_plane,
7203 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7204 
7205 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7206 				 __func__,
7207 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7208 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7209 
7210 		planes_count += 1;
7211 
7212 	}
7213 
7214 	if (pflip_present) {
7215 		if (!vrr_active) {
7216 			/* Use old throttling in non-vrr fixed refresh rate mode
7217 			 * to keep flip scheduling based on target vblank counts
7218 			 * working in a backwards compatible way, e.g., for
7219 			 * clients using the GLX_OML_sync_control extension or
7220 			 * DRI3/Present extension with defined target_msc.
7221 			 */
7222 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7223 		}
7224 		else {
7225 			/* For variable refresh rate mode only:
7226 			 * Get vblank of last completed flip to avoid > 1 vrr
7227 			 * flips per video frame by use of throttling, but allow
7228 			 * flip programming anywhere in the possibly large
7229 			 * variable vrr vblank interval for fine-grained flip
7230 			 * timing control and more opportunity to avoid stutter
7231 			 * on late submission of flips.
7232 			 */
7233 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7234 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7235 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7236 		}
7237 
7238 		target_vblank = last_flip_vblank + wait_for_vblank;
7239 
7240 		/*
7241 		 * Wait until we're out of the vertical blank period before the one
7242 		 * targeted by the flip
7243 		 */
7244 		while ((acrtc_attach->enabled &&
7245 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7246 							    0, &vpos, &hpos, NULL,
7247 							    NULL, &pcrtc->hwmode)
7248 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7249 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7250 			(int)(target_vblank -
7251 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7252 			usleep_range(1000, 1100);
7253 		}
7254 
7255 		/**
7256 		 * Prepare the flip event for the pageflip interrupt to handle.
7257 		 *
7258 		 * This only works in the case where we've already turned on the
7259 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7260 		 * from 0 -> n planes we have to skip a hardware generated event
7261 		 * and rely on sending it from software.
7262 		 */
7263 		if (acrtc_attach->base.state->event &&
7264 		    acrtc_state->active_planes > 0) {
7265 			drm_crtc_vblank_get(pcrtc);
7266 
7267 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7268 
7269 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7270 			prepare_flip_isr(acrtc_attach);
7271 
7272 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7273 		}
7274 
7275 		if (acrtc_state->stream) {
7276 			if (acrtc_state->freesync_vrr_info_changed)
7277 				bundle->stream_update.vrr_infopacket =
7278 					&acrtc_state->stream->vrr_infopacket;
7279 		}
7280 	}
7281 
7282 	/* Update the planes if changed or disable if we don't have any. */
7283 	if ((planes_count || acrtc_state->active_planes == 0) &&
7284 		acrtc_state->stream) {
7285 		bundle->stream_update.stream = acrtc_state->stream;
7286 		if (new_pcrtc_state->mode_changed) {
7287 			bundle->stream_update.src = acrtc_state->stream->src;
7288 			bundle->stream_update.dst = acrtc_state->stream->dst;
7289 		}
7290 
7291 		if (new_pcrtc_state->color_mgmt_changed) {
7292 			/*
7293 			 * TODO: This isn't fully correct since we've actually
7294 			 * already modified the stream in place.
7295 			 */
7296 			bundle->stream_update.gamut_remap =
7297 				&acrtc_state->stream->gamut_remap_matrix;
7298 			bundle->stream_update.output_csc_transform =
7299 				&acrtc_state->stream->csc_color_matrix;
7300 			bundle->stream_update.out_transfer_func =
7301 				acrtc_state->stream->out_transfer_func;
7302 		}
7303 
7304 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7305 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7306 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7307 
7308 		/*
7309 		 * If FreeSync state on the stream has changed then we need to
7310 		 * re-adjust the min/max bounds now that DC doesn't handle this
7311 		 * as part of commit.
7312 		 */
7313 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7314 		    amdgpu_dm_vrr_active(acrtc_state)) {
7315 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7316 			dc_stream_adjust_vmin_vmax(
7317 				dm->dc, acrtc_state->stream,
7318 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7319 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7320 		}
7321 		mutex_lock(&dm->dc_lock);
7322 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7323 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7324 			amdgpu_dm_psr_disable(acrtc_state->stream);
7325 
7326 		dc_commit_updates_for_stream(dm->dc,
7327 						     bundle->surface_updates,
7328 						     planes_count,
7329 						     acrtc_state->stream,
7330 						     &bundle->stream_update,
7331 						     dc_state);
7332 
7333 		/**
7334 		 * Enable or disable the interrupts on the backend.
7335 		 *
7336 		 * Most pipes are put into power gating when unused.
7337 		 *
7338 		 * When power gating is enabled on a pipe we lose the
7339 		 * interrupt enablement state when power gating is disabled.
7340 		 *
7341 		 * So we need to update the IRQ control state in hardware
7342 		 * whenever the pipe turns on (since it could be previously
7343 		 * power gated) or off (since some pipes can't be power gated
7344 		 * on some ASICs).
7345 		 */
7346 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7347 			dm_update_pflip_irq_state(drm_to_adev(dev),
7348 						  acrtc_attach);
7349 
7350 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7351 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7352 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7353 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7354 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7355 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7356 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7357 			amdgpu_dm_psr_enable(acrtc_state->stream);
7358 		}
7359 
7360 		mutex_unlock(&dm->dc_lock);
7361 	}
7362 
7363 	/*
7364 	 * Update cursor state *after* programming all the planes.
7365 	 * This avoids redundant programming in the case where we're going
7366 	 * to be disabling a single plane - those pipes are being disabled.
7367 	 */
7368 	if (acrtc_state->active_planes)
7369 		amdgpu_dm_commit_cursors(state);
7370 
7371 cleanup:
7372 	kfree(bundle);
7373 }
7374 
7375 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7376 				   struct drm_atomic_state *state)
7377 {
7378 	struct amdgpu_device *adev = drm_to_adev(dev);
7379 	struct amdgpu_dm_connector *aconnector;
7380 	struct drm_connector *connector;
7381 	struct drm_connector_state *old_con_state, *new_con_state;
7382 	struct drm_crtc_state *new_crtc_state;
7383 	struct dm_crtc_state *new_dm_crtc_state;
7384 	const struct dc_stream_status *status;
7385 	int i, inst;
7386 
7387 	/* Notify device removals. */
7388 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7389 		if (old_con_state->crtc != new_con_state->crtc) {
7390 			/* CRTC changes require notification. */
7391 			goto notify;
7392 		}
7393 
7394 		if (!new_con_state->crtc)
7395 			continue;
7396 
7397 		new_crtc_state = drm_atomic_get_new_crtc_state(
7398 			state, new_con_state->crtc);
7399 
7400 		if (!new_crtc_state)
7401 			continue;
7402 
7403 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7404 			continue;
7405 
7406 	notify:
7407 		aconnector = to_amdgpu_dm_connector(connector);
7408 
7409 		mutex_lock(&adev->dm.audio_lock);
7410 		inst = aconnector->audio_inst;
7411 		aconnector->audio_inst = -1;
7412 		mutex_unlock(&adev->dm.audio_lock);
7413 
7414 		amdgpu_dm_audio_eld_notify(adev, inst);
7415 	}
7416 
7417 	/* Notify audio device additions. */
7418 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7419 		if (!new_con_state->crtc)
7420 			continue;
7421 
7422 		new_crtc_state = drm_atomic_get_new_crtc_state(
7423 			state, new_con_state->crtc);
7424 
7425 		if (!new_crtc_state)
7426 			continue;
7427 
7428 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7429 			continue;
7430 
7431 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7432 		if (!new_dm_crtc_state->stream)
7433 			continue;
7434 
7435 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7436 		if (!status)
7437 			continue;
7438 
7439 		aconnector = to_amdgpu_dm_connector(connector);
7440 
7441 		mutex_lock(&adev->dm.audio_lock);
7442 		inst = status->audio_inst;
7443 		aconnector->audio_inst = inst;
7444 		mutex_unlock(&adev->dm.audio_lock);
7445 
7446 		amdgpu_dm_audio_eld_notify(adev, inst);
7447 	}
7448 }
7449 
7450 /*
7451  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7452  * @crtc_state: the DRM CRTC state
7453  * @stream_state: the DC stream state.
7454  *
7455  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7456  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7457  */
7458 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7459 						struct dc_stream_state *stream_state)
7460 {
7461 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7462 }
7463 
7464 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7465 				   struct drm_atomic_state *state,
7466 				   bool nonblock)
7467 {
7468 	/*
7469 	 * Add check here for SoC's that support hardware cursor plane, to
7470 	 * unset legacy_cursor_update
7471 	 */
7472 
7473 	return drm_atomic_helper_commit(dev, state, nonblock);
7474 
7475 	/*TODO Handle EINTR, reenable IRQ*/
7476 }
7477 
7478 /**
7479  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7480  * @state: The atomic state to commit
7481  *
7482  * This will tell DC to commit the constructed DC state from atomic_check,
7483  * programming the hardware. Any failures here implies a hardware failure, since
7484  * atomic check should have filtered anything non-kosher.
7485  */
7486 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7487 {
7488 	struct drm_device *dev = state->dev;
7489 	struct amdgpu_device *adev = drm_to_adev(dev);
7490 	struct amdgpu_display_manager *dm = &adev->dm;
7491 	struct dm_atomic_state *dm_state;
7492 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7493 	uint32_t i, j;
7494 	struct drm_crtc *crtc;
7495 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7496 	unsigned long flags;
7497 	bool wait_for_vblank = true;
7498 	struct drm_connector *connector;
7499 	struct drm_connector_state *old_con_state, *new_con_state;
7500 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7501 	int crtc_disable_count = 0;
7502 	bool mode_set_reset_required = false;
7503 
7504 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7505 	drm_atomic_helper_calc_timestamping_constants(state);
7506 
7507 	dm_state = dm_atomic_get_new_state(state);
7508 	if (dm_state && dm_state->context) {
7509 		dc_state = dm_state->context;
7510 	} else {
7511 		/* No state changes, retain current state. */
7512 		dc_state_temp = dc_create_state(dm->dc);
7513 		ASSERT(dc_state_temp);
7514 		dc_state = dc_state_temp;
7515 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7516 	}
7517 
7518 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7519 				       new_crtc_state, i) {
7520 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7521 
7522 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7523 
7524 		if (old_crtc_state->active &&
7525 		    (!new_crtc_state->active ||
7526 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7527 			manage_dm_interrupts(adev, acrtc, false);
7528 			dc_stream_release(dm_old_crtc_state->stream);
7529 		}
7530 	}
7531 
7532 	/* update changed items */
7533 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7534 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7535 
7536 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7537 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7538 
7539 		DRM_DEBUG_DRIVER(
7540 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7541 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7542 			"connectors_changed:%d\n",
7543 			acrtc->crtc_id,
7544 			new_crtc_state->enable,
7545 			new_crtc_state->active,
7546 			new_crtc_state->planes_changed,
7547 			new_crtc_state->mode_changed,
7548 			new_crtc_state->active_changed,
7549 			new_crtc_state->connectors_changed);
7550 
7551 		/* Copy all transient state flags into dc state */
7552 		if (dm_new_crtc_state->stream) {
7553 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7554 							    dm_new_crtc_state->stream);
7555 		}
7556 
7557 		/* handles headless hotplug case, updating new_state and
7558 		 * aconnector as needed
7559 		 */
7560 
7561 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7562 
7563 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7564 
7565 			if (!dm_new_crtc_state->stream) {
7566 				/*
7567 				 * this could happen because of issues with
7568 				 * userspace notifications delivery.
7569 				 * In this case userspace tries to set mode on
7570 				 * display which is disconnected in fact.
7571 				 * dc_sink is NULL in this case on aconnector.
7572 				 * We expect reset mode will come soon.
7573 				 *
7574 				 * This can also happen when unplug is done
7575 				 * during resume sequence ended
7576 				 *
7577 				 * In this case, we want to pretend we still
7578 				 * have a sink to keep the pipe running so that
7579 				 * hw state is consistent with the sw state
7580 				 */
7581 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7582 						__func__, acrtc->base.base.id);
7583 				continue;
7584 			}
7585 
7586 			if (dm_old_crtc_state->stream)
7587 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7588 
7589 			pm_runtime_get_noresume(dev->dev);
7590 
7591 			acrtc->enabled = true;
7592 			acrtc->hw_mode = new_crtc_state->mode;
7593 			crtc->hwmode = new_crtc_state->mode;
7594 			mode_set_reset_required = true;
7595 		} else if (modereset_required(new_crtc_state)) {
7596 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7597 			/* i.e. reset mode */
7598 			if (dm_old_crtc_state->stream)
7599 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7600 			mode_set_reset_required = true;
7601 		}
7602 	} /* for_each_crtc_in_state() */
7603 
7604 	if (dc_state) {
7605 		/* if there mode set or reset, disable eDP PSR */
7606 		if (mode_set_reset_required)
7607 			amdgpu_dm_psr_disable_all(dm);
7608 
7609 		dm_enable_per_frame_crtc_master_sync(dc_state);
7610 		mutex_lock(&dm->dc_lock);
7611 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7612 		mutex_unlock(&dm->dc_lock);
7613 	}
7614 
7615 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7616 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7617 
7618 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7619 
7620 		if (dm_new_crtc_state->stream != NULL) {
7621 			const struct dc_stream_status *status =
7622 					dc_stream_get_status(dm_new_crtc_state->stream);
7623 
7624 			if (!status)
7625 				status = dc_stream_get_status_from_state(dc_state,
7626 									 dm_new_crtc_state->stream);
7627 			if (!status)
7628 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7629 			else
7630 				acrtc->otg_inst = status->primary_otg_inst;
7631 		}
7632 	}
7633 #ifdef CONFIG_DRM_AMD_DC_HDCP
7634 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7635 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7636 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7637 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7638 
7639 		new_crtc_state = NULL;
7640 
7641 		if (acrtc)
7642 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7643 
7644 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7645 
7646 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7647 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7648 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7649 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7650 			continue;
7651 		}
7652 
7653 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7654 			hdcp_update_display(
7655 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7656 				new_con_state->hdcp_content_type,
7657 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7658 													 : false);
7659 	}
7660 #endif
7661 
7662 	/* Handle connector state changes */
7663 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7664 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7665 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7666 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7667 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7668 		struct dc_stream_update stream_update;
7669 		struct dc_info_packet hdr_packet;
7670 		struct dc_stream_status *status = NULL;
7671 		bool abm_changed, hdr_changed, scaling_changed;
7672 
7673 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7674 		memset(&stream_update, 0, sizeof(stream_update));
7675 
7676 		if (acrtc) {
7677 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7678 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7679 		}
7680 
7681 		/* Skip any modesets/resets */
7682 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7683 			continue;
7684 
7685 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7686 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7687 
7688 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7689 							     dm_old_con_state);
7690 
7691 		abm_changed = dm_new_crtc_state->abm_level !=
7692 			      dm_old_crtc_state->abm_level;
7693 
7694 		hdr_changed =
7695 			is_hdr_metadata_different(old_con_state, new_con_state);
7696 
7697 		if (!scaling_changed && !abm_changed && !hdr_changed)
7698 			continue;
7699 
7700 		stream_update.stream = dm_new_crtc_state->stream;
7701 		if (scaling_changed) {
7702 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7703 					dm_new_con_state, dm_new_crtc_state->stream);
7704 
7705 			stream_update.src = dm_new_crtc_state->stream->src;
7706 			stream_update.dst = dm_new_crtc_state->stream->dst;
7707 		}
7708 
7709 		if (abm_changed) {
7710 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7711 
7712 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7713 		}
7714 
7715 		if (hdr_changed) {
7716 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7717 			stream_update.hdr_static_metadata = &hdr_packet;
7718 		}
7719 
7720 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7721 		WARN_ON(!status);
7722 		WARN_ON(!status->plane_count);
7723 
7724 		/*
7725 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7726 		 * Here we create an empty update on each plane.
7727 		 * To fix this, DC should permit updating only stream properties.
7728 		 */
7729 		for (j = 0; j < status->plane_count; j++)
7730 			dummy_updates[j].surface = status->plane_states[0];
7731 
7732 
7733 		mutex_lock(&dm->dc_lock);
7734 		dc_commit_updates_for_stream(dm->dc,
7735 						     dummy_updates,
7736 						     status->plane_count,
7737 						     dm_new_crtc_state->stream,
7738 						     &stream_update,
7739 						     dc_state);
7740 		mutex_unlock(&dm->dc_lock);
7741 	}
7742 
7743 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7744 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7745 				      new_crtc_state, i) {
7746 		if (old_crtc_state->active && !new_crtc_state->active)
7747 			crtc_disable_count++;
7748 
7749 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7750 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7751 
7752 		/* For freesync config update on crtc state and params for irq */
7753 		update_stream_irq_parameters(dm, dm_new_crtc_state);
7754 
7755 		/* Handle vrr on->off / off->on transitions */
7756 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7757 						dm_new_crtc_state);
7758 	}
7759 
7760 	/**
7761 	 * Enable interrupts for CRTCs that are newly enabled or went through
7762 	 * a modeset. It was intentionally deferred until after the front end
7763 	 * state was modified to wait until the OTG was on and so the IRQ
7764 	 * handlers didn't access stale or invalid state.
7765 	 */
7766 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7767 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7768 
7769 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7770 
7771 		if (new_crtc_state->active &&
7772 		    (!old_crtc_state->active ||
7773 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7774 			dc_stream_retain(dm_new_crtc_state->stream);
7775 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7776 			manage_dm_interrupts(adev, acrtc, true);
7777 
7778 #ifdef CONFIG_DEBUG_FS
7779 			/**
7780 			 * Frontend may have changed so reapply the CRC capture
7781 			 * settings for the stream.
7782 			 */
7783 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7784 
7785 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7786 				amdgpu_dm_crtc_configure_crc_source(
7787 					crtc, dm_new_crtc_state,
7788 					dm_new_crtc_state->crc_src);
7789 			}
7790 #endif
7791 		}
7792 	}
7793 
7794 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7795 		if (new_crtc_state->async_flip)
7796 			wait_for_vblank = false;
7797 
7798 	/* update planes when needed per crtc*/
7799 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7800 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7801 
7802 		if (dm_new_crtc_state->stream)
7803 			amdgpu_dm_commit_planes(state, dc_state, dev,
7804 						dm, crtc, wait_for_vblank);
7805 	}
7806 
7807 	/* Update audio instances for each connector. */
7808 	amdgpu_dm_commit_audio(dev, state);
7809 
7810 	/*
7811 	 * send vblank event on all events not handled in flip and
7812 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7813 	 */
7814 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7815 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7816 
7817 		if (new_crtc_state->event)
7818 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7819 
7820 		new_crtc_state->event = NULL;
7821 	}
7822 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7823 
7824 	/* Signal HW programming completion */
7825 	drm_atomic_helper_commit_hw_done(state);
7826 
7827 	if (wait_for_vblank)
7828 		drm_atomic_helper_wait_for_flip_done(dev, state);
7829 
7830 	drm_atomic_helper_cleanup_planes(dev, state);
7831 
7832 	/*
7833 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7834 	 * so we can put the GPU into runtime suspend if we're not driving any
7835 	 * displays anymore
7836 	 */
7837 	for (i = 0; i < crtc_disable_count; i++)
7838 		pm_runtime_put_autosuspend(dev->dev);
7839 	pm_runtime_mark_last_busy(dev->dev);
7840 
7841 	if (dc_state_temp)
7842 		dc_release_state(dc_state_temp);
7843 }
7844 
7845 
7846 static int dm_force_atomic_commit(struct drm_connector *connector)
7847 {
7848 	int ret = 0;
7849 	struct drm_device *ddev = connector->dev;
7850 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7851 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7852 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7853 	struct drm_connector_state *conn_state;
7854 	struct drm_crtc_state *crtc_state;
7855 	struct drm_plane_state *plane_state;
7856 
7857 	if (!state)
7858 		return -ENOMEM;
7859 
7860 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7861 
7862 	/* Construct an atomic state to restore previous display setting */
7863 
7864 	/*
7865 	 * Attach connectors to drm_atomic_state
7866 	 */
7867 	conn_state = drm_atomic_get_connector_state(state, connector);
7868 
7869 	ret = PTR_ERR_OR_ZERO(conn_state);
7870 	if (ret)
7871 		goto err;
7872 
7873 	/* Attach crtc to drm_atomic_state*/
7874 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7875 
7876 	ret = PTR_ERR_OR_ZERO(crtc_state);
7877 	if (ret)
7878 		goto err;
7879 
7880 	/* force a restore */
7881 	crtc_state->mode_changed = true;
7882 
7883 	/* Attach plane to drm_atomic_state */
7884 	plane_state = drm_atomic_get_plane_state(state, plane);
7885 
7886 	ret = PTR_ERR_OR_ZERO(plane_state);
7887 	if (ret)
7888 		goto err;
7889 
7890 
7891 	/* Call commit internally with the state we just constructed */
7892 	ret = drm_atomic_commit(state);
7893 	if (!ret)
7894 		return 0;
7895 
7896 err:
7897 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7898 	drm_atomic_state_put(state);
7899 
7900 	return ret;
7901 }
7902 
7903 /*
7904  * This function handles all cases when set mode does not come upon hotplug.
7905  * This includes when a display is unplugged then plugged back into the
7906  * same port and when running without usermode desktop manager supprot
7907  */
7908 void dm_restore_drm_connector_state(struct drm_device *dev,
7909 				    struct drm_connector *connector)
7910 {
7911 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7912 	struct amdgpu_crtc *disconnected_acrtc;
7913 	struct dm_crtc_state *acrtc_state;
7914 
7915 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7916 		return;
7917 
7918 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7919 	if (!disconnected_acrtc)
7920 		return;
7921 
7922 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7923 	if (!acrtc_state->stream)
7924 		return;
7925 
7926 	/*
7927 	 * If the previous sink is not released and different from the current,
7928 	 * we deduce we are in a state where we can not rely on usermode call
7929 	 * to turn on the display, so we do it here
7930 	 */
7931 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7932 		dm_force_atomic_commit(&aconnector->base);
7933 }
7934 
7935 /*
7936  * Grabs all modesetting locks to serialize against any blocking commits,
7937  * Waits for completion of all non blocking commits.
7938  */
7939 static int do_aquire_global_lock(struct drm_device *dev,
7940 				 struct drm_atomic_state *state)
7941 {
7942 	struct drm_crtc *crtc;
7943 	struct drm_crtc_commit *commit;
7944 	long ret;
7945 
7946 	/*
7947 	 * Adding all modeset locks to aquire_ctx will
7948 	 * ensure that when the framework release it the
7949 	 * extra locks we are locking here will get released to
7950 	 */
7951 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7952 	if (ret)
7953 		return ret;
7954 
7955 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7956 		spin_lock(&crtc->commit_lock);
7957 		commit = list_first_entry_or_null(&crtc->commit_list,
7958 				struct drm_crtc_commit, commit_entry);
7959 		if (commit)
7960 			drm_crtc_commit_get(commit);
7961 		spin_unlock(&crtc->commit_lock);
7962 
7963 		if (!commit)
7964 			continue;
7965 
7966 		/*
7967 		 * Make sure all pending HW programming completed and
7968 		 * page flips done
7969 		 */
7970 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7971 
7972 		if (ret > 0)
7973 			ret = wait_for_completion_interruptible_timeout(
7974 					&commit->flip_done, 10*HZ);
7975 
7976 		if (ret == 0)
7977 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7978 				  "timed out\n", crtc->base.id, crtc->name);
7979 
7980 		drm_crtc_commit_put(commit);
7981 	}
7982 
7983 	return ret < 0 ? ret : 0;
7984 }
7985 
7986 static void get_freesync_config_for_crtc(
7987 	struct dm_crtc_state *new_crtc_state,
7988 	struct dm_connector_state *new_con_state)
7989 {
7990 	struct mod_freesync_config config = {0};
7991 	struct amdgpu_dm_connector *aconnector =
7992 			to_amdgpu_dm_connector(new_con_state->base.connector);
7993 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7994 	int vrefresh = drm_mode_vrefresh(mode);
7995 
7996 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7997 					vrefresh >= aconnector->min_vfreq &&
7998 					vrefresh <= aconnector->max_vfreq;
7999 
8000 	if (new_crtc_state->vrr_supported) {
8001 		new_crtc_state->stream->ignore_msa_timing_param = true;
8002 		config.state = new_crtc_state->base.vrr_enabled ?
8003 				VRR_STATE_ACTIVE_VARIABLE :
8004 				VRR_STATE_INACTIVE;
8005 		config.min_refresh_in_uhz =
8006 				aconnector->min_vfreq * 1000000;
8007 		config.max_refresh_in_uhz =
8008 				aconnector->max_vfreq * 1000000;
8009 		config.vsif_supported = true;
8010 		config.btr = true;
8011 	}
8012 
8013 	new_crtc_state->freesync_config = config;
8014 }
8015 
8016 static void reset_freesync_config_for_crtc(
8017 	struct dm_crtc_state *new_crtc_state)
8018 {
8019 	new_crtc_state->vrr_supported = false;
8020 
8021 	memset(&new_crtc_state->vrr_infopacket, 0,
8022 	       sizeof(new_crtc_state->vrr_infopacket));
8023 }
8024 
8025 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8026 				struct drm_atomic_state *state,
8027 				struct drm_crtc *crtc,
8028 				struct drm_crtc_state *old_crtc_state,
8029 				struct drm_crtc_state *new_crtc_state,
8030 				bool enable,
8031 				bool *lock_and_validation_needed)
8032 {
8033 	struct dm_atomic_state *dm_state = NULL;
8034 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8035 	struct dc_stream_state *new_stream;
8036 	int ret = 0;
8037 
8038 	/*
8039 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8040 	 * update changed items
8041 	 */
8042 	struct amdgpu_crtc *acrtc = NULL;
8043 	struct amdgpu_dm_connector *aconnector = NULL;
8044 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8045 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8046 
8047 	new_stream = NULL;
8048 
8049 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8050 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8051 	acrtc = to_amdgpu_crtc(crtc);
8052 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8053 
8054 	/* TODO This hack should go away */
8055 	if (aconnector && enable) {
8056 		/* Make sure fake sink is created in plug-in scenario */
8057 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8058 							    &aconnector->base);
8059 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8060 							    &aconnector->base);
8061 
8062 		if (IS_ERR(drm_new_conn_state)) {
8063 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8064 			goto fail;
8065 		}
8066 
8067 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8068 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8069 
8070 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8071 			goto skip_modeset;
8072 
8073 		new_stream = create_validate_stream_for_sink(aconnector,
8074 							     &new_crtc_state->mode,
8075 							     dm_new_conn_state,
8076 							     dm_old_crtc_state->stream);
8077 
8078 		/*
8079 		 * we can have no stream on ACTION_SET if a display
8080 		 * was disconnected during S3, in this case it is not an
8081 		 * error, the OS will be updated after detection, and
8082 		 * will do the right thing on next atomic commit
8083 		 */
8084 
8085 		if (!new_stream) {
8086 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8087 					__func__, acrtc->base.base.id);
8088 			ret = -ENOMEM;
8089 			goto fail;
8090 		}
8091 
8092 		/*
8093 		 * TODO: Check VSDB bits to decide whether this should
8094 		 * be enabled or not.
8095 		 */
8096 		new_stream->triggered_crtc_reset.enabled =
8097 			dm->force_timing_sync;
8098 
8099 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8100 
8101 		ret = fill_hdr_info_packet(drm_new_conn_state,
8102 					   &new_stream->hdr_static_metadata);
8103 		if (ret)
8104 			goto fail;
8105 
8106 		/*
8107 		 * If we already removed the old stream from the context
8108 		 * (and set the new stream to NULL) then we can't reuse
8109 		 * the old stream even if the stream and scaling are unchanged.
8110 		 * We'll hit the BUG_ON and black screen.
8111 		 *
8112 		 * TODO: Refactor this function to allow this check to work
8113 		 * in all conditions.
8114 		 */
8115 		if (dm_new_crtc_state->stream &&
8116 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8117 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8118 			new_crtc_state->mode_changed = false;
8119 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8120 					 new_crtc_state->mode_changed);
8121 		}
8122 	}
8123 
8124 	/* mode_changed flag may get updated above, need to check again */
8125 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8126 		goto skip_modeset;
8127 
8128 	DRM_DEBUG_DRIVER(
8129 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8130 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8131 		"connectors_changed:%d\n",
8132 		acrtc->crtc_id,
8133 		new_crtc_state->enable,
8134 		new_crtc_state->active,
8135 		new_crtc_state->planes_changed,
8136 		new_crtc_state->mode_changed,
8137 		new_crtc_state->active_changed,
8138 		new_crtc_state->connectors_changed);
8139 
8140 	/* Remove stream for any changed/disabled CRTC */
8141 	if (!enable) {
8142 
8143 		if (!dm_old_crtc_state->stream)
8144 			goto skip_modeset;
8145 
8146 		ret = dm_atomic_get_state(state, &dm_state);
8147 		if (ret)
8148 			goto fail;
8149 
8150 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8151 				crtc->base.id);
8152 
8153 		/* i.e. reset mode */
8154 		if (dc_remove_stream_from_ctx(
8155 				dm->dc,
8156 				dm_state->context,
8157 				dm_old_crtc_state->stream) != DC_OK) {
8158 			ret = -EINVAL;
8159 			goto fail;
8160 		}
8161 
8162 		dc_stream_release(dm_old_crtc_state->stream);
8163 		dm_new_crtc_state->stream = NULL;
8164 
8165 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8166 
8167 		*lock_and_validation_needed = true;
8168 
8169 	} else {/* Add stream for any updated/enabled CRTC */
8170 		/*
8171 		 * Quick fix to prevent NULL pointer on new_stream when
8172 		 * added MST connectors not found in existing crtc_state in the chained mode
8173 		 * TODO: need to dig out the root cause of that
8174 		 */
8175 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8176 			goto skip_modeset;
8177 
8178 		if (modereset_required(new_crtc_state))
8179 			goto skip_modeset;
8180 
8181 		if (modeset_required(new_crtc_state, new_stream,
8182 				     dm_old_crtc_state->stream)) {
8183 
8184 			WARN_ON(dm_new_crtc_state->stream);
8185 
8186 			ret = dm_atomic_get_state(state, &dm_state);
8187 			if (ret)
8188 				goto fail;
8189 
8190 			dm_new_crtc_state->stream = new_stream;
8191 
8192 			dc_stream_retain(new_stream);
8193 
8194 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8195 						crtc->base.id);
8196 
8197 			if (dc_add_stream_to_ctx(
8198 					dm->dc,
8199 					dm_state->context,
8200 					dm_new_crtc_state->stream) != DC_OK) {
8201 				ret = -EINVAL;
8202 				goto fail;
8203 			}
8204 
8205 			*lock_and_validation_needed = true;
8206 		}
8207 	}
8208 
8209 skip_modeset:
8210 	/* Release extra reference */
8211 	if (new_stream)
8212 		 dc_stream_release(new_stream);
8213 
8214 	/*
8215 	 * We want to do dc stream updates that do not require a
8216 	 * full modeset below.
8217 	 */
8218 	if (!(enable && aconnector && new_crtc_state->active))
8219 		return 0;
8220 	/*
8221 	 * Given above conditions, the dc state cannot be NULL because:
8222 	 * 1. We're in the process of enabling CRTCs (just been added
8223 	 *    to the dc context, or already is on the context)
8224 	 * 2. Has a valid connector attached, and
8225 	 * 3. Is currently active and enabled.
8226 	 * => The dc stream state currently exists.
8227 	 */
8228 	BUG_ON(dm_new_crtc_state->stream == NULL);
8229 
8230 	/* Scaling or underscan settings */
8231 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8232 		update_stream_scaling_settings(
8233 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8234 
8235 	/* ABM settings */
8236 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8237 
8238 	/*
8239 	 * Color management settings. We also update color properties
8240 	 * when a modeset is needed, to ensure it gets reprogrammed.
8241 	 */
8242 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8243 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8244 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8245 		if (ret)
8246 			goto fail;
8247 	}
8248 
8249 	/* Update Freesync settings. */
8250 	get_freesync_config_for_crtc(dm_new_crtc_state,
8251 				     dm_new_conn_state);
8252 
8253 	return ret;
8254 
8255 fail:
8256 	if (new_stream)
8257 		dc_stream_release(new_stream);
8258 	return ret;
8259 }
8260 
8261 static bool should_reset_plane(struct drm_atomic_state *state,
8262 			       struct drm_plane *plane,
8263 			       struct drm_plane_state *old_plane_state,
8264 			       struct drm_plane_state *new_plane_state)
8265 {
8266 	struct drm_plane *other;
8267 	struct drm_plane_state *old_other_state, *new_other_state;
8268 	struct drm_crtc_state *new_crtc_state;
8269 	int i;
8270 
8271 	/*
8272 	 * TODO: Remove this hack once the checks below are sufficient
8273 	 * enough to determine when we need to reset all the planes on
8274 	 * the stream.
8275 	 */
8276 	if (state->allow_modeset)
8277 		return true;
8278 
8279 	/* Exit early if we know that we're adding or removing the plane. */
8280 	if (old_plane_state->crtc != new_plane_state->crtc)
8281 		return true;
8282 
8283 	/* old crtc == new_crtc == NULL, plane not in context. */
8284 	if (!new_plane_state->crtc)
8285 		return false;
8286 
8287 	new_crtc_state =
8288 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8289 
8290 	if (!new_crtc_state)
8291 		return true;
8292 
8293 	/* CRTC Degamma changes currently require us to recreate planes. */
8294 	if (new_crtc_state->color_mgmt_changed)
8295 		return true;
8296 
8297 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8298 		return true;
8299 
8300 	/*
8301 	 * If there are any new primary or overlay planes being added or
8302 	 * removed then the z-order can potentially change. To ensure
8303 	 * correct z-order and pipe acquisition the current DC architecture
8304 	 * requires us to remove and recreate all existing planes.
8305 	 *
8306 	 * TODO: Come up with a more elegant solution for this.
8307 	 */
8308 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8309 		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8310 
8311 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8312 			continue;
8313 
8314 		if (old_other_state->crtc != new_plane_state->crtc &&
8315 		    new_other_state->crtc != new_plane_state->crtc)
8316 			continue;
8317 
8318 		if (old_other_state->crtc != new_other_state->crtc)
8319 			return true;
8320 
8321 		/* Src/dst size and scaling updates. */
8322 		if (old_other_state->src_w != new_other_state->src_w ||
8323 		    old_other_state->src_h != new_other_state->src_h ||
8324 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8325 		    old_other_state->crtc_h != new_other_state->crtc_h)
8326 			return true;
8327 
8328 		/* Rotation / mirroring updates. */
8329 		if (old_other_state->rotation != new_other_state->rotation)
8330 			return true;
8331 
8332 		/* Blending updates. */
8333 		if (old_other_state->pixel_blend_mode !=
8334 		    new_other_state->pixel_blend_mode)
8335 			return true;
8336 
8337 		/* Alpha updates. */
8338 		if (old_other_state->alpha != new_other_state->alpha)
8339 			return true;
8340 
8341 		/* Colorspace changes. */
8342 		if (old_other_state->color_range != new_other_state->color_range ||
8343 		    old_other_state->color_encoding != new_other_state->color_encoding)
8344 			return true;
8345 
8346 		/* Framebuffer checks fall at the end. */
8347 		if (!old_other_state->fb || !new_other_state->fb)
8348 			continue;
8349 
8350 		/* Pixel format changes can require bandwidth updates. */
8351 		if (old_other_state->fb->format != new_other_state->fb->format)
8352 			return true;
8353 
8354 		old_dm_plane_state = to_dm_plane_state(old_other_state);
8355 		new_dm_plane_state = to_dm_plane_state(new_other_state);
8356 
8357 		/* Tiling and DCC changes also require bandwidth updates. */
8358 		if (old_dm_plane_state->tiling_flags !=
8359 		    new_dm_plane_state->tiling_flags)
8360 			return true;
8361 	}
8362 
8363 	return false;
8364 }
8365 
8366 static int dm_update_plane_state(struct dc *dc,
8367 				 struct drm_atomic_state *state,
8368 				 struct drm_plane *plane,
8369 				 struct drm_plane_state *old_plane_state,
8370 				 struct drm_plane_state *new_plane_state,
8371 				 bool enable,
8372 				 bool *lock_and_validation_needed)
8373 {
8374 
8375 	struct dm_atomic_state *dm_state = NULL;
8376 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8377 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8378 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8379 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8380 	struct amdgpu_crtc *new_acrtc;
8381 	bool needs_reset;
8382 	int ret = 0;
8383 
8384 
8385 	new_plane_crtc = new_plane_state->crtc;
8386 	old_plane_crtc = old_plane_state->crtc;
8387 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8388 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8389 
8390 	/*TODO Implement better atomic check for cursor plane */
8391 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8392 		if (!enable || !new_plane_crtc ||
8393 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8394 			return 0;
8395 
8396 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8397 
8398 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8399 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8400 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8401 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8402 			return -EINVAL;
8403 		}
8404 
8405 		return 0;
8406 	}
8407 
8408 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8409 					 new_plane_state);
8410 
8411 	/* Remove any changed/removed planes */
8412 	if (!enable) {
8413 		if (!needs_reset)
8414 			return 0;
8415 
8416 		if (!old_plane_crtc)
8417 			return 0;
8418 
8419 		old_crtc_state = drm_atomic_get_old_crtc_state(
8420 				state, old_plane_crtc);
8421 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8422 
8423 		if (!dm_old_crtc_state->stream)
8424 			return 0;
8425 
8426 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8427 				plane->base.id, old_plane_crtc->base.id);
8428 
8429 		ret = dm_atomic_get_state(state, &dm_state);
8430 		if (ret)
8431 			return ret;
8432 
8433 		if (!dc_remove_plane_from_context(
8434 				dc,
8435 				dm_old_crtc_state->stream,
8436 				dm_old_plane_state->dc_state,
8437 				dm_state->context)) {
8438 
8439 			return -EINVAL;
8440 		}
8441 
8442 
8443 		dc_plane_state_release(dm_old_plane_state->dc_state);
8444 		dm_new_plane_state->dc_state = NULL;
8445 
8446 		*lock_and_validation_needed = true;
8447 
8448 	} else { /* Add new planes */
8449 		struct dc_plane_state *dc_new_plane_state;
8450 
8451 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8452 			return 0;
8453 
8454 		if (!new_plane_crtc)
8455 			return 0;
8456 
8457 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8458 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8459 
8460 		if (!dm_new_crtc_state->stream)
8461 			return 0;
8462 
8463 		if (!needs_reset)
8464 			return 0;
8465 
8466 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8467 		if (ret)
8468 			return ret;
8469 
8470 		WARN_ON(dm_new_plane_state->dc_state);
8471 
8472 		dc_new_plane_state = dc_create_plane_state(dc);
8473 		if (!dc_new_plane_state)
8474 			return -ENOMEM;
8475 
8476 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8477 				plane->base.id, new_plane_crtc->base.id);
8478 
8479 		ret = fill_dc_plane_attributes(
8480 			drm_to_adev(new_plane_crtc->dev),
8481 			dc_new_plane_state,
8482 			new_plane_state,
8483 			new_crtc_state);
8484 		if (ret) {
8485 			dc_plane_state_release(dc_new_plane_state);
8486 			return ret;
8487 		}
8488 
8489 		ret = dm_atomic_get_state(state, &dm_state);
8490 		if (ret) {
8491 			dc_plane_state_release(dc_new_plane_state);
8492 			return ret;
8493 		}
8494 
8495 		/*
8496 		 * Any atomic check errors that occur after this will
8497 		 * not need a release. The plane state will be attached
8498 		 * to the stream, and therefore part of the atomic
8499 		 * state. It'll be released when the atomic state is
8500 		 * cleaned.
8501 		 */
8502 		if (!dc_add_plane_to_context(
8503 				dc,
8504 				dm_new_crtc_state->stream,
8505 				dc_new_plane_state,
8506 				dm_state->context)) {
8507 
8508 			dc_plane_state_release(dc_new_plane_state);
8509 			return -EINVAL;
8510 		}
8511 
8512 		dm_new_plane_state->dc_state = dc_new_plane_state;
8513 
8514 		/* Tell DC to do a full surface update every time there
8515 		 * is a plane change. Inefficient, but works for now.
8516 		 */
8517 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8518 
8519 		*lock_and_validation_needed = true;
8520 	}
8521 
8522 
8523 	return ret;
8524 }
8525 
8526 #if defined(CONFIG_DRM_AMD_DC_DCN)
8527 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8528 {
8529 	struct drm_connector *connector;
8530 	struct drm_connector_state *conn_state;
8531 	struct amdgpu_dm_connector *aconnector = NULL;
8532 	int i;
8533 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8534 		if (conn_state->crtc != crtc)
8535 			continue;
8536 
8537 		aconnector = to_amdgpu_dm_connector(connector);
8538 		if (!aconnector->port || !aconnector->mst_port)
8539 			aconnector = NULL;
8540 		else
8541 			break;
8542 	}
8543 
8544 	if (!aconnector)
8545 		return 0;
8546 
8547 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8548 }
8549 #endif
8550 
8551 /**
8552  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8553  * @dev: The DRM device
8554  * @state: The atomic state to commit
8555  *
8556  * Validate that the given atomic state is programmable by DC into hardware.
8557  * This involves constructing a &struct dc_state reflecting the new hardware
8558  * state we wish to commit, then querying DC to see if it is programmable. It's
8559  * important not to modify the existing DC state. Otherwise, atomic_check
8560  * may unexpectedly commit hardware changes.
8561  *
8562  * When validating the DC state, it's important that the right locks are
8563  * acquired. For full updates case which removes/adds/updates streams on one
8564  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8565  * that any such full update commit will wait for completion of any outstanding
8566  * flip using DRMs synchronization events.
8567  *
8568  * Note that DM adds the affected connectors for all CRTCs in state, when that
8569  * might not seem necessary. This is because DC stream creation requires the
8570  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8571  * be possible but non-trivial - a possible TODO item.
8572  *
8573  * Return: -Error code if validation failed.
8574  */
8575 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8576 				  struct drm_atomic_state *state)
8577 {
8578 	struct amdgpu_device *adev = drm_to_adev(dev);
8579 	struct dm_atomic_state *dm_state = NULL;
8580 	struct dc *dc = adev->dm.dc;
8581 	struct drm_connector *connector;
8582 	struct drm_connector_state *old_con_state, *new_con_state;
8583 	struct drm_crtc *crtc;
8584 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8585 	struct drm_plane *plane;
8586 	struct drm_plane_state *old_plane_state, *new_plane_state;
8587 	enum dc_status status;
8588 	int ret, i;
8589 	bool lock_and_validation_needed = false;
8590 
8591 	amdgpu_check_debugfs_connector_property_change(adev, state);
8592 
8593 	ret = drm_atomic_helper_check_modeset(dev, state);
8594 	if (ret)
8595 		goto fail;
8596 
8597 	/* Check connector changes */
8598 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8599 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8600 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8601 
8602 		/* Skip connectors that are disabled or part of modeset already. */
8603 		if (!old_con_state->crtc && !new_con_state->crtc)
8604 			continue;
8605 
8606 		if (!new_con_state->crtc)
8607 			continue;
8608 
8609 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8610 		if (IS_ERR(new_crtc_state)) {
8611 			ret = PTR_ERR(new_crtc_state);
8612 			goto fail;
8613 		}
8614 
8615 		if (dm_old_con_state->abm_level !=
8616 		    dm_new_con_state->abm_level)
8617 			new_crtc_state->connectors_changed = true;
8618 	}
8619 
8620 #if defined(CONFIG_DRM_AMD_DC_DCN)
8621 	if (adev->asic_type >= CHIP_NAVI10) {
8622 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8623 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8624 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8625 				if (ret)
8626 					goto fail;
8627 			}
8628 		}
8629 	}
8630 #endif
8631 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8632 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8633 		    !new_crtc_state->color_mgmt_changed &&
8634 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8635 			continue;
8636 
8637 		if (!new_crtc_state->enable)
8638 			continue;
8639 
8640 		ret = drm_atomic_add_affected_connectors(state, crtc);
8641 		if (ret)
8642 			return ret;
8643 
8644 		ret = drm_atomic_add_affected_planes(state, crtc);
8645 		if (ret)
8646 			goto fail;
8647 	}
8648 
8649 	/*
8650 	 * Add all primary and overlay planes on the CRTC to the state
8651 	 * whenever a plane is enabled to maintain correct z-ordering
8652 	 * and to enable fast surface updates.
8653 	 */
8654 	drm_for_each_crtc(crtc, dev) {
8655 		bool modified = false;
8656 
8657 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8658 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8659 				continue;
8660 
8661 			if (new_plane_state->crtc == crtc ||
8662 			    old_plane_state->crtc == crtc) {
8663 				modified = true;
8664 				break;
8665 			}
8666 		}
8667 
8668 		if (!modified)
8669 			continue;
8670 
8671 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8672 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8673 				continue;
8674 
8675 			new_plane_state =
8676 				drm_atomic_get_plane_state(state, plane);
8677 
8678 			if (IS_ERR(new_plane_state)) {
8679 				ret = PTR_ERR(new_plane_state);
8680 				goto fail;
8681 			}
8682 		}
8683 	}
8684 
8685 	/* Prepass for updating tiling flags on new planes. */
8686 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8687 		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8688 		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8689 
8690 		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8691 				  &new_dm_plane_state->tmz_surface);
8692 		if (ret)
8693 			goto fail;
8694 	}
8695 
8696 	/* Remove exiting planes if they are modified */
8697 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8698 		ret = dm_update_plane_state(dc, state, plane,
8699 					    old_plane_state,
8700 					    new_plane_state,
8701 					    false,
8702 					    &lock_and_validation_needed);
8703 		if (ret)
8704 			goto fail;
8705 	}
8706 
8707 	/* Disable all crtcs which require disable */
8708 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8709 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8710 					   old_crtc_state,
8711 					   new_crtc_state,
8712 					   false,
8713 					   &lock_and_validation_needed);
8714 		if (ret)
8715 			goto fail;
8716 	}
8717 
8718 	/* Enable all crtcs which require enable */
8719 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8720 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8721 					   old_crtc_state,
8722 					   new_crtc_state,
8723 					   true,
8724 					   &lock_and_validation_needed);
8725 		if (ret)
8726 			goto fail;
8727 	}
8728 
8729 	/* Add new/modified planes */
8730 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8731 		ret = dm_update_plane_state(dc, state, plane,
8732 					    old_plane_state,
8733 					    new_plane_state,
8734 					    true,
8735 					    &lock_and_validation_needed);
8736 		if (ret)
8737 			goto fail;
8738 	}
8739 
8740 	/* Run this here since we want to validate the streams we created */
8741 	ret = drm_atomic_helper_check_planes(dev, state);
8742 	if (ret)
8743 		goto fail;
8744 
8745 	if (state->legacy_cursor_update) {
8746 		/*
8747 		 * This is a fast cursor update coming from the plane update
8748 		 * helper, check if it can be done asynchronously for better
8749 		 * performance.
8750 		 */
8751 		state->async_update =
8752 			!drm_atomic_helper_async_check(dev, state);
8753 
8754 		/*
8755 		 * Skip the remaining global validation if this is an async
8756 		 * update. Cursor updates can be done without affecting
8757 		 * state or bandwidth calcs and this avoids the performance
8758 		 * penalty of locking the private state object and
8759 		 * allocating a new dc_state.
8760 		 */
8761 		if (state->async_update)
8762 			return 0;
8763 	}
8764 
8765 	/* Check scaling and underscan changes*/
8766 	/* TODO Removed scaling changes validation due to inability to commit
8767 	 * new stream into context w\o causing full reset. Need to
8768 	 * decide how to handle.
8769 	 */
8770 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8771 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8772 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8773 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8774 
8775 		/* Skip any modesets/resets */
8776 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8777 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8778 			continue;
8779 
8780 		/* Skip any thing not scale or underscan changes */
8781 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8782 			continue;
8783 
8784 		lock_and_validation_needed = true;
8785 	}
8786 
8787 	/**
8788 	 * Streams and planes are reset when there are changes that affect
8789 	 * bandwidth. Anything that affects bandwidth needs to go through
8790 	 * DC global validation to ensure that the configuration can be applied
8791 	 * to hardware.
8792 	 *
8793 	 * We have to currently stall out here in atomic_check for outstanding
8794 	 * commits to finish in this case because our IRQ handlers reference
8795 	 * DRM state directly - we can end up disabling interrupts too early
8796 	 * if we don't.
8797 	 *
8798 	 * TODO: Remove this stall and drop DM state private objects.
8799 	 */
8800 	if (lock_and_validation_needed) {
8801 		ret = dm_atomic_get_state(state, &dm_state);
8802 		if (ret)
8803 			goto fail;
8804 
8805 		ret = do_aquire_global_lock(dev, state);
8806 		if (ret)
8807 			goto fail;
8808 
8809 #if defined(CONFIG_DRM_AMD_DC_DCN)
8810 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8811 			goto fail;
8812 
8813 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8814 		if (ret)
8815 			goto fail;
8816 #endif
8817 
8818 		/*
8819 		 * Perform validation of MST topology in the state:
8820 		 * We need to perform MST atomic check before calling
8821 		 * dc_validate_global_state(), or there is a chance
8822 		 * to get stuck in an infinite loop and hang eventually.
8823 		 */
8824 		ret = drm_dp_mst_atomic_check(state);
8825 		if (ret)
8826 			goto fail;
8827 		status = dc_validate_global_state(dc, dm_state->context, false);
8828 		if (status != DC_OK) {
8829 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
8830 				       dc_status_to_str(status), status);
8831 			ret = -EINVAL;
8832 			goto fail;
8833 		}
8834 	} else {
8835 		/*
8836 		 * The commit is a fast update. Fast updates shouldn't change
8837 		 * the DC context, affect global validation, and can have their
8838 		 * commit work done in parallel with other commits not touching
8839 		 * the same resource. If we have a new DC context as part of
8840 		 * the DM atomic state from validation we need to free it and
8841 		 * retain the existing one instead.
8842 		 *
8843 		 * Furthermore, since the DM atomic state only contains the DC
8844 		 * context and can safely be annulled, we can free the state
8845 		 * and clear the associated private object now to free
8846 		 * some memory and avoid a possible use-after-free later.
8847 		 */
8848 
8849 		for (i = 0; i < state->num_private_objs; i++) {
8850 			struct drm_private_obj *obj = state->private_objs[i].ptr;
8851 
8852 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
8853 				int j = state->num_private_objs-1;
8854 
8855 				dm_atomic_destroy_state(obj,
8856 						state->private_objs[i].state);
8857 
8858 				/* If i is not at the end of the array then the
8859 				 * last element needs to be moved to where i was
8860 				 * before the array can safely be truncated.
8861 				 */
8862 				if (i != j)
8863 					state->private_objs[i] =
8864 						state->private_objs[j];
8865 
8866 				state->private_objs[j].ptr = NULL;
8867 				state->private_objs[j].state = NULL;
8868 				state->private_objs[j].old_state = NULL;
8869 				state->private_objs[j].new_state = NULL;
8870 
8871 				state->num_private_objs = j;
8872 				break;
8873 			}
8874 		}
8875 	}
8876 
8877 	/* Store the overall update type for use later in atomic check. */
8878 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8879 		struct dm_crtc_state *dm_new_crtc_state =
8880 			to_dm_crtc_state(new_crtc_state);
8881 
8882 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
8883 							 UPDATE_TYPE_FULL :
8884 							 UPDATE_TYPE_FAST;
8885 	}
8886 
8887 	/* Must be success */
8888 	WARN_ON(ret);
8889 	return ret;
8890 
8891 fail:
8892 	if (ret == -EDEADLK)
8893 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8894 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8895 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8896 	else
8897 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8898 
8899 	return ret;
8900 }
8901 
8902 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8903 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8904 {
8905 	uint8_t dpcd_data;
8906 	bool capable = false;
8907 
8908 	if (amdgpu_dm_connector->dc_link &&
8909 		dm_helpers_dp_read_dpcd(
8910 				NULL,
8911 				amdgpu_dm_connector->dc_link,
8912 				DP_DOWN_STREAM_PORT_COUNT,
8913 				&dpcd_data,
8914 				sizeof(dpcd_data))) {
8915 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8916 	}
8917 
8918 	return capable;
8919 }
8920 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8921 					struct edid *edid)
8922 {
8923 	int i;
8924 	bool edid_check_required;
8925 	struct detailed_timing *timing;
8926 	struct detailed_non_pixel *data;
8927 	struct detailed_data_monitor_range *range;
8928 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8929 			to_amdgpu_dm_connector(connector);
8930 	struct dm_connector_state *dm_con_state = NULL;
8931 
8932 	struct drm_device *dev = connector->dev;
8933 	struct amdgpu_device *adev = drm_to_adev(dev);
8934 	bool freesync_capable = false;
8935 
8936 	if (!connector->state) {
8937 		DRM_ERROR("%s - Connector has no state", __func__);
8938 		goto update;
8939 	}
8940 
8941 	if (!edid) {
8942 		dm_con_state = to_dm_connector_state(connector->state);
8943 
8944 		amdgpu_dm_connector->min_vfreq = 0;
8945 		amdgpu_dm_connector->max_vfreq = 0;
8946 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8947 
8948 		goto update;
8949 	}
8950 
8951 	dm_con_state = to_dm_connector_state(connector->state);
8952 
8953 	edid_check_required = false;
8954 	if (!amdgpu_dm_connector->dc_sink) {
8955 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8956 		goto update;
8957 	}
8958 	if (!adev->dm.freesync_module)
8959 		goto update;
8960 	/*
8961 	 * if edid non zero restrict freesync only for dp and edp
8962 	 */
8963 	if (edid) {
8964 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8965 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8966 			edid_check_required = is_dp_capable_without_timing_msa(
8967 						adev->dm.dc,
8968 						amdgpu_dm_connector);
8969 		}
8970 	}
8971 	if (edid_check_required == true && (edid->version > 1 ||
8972 	   (edid->version == 1 && edid->revision > 1))) {
8973 		for (i = 0; i < 4; i++) {
8974 
8975 			timing	= &edid->detailed_timings[i];
8976 			data	= &timing->data.other_data;
8977 			range	= &data->data.range;
8978 			/*
8979 			 * Check if monitor has continuous frequency mode
8980 			 */
8981 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8982 				continue;
8983 			/*
8984 			 * Check for flag range limits only. If flag == 1 then
8985 			 * no additional timing information provided.
8986 			 * Default GTF, GTF Secondary curve and CVT are not
8987 			 * supported
8988 			 */
8989 			if (range->flags != 1)
8990 				continue;
8991 
8992 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8993 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8994 			amdgpu_dm_connector->pixel_clock_mhz =
8995 				range->pixel_clock_mhz * 10;
8996 			break;
8997 		}
8998 
8999 		if (amdgpu_dm_connector->max_vfreq -
9000 		    amdgpu_dm_connector->min_vfreq > 10) {
9001 
9002 			freesync_capable = true;
9003 		}
9004 	}
9005 
9006 update:
9007 	if (dm_con_state)
9008 		dm_con_state->freesync_capable = freesync_capable;
9009 
9010 	if (connector->vrr_capable_property)
9011 		drm_connector_set_vrr_capable_property(connector,
9012 						       freesync_capable);
9013 }
9014 
9015 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9016 {
9017 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9018 
9019 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9020 		return;
9021 	if (link->type == dc_connection_none)
9022 		return;
9023 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9024 					dpcd_data, sizeof(dpcd_data))) {
9025 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9026 
9027 		if (dpcd_data[0] == 0) {
9028 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9029 			link->psr_settings.psr_feature_enabled = false;
9030 		} else {
9031 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9032 			link->psr_settings.psr_feature_enabled = true;
9033 		}
9034 
9035 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9036 	}
9037 }
9038 
9039 /*
9040  * amdgpu_dm_link_setup_psr() - configure psr link
9041  * @stream: stream state
9042  *
9043  * Return: true if success
9044  */
9045 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9046 {
9047 	struct dc_link *link = NULL;
9048 	struct psr_config psr_config = {0};
9049 	struct psr_context psr_context = {0};
9050 	bool ret = false;
9051 
9052 	if (stream == NULL)
9053 		return false;
9054 
9055 	link = stream->link;
9056 
9057 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9058 
9059 	if (psr_config.psr_version > 0) {
9060 		psr_config.psr_exit_link_training_required = 0x1;
9061 		psr_config.psr_frame_capture_indication_req = 0;
9062 		psr_config.psr_rfb_setup_time = 0x37;
9063 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9064 		psr_config.allow_smu_optimizations = 0x0;
9065 
9066 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9067 
9068 	}
9069 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9070 
9071 	return ret;
9072 }
9073 
9074 /*
9075  * amdgpu_dm_psr_enable() - enable psr f/w
9076  * @stream: stream state
9077  *
9078  * Return: true if success
9079  */
9080 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9081 {
9082 	struct dc_link *link = stream->link;
9083 	unsigned int vsync_rate_hz = 0;
9084 	struct dc_static_screen_params params = {0};
9085 	/* Calculate number of static frames before generating interrupt to
9086 	 * enter PSR.
9087 	 */
9088 	// Init fail safe of 2 frames static
9089 	unsigned int num_frames_static = 2;
9090 
9091 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9092 
9093 	vsync_rate_hz = div64_u64(div64_u64((
9094 			stream->timing.pix_clk_100hz * 100),
9095 			stream->timing.v_total),
9096 			stream->timing.h_total);
9097 
9098 	/* Round up
9099 	 * Calculate number of frames such that at least 30 ms of time has
9100 	 * passed.
9101 	 */
9102 	if (vsync_rate_hz != 0) {
9103 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9104 		num_frames_static = (30000 / frame_time_microsec) + 1;
9105 	}
9106 
9107 	params.triggers.cursor_update = true;
9108 	params.triggers.overlay_update = true;
9109 	params.triggers.surface_update = true;
9110 	params.num_frames = num_frames_static;
9111 
9112 	dc_stream_set_static_screen_params(link->ctx->dc,
9113 					   &stream, 1,
9114 					   &params);
9115 
9116 	return dc_link_set_psr_allow_active(link, true, false);
9117 }
9118 
9119 /*
9120  * amdgpu_dm_psr_disable() - disable psr f/w
9121  * @stream:  stream state
9122  *
9123  * Return: true if success
9124  */
9125 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9126 {
9127 
9128 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9129 
9130 	return dc_link_set_psr_allow_active(stream->link, false, true);
9131 }
9132 
9133 /*
9134  * amdgpu_dm_psr_disable() - disable psr f/w
9135  * if psr is enabled on any stream
9136  *
9137  * Return: true if success
9138  */
9139 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9140 {
9141 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9142 	return dc_set_psr_allow_active(dm->dc, false);
9143 }
9144 
9145 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9146 {
9147 	struct amdgpu_device *adev = drm_to_adev(dev);
9148 	struct dc *dc = adev->dm.dc;
9149 	int i;
9150 
9151 	mutex_lock(&adev->dm.dc_lock);
9152 	if (dc->current_state) {
9153 		for (i = 0; i < dc->current_state->stream_count; ++i)
9154 			dc->current_state->streams[i]
9155 				->triggered_crtc_reset.enabled =
9156 				adev->dm.force_timing_sync;
9157 
9158 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9159 		dc_trigger_sync(dc, dc->current_state);
9160 	}
9161 	mutex_unlock(&adev->dm.dc_lock);
9162 }
9163