1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "dc/dc_stat.h"
39 #include "amdgpu_dm_trace.h"
40 
41 #include "vid.h"
42 #include "amdgpu.h"
43 #include "amdgpu_display.h"
44 #include "amdgpu_ucode.h"
45 #include "atom.h"
46 #include "amdgpu_dm.h"
47 #ifdef CONFIG_DRM_AMD_DC_HDCP
48 #include "amdgpu_dm_hdcp.h"
49 #include <drm/drm_hdcp.h>
50 #endif
51 #include "amdgpu_pm.h"
52 
53 #include "amd_shared.h"
54 #include "amdgpu_dm_irq.h"
55 #include "dm_helpers.h"
56 #include "amdgpu_dm_mst_types.h"
57 #if defined(CONFIG_DEBUG_FS)
58 #include "amdgpu_dm_debugfs.h"
59 #endif
60 
61 #include "ivsrcid/ivsrcid_vislands30.h"
62 
63 #include "i2caux_interface.h"
64 #include <linux/module.h>
65 #include <linux/moduleparam.h>
66 #include <linux/types.h>
67 #include <linux/pm_runtime.h>
68 #include <linux/pci.h>
69 #include <linux/firmware.h>
70 #include <linux/component.h>
71 
72 #include <drm/drm_atomic.h>
73 #include <drm/drm_atomic_uapi.h>
74 #include <drm/drm_atomic_helper.h>
75 #include <drm/drm_dp_mst_helper.h>
76 #include <drm/drm_fb_helper.h>
77 #include <drm/drm_fourcc.h>
78 #include <drm/drm_edid.h>
79 #include <drm/drm_vblank.h>
80 #include <drm/drm_audio_component.h>
81 
82 #if defined(CONFIG_DRM_AMD_DC_DCN)
83 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
84 
85 #include "dcn/dcn_1_0_offset.h"
86 #include "dcn/dcn_1_0_sh_mask.h"
87 #include "soc15_hw_ip.h"
88 #include "vega10_ip_offset.h"
89 
90 #include "soc15_common.h"
91 #endif
92 
93 #include "modules/inc/mod_freesync.h"
94 #include "modules/power/power_helpers.h"
95 #include "modules/inc/mod_info_packet.h"
96 
97 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
99 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
101 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
103 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
105 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
107 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
109 
110 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
111 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
112 
113 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
114 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
115 
116 /* Number of bytes in PSP header for firmware. */
117 #define PSP_HEADER_BYTES 0x100
118 
119 /* Number of bytes in PSP footer for firmware. */
120 #define PSP_FOOTER_BYTES 0x100
121 
122 /**
123  * DOC: overview
124  *
125  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
126  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
127  * requests into DC requests, and DC responses into DRM responses.
128  *
129  * The root control structure is &struct amdgpu_display_manager.
130  */
131 
132 /* basic init/fini API */
133 static int amdgpu_dm_init(struct amdgpu_device *adev);
134 static void amdgpu_dm_fini(struct amdgpu_device *adev);
135 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
136 
137 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
138 {
139 	switch (link->dpcd_caps.dongle_type) {
140 	case DISPLAY_DONGLE_NONE:
141 		return DRM_MODE_SUBCONNECTOR_Native;
142 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
143 		return DRM_MODE_SUBCONNECTOR_VGA;
144 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
145 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
146 		return DRM_MODE_SUBCONNECTOR_DVID;
147 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
148 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
149 		return DRM_MODE_SUBCONNECTOR_HDMIA;
150 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
151 	default:
152 		return DRM_MODE_SUBCONNECTOR_Unknown;
153 	}
154 }
155 
156 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
157 {
158 	struct dc_link *link = aconnector->dc_link;
159 	struct drm_connector *connector = &aconnector->base;
160 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
161 
162 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
163 		return;
164 
165 	if (aconnector->dc_sink)
166 		subconnector = get_subconnector_type(link);
167 
168 	drm_object_property_set_value(&connector->base,
169 			connector->dev->mode_config.dp_subconnector_property,
170 			subconnector);
171 }
172 
173 /*
174  * initializes drm_device display related structures, based on the information
175  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
176  * drm_encoder, drm_mode_config
177  *
178  * Returns 0 on success
179  */
180 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
181 /* removes and deallocates the drm structures, created by the above function */
182 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
183 
184 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
185 				struct drm_plane *plane,
186 				unsigned long possible_crtcs,
187 				const struct dc_plane_cap *plane_cap);
188 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
189 			       struct drm_plane *plane,
190 			       uint32_t link_index);
191 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
192 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
193 				    uint32_t link_index,
194 				    struct amdgpu_encoder *amdgpu_encoder);
195 static int amdgpu_dm_encoder_init(struct drm_device *dev,
196 				  struct amdgpu_encoder *aencoder,
197 				  uint32_t link_index);
198 
199 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
200 
201 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
202 
203 static int amdgpu_dm_atomic_check(struct drm_device *dev,
204 				  struct drm_atomic_state *state);
205 
206 static void handle_cursor_update(struct drm_plane *plane,
207 				 struct drm_plane_state *old_plane_state);
208 
209 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
210 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
212 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
213 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
214 
215 static const struct drm_format_info *
216 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 
218 static bool
219 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
220 				 struct drm_crtc_state *new_crtc_state);
221 /*
222  * dm_vblank_get_counter
223  *
224  * @brief
225  * Get counter for number of vertical blanks
226  *
227  * @param
228  * struct amdgpu_device *adev - [in] desired amdgpu device
229  * int disp_idx - [in] which CRTC to get the counter from
230  *
231  * @return
232  * Counter for vertical blanks
233  */
234 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
235 {
236 	if (crtc >= adev->mode_info.num_crtc)
237 		return 0;
238 	else {
239 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
240 
241 		if (acrtc->dm_irq_params.stream == NULL) {
242 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
243 				  crtc);
244 			return 0;
245 		}
246 
247 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
248 	}
249 }
250 
251 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
252 				  u32 *vbl, u32 *position)
253 {
254 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
255 
256 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
257 		return -EINVAL;
258 	else {
259 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
260 
261 		if (acrtc->dm_irq_params.stream ==  NULL) {
262 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
263 				  crtc);
264 			return 0;
265 		}
266 
267 		/*
268 		 * TODO rework base driver to use values directly.
269 		 * for now parse it back into reg-format
270 		 */
271 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
272 					 &v_blank_start,
273 					 &v_blank_end,
274 					 &h_position,
275 					 &v_position);
276 
277 		*position = v_position | (h_position << 16);
278 		*vbl = v_blank_start | (v_blank_end << 16);
279 	}
280 
281 	return 0;
282 }
283 
284 static bool dm_is_idle(void *handle)
285 {
286 	/* XXX todo */
287 	return true;
288 }
289 
290 static int dm_wait_for_idle(void *handle)
291 {
292 	/* XXX todo */
293 	return 0;
294 }
295 
296 static bool dm_check_soft_reset(void *handle)
297 {
298 	return false;
299 }
300 
301 static int dm_soft_reset(void *handle)
302 {
303 	/* XXX todo */
304 	return 0;
305 }
306 
307 static struct amdgpu_crtc *
308 get_crtc_by_otg_inst(struct amdgpu_device *adev,
309 		     int otg_inst)
310 {
311 	struct drm_device *dev = adev_to_drm(adev);
312 	struct drm_crtc *crtc;
313 	struct amdgpu_crtc *amdgpu_crtc;
314 
315 	if (otg_inst == -1) {
316 		WARN_ON(1);
317 		return adev->mode_info.crtcs[0];
318 	}
319 
320 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
321 		amdgpu_crtc = to_amdgpu_crtc(crtc);
322 
323 		if (amdgpu_crtc->otg_inst == otg_inst)
324 			return amdgpu_crtc;
325 	}
326 
327 	return NULL;
328 }
329 
330 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
331 {
332 	return acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_VARIABLE ||
334 	       acrtc->dm_irq_params.freesync_config.state ==
335 		       VRR_STATE_ACTIVE_FIXED;
336 }
337 
338 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
339 {
340 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
341 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
342 }
343 
344 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
345 					      struct dm_crtc_state *new_state)
346 {
347 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
348 		return true;
349 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
350 		return true;
351 	else
352 		return false;
353 }
354 
355 /**
356  * dm_pflip_high_irq() - Handle pageflip interrupt
357  * @interrupt_params: ignored
358  *
359  * Handles the pageflip interrupt by notifying all interested parties
360  * that the pageflip has been completed.
361  */
362 static void dm_pflip_high_irq(void *interrupt_params)
363 {
364 	struct amdgpu_crtc *amdgpu_crtc;
365 	struct common_irq_params *irq_params = interrupt_params;
366 	struct amdgpu_device *adev = irq_params->adev;
367 	unsigned long flags;
368 	struct drm_pending_vblank_event *e;
369 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
370 	bool vrr_active;
371 
372 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
373 
374 	/* IRQ could occur when in initial stage */
375 	/* TODO work and BO cleanup */
376 	if (amdgpu_crtc == NULL) {
377 		DC_LOG_PFLIP("CRTC is null, returning.\n");
378 		return;
379 	}
380 
381 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
382 
383 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
384 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
385 						 amdgpu_crtc->pflip_status,
386 						 AMDGPU_FLIP_SUBMITTED,
387 						 amdgpu_crtc->crtc_id,
388 						 amdgpu_crtc);
389 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
390 		return;
391 	}
392 
393 	/* page flip completed. */
394 	e = amdgpu_crtc->event;
395 	amdgpu_crtc->event = NULL;
396 
397 	if (!e)
398 		WARN_ON(1);
399 
400 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
401 
402 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
403 	if (!vrr_active ||
404 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
405 				      &v_blank_end, &hpos, &vpos) ||
406 	    (vpos < v_blank_start)) {
407 		/* Update to correct count and vblank timestamp if racing with
408 		 * vblank irq. This also updates to the correct vblank timestamp
409 		 * even in VRR mode, as scanout is past the front-porch atm.
410 		 */
411 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
412 
413 		/* Wake up userspace by sending the pageflip event with proper
414 		 * count and timestamp of vblank of flip completion.
415 		 */
416 		if (e) {
417 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
418 
419 			/* Event sent, so done with vblank for this flip */
420 			drm_crtc_vblank_put(&amdgpu_crtc->base);
421 		}
422 	} else if (e) {
423 		/* VRR active and inside front-porch: vblank count and
424 		 * timestamp for pageflip event will only be up to date after
425 		 * drm_crtc_handle_vblank() has been executed from late vblank
426 		 * irq handler after start of back-porch (vline 0). We queue the
427 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
428 		 * updated timestamp and count, once it runs after us.
429 		 *
430 		 * We need to open-code this instead of using the helper
431 		 * drm_crtc_arm_vblank_event(), as that helper would
432 		 * call drm_crtc_accurate_vblank_count(), which we must
433 		 * not call in VRR mode while we are in front-porch!
434 		 */
435 
436 		/* sequence will be replaced by real count during send-out. */
437 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
438 		e->pipe = amdgpu_crtc->crtc_id;
439 
440 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
441 		e = NULL;
442 	}
443 
444 	/* Keep track of vblank of this flip for flip throttling. We use the
445 	 * cooked hw counter, as that one incremented at start of this vblank
446 	 * of pageflip completion, so last_flip_vblank is the forbidden count
447 	 * for queueing new pageflips if vsync + VRR is enabled.
448 	 */
449 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
450 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
451 
452 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
453 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
454 
455 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
456 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
457 		     vrr_active, (int) !e);
458 }
459 
460 static void dm_vupdate_high_irq(void *interrupt_params)
461 {
462 	struct common_irq_params *irq_params = interrupt_params;
463 	struct amdgpu_device *adev = irq_params->adev;
464 	struct amdgpu_crtc *acrtc;
465 	struct drm_device *drm_dev;
466 	struct drm_vblank_crtc *vblank;
467 	ktime_t frame_duration_ns, previous_timestamp;
468 	unsigned long flags;
469 	int vrr_active;
470 
471 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
472 
473 	if (acrtc) {
474 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
475 		drm_dev = acrtc->base.dev;
476 		vblank = &drm_dev->vblank[acrtc->base.index];
477 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
478 		frame_duration_ns = vblank->time - previous_timestamp;
479 
480 		if (frame_duration_ns > 0) {
481 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
482 						frame_duration_ns,
483 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
484 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
485 		}
486 
487 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
488 			      acrtc->crtc_id,
489 			      vrr_active);
490 
491 		/* Core vblank handling is done here after end of front-porch in
492 		 * vrr mode, as vblank timestamping will give valid results
493 		 * while now done after front-porch. This will also deliver
494 		 * page-flip completion events that have been queued to us
495 		 * if a pageflip happened inside front-porch.
496 		 */
497 		if (vrr_active) {
498 			drm_crtc_handle_vblank(&acrtc->base);
499 
500 			/* BTR processing for pre-DCE12 ASICs */
501 			if (acrtc->dm_irq_params.stream &&
502 			    adev->family < AMDGPU_FAMILY_AI) {
503 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
504 				mod_freesync_handle_v_update(
505 				    adev->dm.freesync_module,
506 				    acrtc->dm_irq_params.stream,
507 				    &acrtc->dm_irq_params.vrr_params);
508 
509 				dc_stream_adjust_vmin_vmax(
510 				    adev->dm.dc,
511 				    acrtc->dm_irq_params.stream,
512 				    &acrtc->dm_irq_params.vrr_params.adjust);
513 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
514 			}
515 		}
516 	}
517 }
518 
519 /**
520  * dm_crtc_high_irq() - Handles CRTC interrupt
521  * @interrupt_params: used for determining the CRTC instance
522  *
523  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
524  * event handler.
525  */
526 static void dm_crtc_high_irq(void *interrupt_params)
527 {
528 	struct common_irq_params *irq_params = interrupt_params;
529 	struct amdgpu_device *adev = irq_params->adev;
530 	struct amdgpu_crtc *acrtc;
531 	unsigned long flags;
532 	int vrr_active;
533 
534 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
535 	if (!acrtc)
536 		return;
537 
538 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
539 
540 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
541 		      vrr_active, acrtc->dm_irq_params.active_planes);
542 
543 	/**
544 	 * Core vblank handling at start of front-porch is only possible
545 	 * in non-vrr mode, as only there vblank timestamping will give
546 	 * valid results while done in front-porch. Otherwise defer it
547 	 * to dm_vupdate_high_irq after end of front-porch.
548 	 */
549 	if (!vrr_active)
550 		drm_crtc_handle_vblank(&acrtc->base);
551 
552 	/**
553 	 * Following stuff must happen at start of vblank, for crc
554 	 * computation and below-the-range btr support in vrr mode.
555 	 */
556 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
557 
558 	/* BTR updates need to happen before VUPDATE on Vega and above. */
559 	if (adev->family < AMDGPU_FAMILY_AI)
560 		return;
561 
562 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
563 
564 	if (acrtc->dm_irq_params.stream &&
565 	    acrtc->dm_irq_params.vrr_params.supported &&
566 	    acrtc->dm_irq_params.freesync_config.state ==
567 		    VRR_STATE_ACTIVE_VARIABLE) {
568 		mod_freesync_handle_v_update(adev->dm.freesync_module,
569 					     acrtc->dm_irq_params.stream,
570 					     &acrtc->dm_irq_params.vrr_params);
571 
572 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
573 					   &acrtc->dm_irq_params.vrr_params.adjust);
574 	}
575 
576 	/*
577 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
578 	 * In that case, pageflip completion interrupts won't fire and pageflip
579 	 * completion events won't get delivered. Prevent this by sending
580 	 * pending pageflip events from here if a flip is still pending.
581 	 *
582 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
583 	 * avoid race conditions between flip programming and completion,
584 	 * which could cause too early flip completion events.
585 	 */
586 	if (adev->family >= AMDGPU_FAMILY_RV &&
587 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
588 	    acrtc->dm_irq_params.active_planes == 0) {
589 		if (acrtc->event) {
590 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
591 			acrtc->event = NULL;
592 			drm_crtc_vblank_put(&acrtc->base);
593 		}
594 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
595 	}
596 
597 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
598 }
599 
600 #if defined(CONFIG_DRM_AMD_DC_DCN)
601 /**
602  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
603  * DCN generation ASICs
604  * @interrupt params - interrupt parameters
605  *
606  * Used to set crc window/read out crc value at vertical line 0 position
607  */
608 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
609 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
610 {
611 	struct common_irq_params *irq_params = interrupt_params;
612 	struct amdgpu_device *adev = irq_params->adev;
613 	struct amdgpu_crtc *acrtc;
614 
615 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
616 
617 	if (!acrtc)
618 		return;
619 
620 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
621 }
622 #endif
623 #endif
624 
625 /**
626  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
627  * @interrupt_params: used for determining the Outbox instance
628  *
629  * Handles the Outbox Interrupt
630  * event handler.
631  */
632 #define DMUB_TRACE_MAX_READ 64
633 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
634 {
635 	struct dmub_notification notify;
636 	struct common_irq_params *irq_params = interrupt_params;
637 	struct amdgpu_device *adev = irq_params->adev;
638 	struct amdgpu_display_manager *dm = &adev->dm;
639 	struct dmcub_trace_buf_entry entry = { 0 };
640 	uint32_t count = 0;
641 
642 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
643 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
644 			do {
645 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
646 			} while (notify.pending_notification);
647 
648 			if (adev->dm.dmub_notify)
649 				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
650 			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
651 				complete(&adev->dm.dmub_aux_transfer_done);
652 			// TODO : HPD Implementation
653 
654 		} else {
655 			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
656 		}
657 	}
658 
659 
660 	do {
661 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
662 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
663 							entry.param0, entry.param1);
664 
665 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
666 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
667 		} else
668 			break;
669 
670 		count++;
671 
672 	} while (count <= DMUB_TRACE_MAX_READ);
673 
674 	ASSERT(count <= DMUB_TRACE_MAX_READ);
675 }
676 
677 static int dm_set_clockgating_state(void *handle,
678 		  enum amd_clockgating_state state)
679 {
680 	return 0;
681 }
682 
683 static int dm_set_powergating_state(void *handle,
684 		  enum amd_powergating_state state)
685 {
686 	return 0;
687 }
688 
689 /* Prototypes of private functions */
690 static int dm_early_init(void* handle);
691 
692 /* Allocate memory for FBC compressed data  */
693 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
694 {
695 	struct drm_device *dev = connector->dev;
696 	struct amdgpu_device *adev = drm_to_adev(dev);
697 	struct dm_compressor_info *compressor = &adev->dm.compressor;
698 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
699 	struct drm_display_mode *mode;
700 	unsigned long max_size = 0;
701 
702 	if (adev->dm.dc->fbc_compressor == NULL)
703 		return;
704 
705 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
706 		return;
707 
708 	if (compressor->bo_ptr)
709 		return;
710 
711 
712 	list_for_each_entry(mode, &connector->modes, head) {
713 		if (max_size < mode->htotal * mode->vtotal)
714 			max_size = mode->htotal * mode->vtotal;
715 	}
716 
717 	if (max_size) {
718 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
719 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
720 			    &compressor->gpu_addr, &compressor->cpu_addr);
721 
722 		if (r)
723 			DRM_ERROR("DM: Failed to initialize FBC\n");
724 		else {
725 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
726 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
727 		}
728 
729 	}
730 
731 }
732 
733 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
734 					  int pipe, bool *enabled,
735 					  unsigned char *buf, int max_bytes)
736 {
737 	struct drm_device *dev = dev_get_drvdata(kdev);
738 	struct amdgpu_device *adev = drm_to_adev(dev);
739 	struct drm_connector *connector;
740 	struct drm_connector_list_iter conn_iter;
741 	struct amdgpu_dm_connector *aconnector;
742 	int ret = 0;
743 
744 	*enabled = false;
745 
746 	mutex_lock(&adev->dm.audio_lock);
747 
748 	drm_connector_list_iter_begin(dev, &conn_iter);
749 	drm_for_each_connector_iter(connector, &conn_iter) {
750 		aconnector = to_amdgpu_dm_connector(connector);
751 		if (aconnector->audio_inst != port)
752 			continue;
753 
754 		*enabled = true;
755 		ret = drm_eld_size(connector->eld);
756 		memcpy(buf, connector->eld, min(max_bytes, ret));
757 
758 		break;
759 	}
760 	drm_connector_list_iter_end(&conn_iter);
761 
762 	mutex_unlock(&adev->dm.audio_lock);
763 
764 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
765 
766 	return ret;
767 }
768 
769 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
770 	.get_eld = amdgpu_dm_audio_component_get_eld,
771 };
772 
773 static int amdgpu_dm_audio_component_bind(struct device *kdev,
774 				       struct device *hda_kdev, void *data)
775 {
776 	struct drm_device *dev = dev_get_drvdata(kdev);
777 	struct amdgpu_device *adev = drm_to_adev(dev);
778 	struct drm_audio_component *acomp = data;
779 
780 	acomp->ops = &amdgpu_dm_audio_component_ops;
781 	acomp->dev = kdev;
782 	adev->dm.audio_component = acomp;
783 
784 	return 0;
785 }
786 
787 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
788 					  struct device *hda_kdev, void *data)
789 {
790 	struct drm_device *dev = dev_get_drvdata(kdev);
791 	struct amdgpu_device *adev = drm_to_adev(dev);
792 	struct drm_audio_component *acomp = data;
793 
794 	acomp->ops = NULL;
795 	acomp->dev = NULL;
796 	adev->dm.audio_component = NULL;
797 }
798 
799 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
800 	.bind	= amdgpu_dm_audio_component_bind,
801 	.unbind	= amdgpu_dm_audio_component_unbind,
802 };
803 
804 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
805 {
806 	int i, ret;
807 
808 	if (!amdgpu_audio)
809 		return 0;
810 
811 	adev->mode_info.audio.enabled = true;
812 
813 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
814 
815 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
816 		adev->mode_info.audio.pin[i].channels = -1;
817 		adev->mode_info.audio.pin[i].rate = -1;
818 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
819 		adev->mode_info.audio.pin[i].status_bits = 0;
820 		adev->mode_info.audio.pin[i].category_code = 0;
821 		adev->mode_info.audio.pin[i].connected = false;
822 		adev->mode_info.audio.pin[i].id =
823 			adev->dm.dc->res_pool->audios[i]->inst;
824 		adev->mode_info.audio.pin[i].offset = 0;
825 	}
826 
827 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
828 	if (ret < 0)
829 		return ret;
830 
831 	adev->dm.audio_registered = true;
832 
833 	return 0;
834 }
835 
836 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
837 {
838 	if (!amdgpu_audio)
839 		return;
840 
841 	if (!adev->mode_info.audio.enabled)
842 		return;
843 
844 	if (adev->dm.audio_registered) {
845 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
846 		adev->dm.audio_registered = false;
847 	}
848 
849 	/* TODO: Disable audio? */
850 
851 	adev->mode_info.audio.enabled = false;
852 }
853 
854 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
855 {
856 	struct drm_audio_component *acomp = adev->dm.audio_component;
857 
858 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
859 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
860 
861 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
862 						 pin, -1);
863 	}
864 }
865 
866 static int dm_dmub_hw_init(struct amdgpu_device *adev)
867 {
868 	const struct dmcub_firmware_header_v1_0 *hdr;
869 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
870 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
871 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
872 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
873 	struct abm *abm = adev->dm.dc->res_pool->abm;
874 	struct dmub_srv_hw_params hw_params;
875 	enum dmub_status status;
876 	const unsigned char *fw_inst_const, *fw_bss_data;
877 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
878 	bool has_hw_support;
879 
880 	if (!dmub_srv)
881 		/* DMUB isn't supported on the ASIC. */
882 		return 0;
883 
884 	if (!fb_info) {
885 		DRM_ERROR("No framebuffer info for DMUB service.\n");
886 		return -EINVAL;
887 	}
888 
889 	if (!dmub_fw) {
890 		/* Firmware required for DMUB support. */
891 		DRM_ERROR("No firmware provided for DMUB.\n");
892 		return -EINVAL;
893 	}
894 
895 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
896 	if (status != DMUB_STATUS_OK) {
897 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
898 		return -EINVAL;
899 	}
900 
901 	if (!has_hw_support) {
902 		DRM_INFO("DMUB unsupported on ASIC\n");
903 		return 0;
904 	}
905 
906 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
907 
908 	fw_inst_const = dmub_fw->data +
909 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
910 			PSP_HEADER_BYTES;
911 
912 	fw_bss_data = dmub_fw->data +
913 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
914 		      le32_to_cpu(hdr->inst_const_bytes);
915 
916 	/* Copy firmware and bios info into FB memory. */
917 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
918 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
919 
920 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
921 
922 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
923 	 * amdgpu_ucode_init_single_fw will load dmub firmware
924 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
925 	 * will be done by dm_dmub_hw_init
926 	 */
927 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
928 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
929 				fw_inst_const_size);
930 	}
931 
932 	if (fw_bss_data_size)
933 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
934 		       fw_bss_data, fw_bss_data_size);
935 
936 	/* Copy firmware bios info into FB memory. */
937 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
938 	       adev->bios_size);
939 
940 	/* Reset regions that need to be reset. */
941 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
942 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
943 
944 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
945 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
946 
947 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
948 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
949 
950 	/* Initialize hardware. */
951 	memset(&hw_params, 0, sizeof(hw_params));
952 	hw_params.fb_base = adev->gmc.fb_start;
953 	hw_params.fb_offset = adev->gmc.aper_base;
954 
955 	/* backdoor load firmware and trigger dmub running */
956 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
957 		hw_params.load_inst_const = true;
958 
959 	if (dmcu)
960 		hw_params.psp_version = dmcu->psp_version;
961 
962 	for (i = 0; i < fb_info->num_fb; ++i)
963 		hw_params.fb[i] = &fb_info->fb[i];
964 
965 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
966 	if (status != DMUB_STATUS_OK) {
967 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
968 		return -EINVAL;
969 	}
970 
971 	/* Wait for firmware load to finish. */
972 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
973 	if (status != DMUB_STATUS_OK)
974 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
975 
976 	/* Init DMCU and ABM if available. */
977 	if (dmcu && abm) {
978 		dmcu->funcs->dmcu_init(dmcu);
979 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
980 	}
981 
982 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
983 	if (!adev->dm.dc->ctx->dmub_srv) {
984 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
985 		return -ENOMEM;
986 	}
987 
988 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
989 		 adev->dm.dmcub_fw_version);
990 
991 	return 0;
992 }
993 
994 #if defined(CONFIG_DRM_AMD_DC_DCN)
995 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
996 {
997 	uint64_t pt_base;
998 	uint32_t logical_addr_low;
999 	uint32_t logical_addr_high;
1000 	uint32_t agp_base, agp_bot, agp_top;
1001 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1002 
1003 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1004 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1005 
1006 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1007 		/*
1008 		 * Raven2 has a HW issue that it is unable to use the vram which
1009 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1010 		 * workaround that increase system aperture high address (add 1)
1011 		 * to get rid of the VM fault and hardware hang.
1012 		 */
1013 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1014 	else
1015 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1016 
1017 	agp_base = 0;
1018 	agp_bot = adev->gmc.agp_start >> 24;
1019 	agp_top = adev->gmc.agp_end >> 24;
1020 
1021 
1022 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1023 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1024 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1025 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1026 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1027 	page_table_base.low_part = lower_32_bits(pt_base);
1028 
1029 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1030 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1031 
1032 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1033 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1034 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1035 
1036 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1037 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1038 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1039 
1040 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1041 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1042 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1043 
1044 	pa_config->is_hvm_enabled = 0;
1045 
1046 }
1047 #endif
1048 #if defined(CONFIG_DRM_AMD_DC_DCN)
1049 static void event_mall_stutter(struct work_struct *work)
1050 {
1051 
1052 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1053 	struct amdgpu_display_manager *dm = vblank_work->dm;
1054 
1055 	mutex_lock(&dm->dc_lock);
1056 
1057 	if (vblank_work->enable)
1058 		dm->active_vblank_irq_count++;
1059 	else if(dm->active_vblank_irq_count)
1060 		dm->active_vblank_irq_count--;
1061 
1062 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1063 
1064 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1065 
1066 	mutex_unlock(&dm->dc_lock);
1067 }
1068 
1069 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1070 {
1071 
1072 	int max_caps = dc->caps.max_links;
1073 	struct vblank_workqueue *vblank_work;
1074 	int i = 0;
1075 
1076 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1077 	if (ZERO_OR_NULL_PTR(vblank_work)) {
1078 		kfree(vblank_work);
1079 		return NULL;
1080 	}
1081 
1082 	for (i = 0; i < max_caps; i++)
1083 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1084 
1085 	return vblank_work;
1086 }
1087 #endif
1088 static int amdgpu_dm_init(struct amdgpu_device *adev)
1089 {
1090 	struct dc_init_data init_data;
1091 #ifdef CONFIG_DRM_AMD_DC_HDCP
1092 	struct dc_callback_init init_params;
1093 #endif
1094 	int r;
1095 
1096 	adev->dm.ddev = adev_to_drm(adev);
1097 	adev->dm.adev = adev;
1098 
1099 	/* Zero all the fields */
1100 	memset(&init_data, 0, sizeof(init_data));
1101 #ifdef CONFIG_DRM_AMD_DC_HDCP
1102 	memset(&init_params, 0, sizeof(init_params));
1103 #endif
1104 
1105 	mutex_init(&adev->dm.dc_lock);
1106 	mutex_init(&adev->dm.audio_lock);
1107 #if defined(CONFIG_DRM_AMD_DC_DCN)
1108 	spin_lock_init(&adev->dm.vblank_lock);
1109 #endif
1110 
1111 	if(amdgpu_dm_irq_init(adev)) {
1112 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1113 		goto error;
1114 	}
1115 
1116 	init_data.asic_id.chip_family = adev->family;
1117 
1118 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1119 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1120 
1121 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1122 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1123 	init_data.asic_id.atombios_base_address =
1124 		adev->mode_info.atom_context->bios;
1125 
1126 	init_data.driver = adev;
1127 
1128 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1129 
1130 	if (!adev->dm.cgs_device) {
1131 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1132 		goto error;
1133 	}
1134 
1135 	init_data.cgs_device = adev->dm.cgs_device;
1136 
1137 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1138 
1139 	switch (adev->asic_type) {
1140 	case CHIP_CARRIZO:
1141 	case CHIP_STONEY:
1142 	case CHIP_RAVEN:
1143 	case CHIP_RENOIR:
1144 		init_data.flags.gpu_vm_support = true;
1145 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1146 			init_data.flags.disable_dmcu = true;
1147 		break;
1148 #if defined(CONFIG_DRM_AMD_DC_DCN)
1149 	case CHIP_VANGOGH:
1150 		init_data.flags.gpu_vm_support = true;
1151 		break;
1152 #endif
1153 	default:
1154 		break;
1155 	}
1156 
1157 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1158 		init_data.flags.fbc_support = true;
1159 
1160 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1161 		init_data.flags.multi_mon_pp_mclk_switch = true;
1162 
1163 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1164 		init_data.flags.disable_fractional_pwm = true;
1165 
1166 	init_data.flags.power_down_display_on_boot = true;
1167 
1168 	INIT_LIST_HEAD(&adev->dm.da_list);
1169 	/* Display Core create. */
1170 	adev->dm.dc = dc_create(&init_data);
1171 
1172 	if (adev->dm.dc) {
1173 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1174 	} else {
1175 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1176 		goto error;
1177 	}
1178 
1179 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1180 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1181 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1182 	}
1183 
1184 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1185 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1186 
1187 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1188 		adev->dm.dc->debug.disable_stutter = true;
1189 
1190 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1191 		adev->dm.dc->debug.disable_dsc = true;
1192 
1193 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1194 		adev->dm.dc->debug.disable_clock_gate = true;
1195 
1196 	r = dm_dmub_hw_init(adev);
1197 	if (r) {
1198 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1199 		goto error;
1200 	}
1201 
1202 	dc_hardware_init(adev->dm.dc);
1203 
1204 #if defined(CONFIG_DRM_AMD_DC_DCN)
1205 	if (adev->apu_flags) {
1206 		struct dc_phy_addr_space_config pa_config;
1207 
1208 		mmhub_read_system_context(adev, &pa_config);
1209 
1210 		// Call the DC init_memory func
1211 		dc_setup_system_context(adev->dm.dc, &pa_config);
1212 	}
1213 #endif
1214 
1215 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1216 	if (!adev->dm.freesync_module) {
1217 		DRM_ERROR(
1218 		"amdgpu: failed to initialize freesync_module.\n");
1219 	} else
1220 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1221 				adev->dm.freesync_module);
1222 
1223 	amdgpu_dm_init_color_mod();
1224 
1225 #if defined(CONFIG_DRM_AMD_DC_DCN)
1226 	if (adev->dm.dc->caps.max_links > 0) {
1227 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1228 
1229 		if (!adev->dm.vblank_workqueue)
1230 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1231 		else
1232 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1233 	}
1234 #endif
1235 
1236 #ifdef CONFIG_DRM_AMD_DC_HDCP
1237 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1238 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1239 
1240 		if (!adev->dm.hdcp_workqueue)
1241 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1242 		else
1243 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1244 
1245 		dc_init_callbacks(adev->dm.dc, &init_params);
1246 	}
1247 #endif
1248 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1249 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1250 #endif
1251 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1252 		init_completion(&adev->dm.dmub_aux_transfer_done);
1253 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1254 		if (!adev->dm.dmub_notify) {
1255 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1256 			goto error;
1257 		}
1258 		amdgpu_dm_outbox_init(adev);
1259 	}
1260 
1261 	if (amdgpu_dm_initialize_drm_device(adev)) {
1262 		DRM_ERROR(
1263 		"amdgpu: failed to initialize sw for display support.\n");
1264 		goto error;
1265 	}
1266 
1267 	/* create fake encoders for MST */
1268 	dm_dp_create_fake_mst_encoders(adev);
1269 
1270 	/* TODO: Add_display_info? */
1271 
1272 	/* TODO use dynamic cursor width */
1273 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1274 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1275 
1276 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1277 		DRM_ERROR(
1278 		"amdgpu: failed to initialize sw for display support.\n");
1279 		goto error;
1280 	}
1281 
1282 
1283 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1284 
1285 	return 0;
1286 error:
1287 	amdgpu_dm_fini(adev);
1288 
1289 	return -EINVAL;
1290 }
1291 
1292 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1293 {
1294 	int i;
1295 
1296 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1297 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1298 	}
1299 
1300 	amdgpu_dm_audio_fini(adev);
1301 
1302 	amdgpu_dm_destroy_drm_device(&adev->dm);
1303 
1304 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1305 	if (adev->dm.crc_rd_wrk) {
1306 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1307 		kfree(adev->dm.crc_rd_wrk);
1308 		adev->dm.crc_rd_wrk = NULL;
1309 	}
1310 #endif
1311 #ifdef CONFIG_DRM_AMD_DC_HDCP
1312 	if (adev->dm.hdcp_workqueue) {
1313 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1314 		adev->dm.hdcp_workqueue = NULL;
1315 	}
1316 
1317 	if (adev->dm.dc)
1318 		dc_deinit_callbacks(adev->dm.dc);
1319 #endif
1320 
1321 #if defined(CONFIG_DRM_AMD_DC_DCN)
1322 	if (adev->dm.vblank_workqueue) {
1323 		adev->dm.vblank_workqueue->dm = NULL;
1324 		kfree(adev->dm.vblank_workqueue);
1325 		adev->dm.vblank_workqueue = NULL;
1326 	}
1327 #endif
1328 
1329 	if (adev->dm.dc->ctx->dmub_srv) {
1330 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1331 		adev->dm.dc->ctx->dmub_srv = NULL;
1332 	}
1333 
1334 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1335 		kfree(adev->dm.dmub_notify);
1336 		adev->dm.dmub_notify = NULL;
1337 	}
1338 
1339 	if (adev->dm.dmub_bo)
1340 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1341 				      &adev->dm.dmub_bo_gpu_addr,
1342 				      &adev->dm.dmub_bo_cpu_addr);
1343 
1344 	/* DC Destroy TODO: Replace destroy DAL */
1345 	if (adev->dm.dc)
1346 		dc_destroy(&adev->dm.dc);
1347 	/*
1348 	 * TODO: pageflip, vlank interrupt
1349 	 *
1350 	 * amdgpu_dm_irq_fini(adev);
1351 	 */
1352 
1353 	if (adev->dm.cgs_device) {
1354 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1355 		adev->dm.cgs_device = NULL;
1356 	}
1357 	if (adev->dm.freesync_module) {
1358 		mod_freesync_destroy(adev->dm.freesync_module);
1359 		adev->dm.freesync_module = NULL;
1360 	}
1361 
1362 	mutex_destroy(&adev->dm.audio_lock);
1363 	mutex_destroy(&adev->dm.dc_lock);
1364 
1365 	return;
1366 }
1367 
1368 static int load_dmcu_fw(struct amdgpu_device *adev)
1369 {
1370 	const char *fw_name_dmcu = NULL;
1371 	int r;
1372 	const struct dmcu_firmware_header_v1_0 *hdr;
1373 
1374 	switch(adev->asic_type) {
1375 #if defined(CONFIG_DRM_AMD_DC_SI)
1376 	case CHIP_TAHITI:
1377 	case CHIP_PITCAIRN:
1378 	case CHIP_VERDE:
1379 	case CHIP_OLAND:
1380 #endif
1381 	case CHIP_BONAIRE:
1382 	case CHIP_HAWAII:
1383 	case CHIP_KAVERI:
1384 	case CHIP_KABINI:
1385 	case CHIP_MULLINS:
1386 	case CHIP_TONGA:
1387 	case CHIP_FIJI:
1388 	case CHIP_CARRIZO:
1389 	case CHIP_STONEY:
1390 	case CHIP_POLARIS11:
1391 	case CHIP_POLARIS10:
1392 	case CHIP_POLARIS12:
1393 	case CHIP_VEGAM:
1394 	case CHIP_VEGA10:
1395 	case CHIP_VEGA12:
1396 	case CHIP_VEGA20:
1397 	case CHIP_NAVI10:
1398 	case CHIP_NAVI14:
1399 	case CHIP_RENOIR:
1400 	case CHIP_SIENNA_CICHLID:
1401 	case CHIP_NAVY_FLOUNDER:
1402 	case CHIP_DIMGREY_CAVEFISH:
1403 	case CHIP_VANGOGH:
1404 		return 0;
1405 	case CHIP_NAVI12:
1406 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1407 		break;
1408 	case CHIP_RAVEN:
1409 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1410 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1411 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1412 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1413 		else
1414 			return 0;
1415 		break;
1416 	default:
1417 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1418 		return -EINVAL;
1419 	}
1420 
1421 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1422 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1423 		return 0;
1424 	}
1425 
1426 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1427 	if (r == -ENOENT) {
1428 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1429 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1430 		adev->dm.fw_dmcu = NULL;
1431 		return 0;
1432 	}
1433 	if (r) {
1434 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1435 			fw_name_dmcu);
1436 		return r;
1437 	}
1438 
1439 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1440 	if (r) {
1441 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1442 			fw_name_dmcu);
1443 		release_firmware(adev->dm.fw_dmcu);
1444 		adev->dm.fw_dmcu = NULL;
1445 		return r;
1446 	}
1447 
1448 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1449 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1450 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1451 	adev->firmware.fw_size +=
1452 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1453 
1454 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1455 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1456 	adev->firmware.fw_size +=
1457 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1458 
1459 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1460 
1461 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1462 
1463 	return 0;
1464 }
1465 
1466 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1467 {
1468 	struct amdgpu_device *adev = ctx;
1469 
1470 	return dm_read_reg(adev->dm.dc->ctx, address);
1471 }
1472 
1473 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1474 				     uint32_t value)
1475 {
1476 	struct amdgpu_device *adev = ctx;
1477 
1478 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1479 }
1480 
1481 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1482 {
1483 	struct dmub_srv_create_params create_params;
1484 	struct dmub_srv_region_params region_params;
1485 	struct dmub_srv_region_info region_info;
1486 	struct dmub_srv_fb_params fb_params;
1487 	struct dmub_srv_fb_info *fb_info;
1488 	struct dmub_srv *dmub_srv;
1489 	const struct dmcub_firmware_header_v1_0 *hdr;
1490 	const char *fw_name_dmub;
1491 	enum dmub_asic dmub_asic;
1492 	enum dmub_status status;
1493 	int r;
1494 
1495 	switch (adev->asic_type) {
1496 	case CHIP_RENOIR:
1497 		dmub_asic = DMUB_ASIC_DCN21;
1498 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1499 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1500 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1501 		break;
1502 	case CHIP_SIENNA_CICHLID:
1503 		dmub_asic = DMUB_ASIC_DCN30;
1504 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1505 		break;
1506 	case CHIP_NAVY_FLOUNDER:
1507 		dmub_asic = DMUB_ASIC_DCN30;
1508 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1509 		break;
1510 	case CHIP_VANGOGH:
1511 		dmub_asic = DMUB_ASIC_DCN301;
1512 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1513 		break;
1514 	case CHIP_DIMGREY_CAVEFISH:
1515 		dmub_asic = DMUB_ASIC_DCN302;
1516 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1517 		break;
1518 
1519 	default:
1520 		/* ASIC doesn't support DMUB. */
1521 		return 0;
1522 	}
1523 
1524 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1525 	if (r) {
1526 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1527 		return 0;
1528 	}
1529 
1530 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1531 	if (r) {
1532 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1533 		return 0;
1534 	}
1535 
1536 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1537 
1538 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1539 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1540 			AMDGPU_UCODE_ID_DMCUB;
1541 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1542 			adev->dm.dmub_fw;
1543 		adev->firmware.fw_size +=
1544 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1545 
1546 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1547 			 adev->dm.dmcub_fw_version);
1548 	}
1549 
1550 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1551 
1552 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1553 	dmub_srv = adev->dm.dmub_srv;
1554 
1555 	if (!dmub_srv) {
1556 		DRM_ERROR("Failed to allocate DMUB service!\n");
1557 		return -ENOMEM;
1558 	}
1559 
1560 	memset(&create_params, 0, sizeof(create_params));
1561 	create_params.user_ctx = adev;
1562 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1563 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1564 	create_params.asic = dmub_asic;
1565 
1566 	/* Create the DMUB service. */
1567 	status = dmub_srv_create(dmub_srv, &create_params);
1568 	if (status != DMUB_STATUS_OK) {
1569 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1570 		return -EINVAL;
1571 	}
1572 
1573 	/* Calculate the size of all the regions for the DMUB service. */
1574 	memset(&region_params, 0, sizeof(region_params));
1575 
1576 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1577 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1578 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1579 	region_params.vbios_size = adev->bios_size;
1580 	region_params.fw_bss_data = region_params.bss_data_size ?
1581 		adev->dm.dmub_fw->data +
1582 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1583 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1584 	region_params.fw_inst_const =
1585 		adev->dm.dmub_fw->data +
1586 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1587 		PSP_HEADER_BYTES;
1588 
1589 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1590 					   &region_info);
1591 
1592 	if (status != DMUB_STATUS_OK) {
1593 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1594 		return -EINVAL;
1595 	}
1596 
1597 	/*
1598 	 * Allocate a framebuffer based on the total size of all the regions.
1599 	 * TODO: Move this into GART.
1600 	 */
1601 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1602 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1603 				    &adev->dm.dmub_bo_gpu_addr,
1604 				    &adev->dm.dmub_bo_cpu_addr);
1605 	if (r)
1606 		return r;
1607 
1608 	/* Rebase the regions on the framebuffer address. */
1609 	memset(&fb_params, 0, sizeof(fb_params));
1610 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1611 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1612 	fb_params.region_info = &region_info;
1613 
1614 	adev->dm.dmub_fb_info =
1615 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1616 	fb_info = adev->dm.dmub_fb_info;
1617 
1618 	if (!fb_info) {
1619 		DRM_ERROR(
1620 			"Failed to allocate framebuffer info for DMUB service!\n");
1621 		return -ENOMEM;
1622 	}
1623 
1624 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1625 	if (status != DMUB_STATUS_OK) {
1626 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1627 		return -EINVAL;
1628 	}
1629 
1630 	return 0;
1631 }
1632 
1633 static int dm_sw_init(void *handle)
1634 {
1635 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1636 	int r;
1637 
1638 	r = dm_dmub_sw_init(adev);
1639 	if (r)
1640 		return r;
1641 
1642 	return load_dmcu_fw(adev);
1643 }
1644 
1645 static int dm_sw_fini(void *handle)
1646 {
1647 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1648 
1649 	kfree(adev->dm.dmub_fb_info);
1650 	adev->dm.dmub_fb_info = NULL;
1651 
1652 	if (adev->dm.dmub_srv) {
1653 		dmub_srv_destroy(adev->dm.dmub_srv);
1654 		adev->dm.dmub_srv = NULL;
1655 	}
1656 
1657 	release_firmware(adev->dm.dmub_fw);
1658 	adev->dm.dmub_fw = NULL;
1659 
1660 	release_firmware(adev->dm.fw_dmcu);
1661 	adev->dm.fw_dmcu = NULL;
1662 
1663 	return 0;
1664 }
1665 
1666 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1667 {
1668 	struct amdgpu_dm_connector *aconnector;
1669 	struct drm_connector *connector;
1670 	struct drm_connector_list_iter iter;
1671 	int ret = 0;
1672 
1673 	drm_connector_list_iter_begin(dev, &iter);
1674 	drm_for_each_connector_iter(connector, &iter) {
1675 		aconnector = to_amdgpu_dm_connector(connector);
1676 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1677 		    aconnector->mst_mgr.aux) {
1678 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1679 					 aconnector,
1680 					 aconnector->base.base.id);
1681 
1682 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1683 			if (ret < 0) {
1684 				DRM_ERROR("DM_MST: Failed to start MST\n");
1685 				aconnector->dc_link->type =
1686 					dc_connection_single;
1687 				break;
1688 			}
1689 		}
1690 	}
1691 	drm_connector_list_iter_end(&iter);
1692 
1693 	return ret;
1694 }
1695 
1696 static int dm_late_init(void *handle)
1697 {
1698 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1699 
1700 	struct dmcu_iram_parameters params;
1701 	unsigned int linear_lut[16];
1702 	int i;
1703 	struct dmcu *dmcu = NULL;
1704 	bool ret = true;
1705 
1706 	dmcu = adev->dm.dc->res_pool->dmcu;
1707 
1708 	for (i = 0; i < 16; i++)
1709 		linear_lut[i] = 0xFFFF * i / 15;
1710 
1711 	params.set = 0;
1712 	params.backlight_ramping_start = 0xCCCC;
1713 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1714 	params.backlight_lut_array_size = 16;
1715 	params.backlight_lut_array = linear_lut;
1716 
1717 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1718 	 * 0xFFFF x 0.01 = 0x28F
1719 	 */
1720 	params.min_abm_backlight = 0x28F;
1721 
1722 	/* In the case where abm is implemented on dmcub,
1723 	 * dmcu object will be null.
1724 	 * ABM 2.4 and up are implemented on dmcub.
1725 	 */
1726 	if (dmcu)
1727 		ret = dmcu_load_iram(dmcu, params);
1728 	else if (adev->dm.dc->ctx->dmub_srv)
1729 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1730 
1731 	if (!ret)
1732 		return -EINVAL;
1733 
1734 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1735 }
1736 
1737 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1738 {
1739 	struct amdgpu_dm_connector *aconnector;
1740 	struct drm_connector *connector;
1741 	struct drm_connector_list_iter iter;
1742 	struct drm_dp_mst_topology_mgr *mgr;
1743 	int ret;
1744 	bool need_hotplug = false;
1745 
1746 	drm_connector_list_iter_begin(dev, &iter);
1747 	drm_for_each_connector_iter(connector, &iter) {
1748 		aconnector = to_amdgpu_dm_connector(connector);
1749 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1750 		    aconnector->mst_port)
1751 			continue;
1752 
1753 		mgr = &aconnector->mst_mgr;
1754 
1755 		if (suspend) {
1756 			drm_dp_mst_topology_mgr_suspend(mgr);
1757 		} else {
1758 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1759 			if (ret < 0) {
1760 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1761 				need_hotplug = true;
1762 			}
1763 		}
1764 	}
1765 	drm_connector_list_iter_end(&iter);
1766 
1767 	if (need_hotplug)
1768 		drm_kms_helper_hotplug_event(dev);
1769 }
1770 
1771 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1772 {
1773 	struct smu_context *smu = &adev->smu;
1774 	int ret = 0;
1775 
1776 	if (!is_support_sw_smu(adev))
1777 		return 0;
1778 
1779 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1780 	 * on window driver dc implementation.
1781 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1782 	 * should be passed to smu during boot up and resume from s3.
1783 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1784 	 * dcn20_resource_construct
1785 	 * then call pplib functions below to pass the settings to smu:
1786 	 * smu_set_watermarks_for_clock_ranges
1787 	 * smu_set_watermarks_table
1788 	 * navi10_set_watermarks_table
1789 	 * smu_write_watermarks_table
1790 	 *
1791 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1792 	 * dc has implemented different flow for window driver:
1793 	 * dc_hardware_init / dc_set_power_state
1794 	 * dcn10_init_hw
1795 	 * notify_wm_ranges
1796 	 * set_wm_ranges
1797 	 * -- Linux
1798 	 * smu_set_watermarks_for_clock_ranges
1799 	 * renoir_set_watermarks_table
1800 	 * smu_write_watermarks_table
1801 	 *
1802 	 * For Linux,
1803 	 * dc_hardware_init -> amdgpu_dm_init
1804 	 * dc_set_power_state --> dm_resume
1805 	 *
1806 	 * therefore, this function apply to navi10/12/14 but not Renoir
1807 	 * *
1808 	 */
1809 	switch(adev->asic_type) {
1810 	case CHIP_NAVI10:
1811 	case CHIP_NAVI14:
1812 	case CHIP_NAVI12:
1813 		break;
1814 	default:
1815 		return 0;
1816 	}
1817 
1818 	ret = smu_write_watermarks_table(smu);
1819 	if (ret) {
1820 		DRM_ERROR("Failed to update WMTABLE!\n");
1821 		return ret;
1822 	}
1823 
1824 	return 0;
1825 }
1826 
1827 /**
1828  * dm_hw_init() - Initialize DC device
1829  * @handle: The base driver device containing the amdgpu_dm device.
1830  *
1831  * Initialize the &struct amdgpu_display_manager device. This involves calling
1832  * the initializers of each DM component, then populating the struct with them.
1833  *
1834  * Although the function implies hardware initialization, both hardware and
1835  * software are initialized here. Splitting them out to their relevant init
1836  * hooks is a future TODO item.
1837  *
1838  * Some notable things that are initialized here:
1839  *
1840  * - Display Core, both software and hardware
1841  * - DC modules that we need (freesync and color management)
1842  * - DRM software states
1843  * - Interrupt sources and handlers
1844  * - Vblank support
1845  * - Debug FS entries, if enabled
1846  */
1847 static int dm_hw_init(void *handle)
1848 {
1849 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1850 	/* Create DAL display manager */
1851 	amdgpu_dm_init(adev);
1852 	amdgpu_dm_hpd_init(adev);
1853 
1854 	return 0;
1855 }
1856 
1857 /**
1858  * dm_hw_fini() - Teardown DC device
1859  * @handle: The base driver device containing the amdgpu_dm device.
1860  *
1861  * Teardown components within &struct amdgpu_display_manager that require
1862  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1863  * were loaded. Also flush IRQ workqueues and disable them.
1864  */
1865 static int dm_hw_fini(void *handle)
1866 {
1867 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1868 
1869 	amdgpu_dm_hpd_fini(adev);
1870 
1871 	amdgpu_dm_irq_fini(adev);
1872 	amdgpu_dm_fini(adev);
1873 	return 0;
1874 }
1875 
1876 
1877 static int dm_enable_vblank(struct drm_crtc *crtc);
1878 static void dm_disable_vblank(struct drm_crtc *crtc);
1879 
1880 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1881 				 struct dc_state *state, bool enable)
1882 {
1883 	enum dc_irq_source irq_source;
1884 	struct amdgpu_crtc *acrtc;
1885 	int rc = -EBUSY;
1886 	int i = 0;
1887 
1888 	for (i = 0; i < state->stream_count; i++) {
1889 		acrtc = get_crtc_by_otg_inst(
1890 				adev, state->stream_status[i].primary_otg_inst);
1891 
1892 		if (acrtc && state->stream_status[i].plane_count != 0) {
1893 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1894 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1895 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1896 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1897 			if (rc)
1898 				DRM_WARN("Failed to %s pflip interrupts\n",
1899 					 enable ? "enable" : "disable");
1900 
1901 			if (enable) {
1902 				rc = dm_enable_vblank(&acrtc->base);
1903 				if (rc)
1904 					DRM_WARN("Failed to enable vblank interrupts\n");
1905 			} else {
1906 				dm_disable_vblank(&acrtc->base);
1907 			}
1908 
1909 		}
1910 	}
1911 
1912 }
1913 
1914 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1915 {
1916 	struct dc_state *context = NULL;
1917 	enum dc_status res = DC_ERROR_UNEXPECTED;
1918 	int i;
1919 	struct dc_stream_state *del_streams[MAX_PIPES];
1920 	int del_streams_count = 0;
1921 
1922 	memset(del_streams, 0, sizeof(del_streams));
1923 
1924 	context = dc_create_state(dc);
1925 	if (context == NULL)
1926 		goto context_alloc_fail;
1927 
1928 	dc_resource_state_copy_construct_current(dc, context);
1929 
1930 	/* First remove from context all streams */
1931 	for (i = 0; i < context->stream_count; i++) {
1932 		struct dc_stream_state *stream = context->streams[i];
1933 
1934 		del_streams[del_streams_count++] = stream;
1935 	}
1936 
1937 	/* Remove all planes for removed streams and then remove the streams */
1938 	for (i = 0; i < del_streams_count; i++) {
1939 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1940 			res = DC_FAIL_DETACH_SURFACES;
1941 			goto fail;
1942 		}
1943 
1944 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1945 		if (res != DC_OK)
1946 			goto fail;
1947 	}
1948 
1949 
1950 	res = dc_validate_global_state(dc, context, false);
1951 
1952 	if (res != DC_OK) {
1953 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1954 		goto fail;
1955 	}
1956 
1957 	res = dc_commit_state(dc, context);
1958 
1959 fail:
1960 	dc_release_state(context);
1961 
1962 context_alloc_fail:
1963 	return res;
1964 }
1965 
1966 static int dm_suspend(void *handle)
1967 {
1968 	struct amdgpu_device *adev = handle;
1969 	struct amdgpu_display_manager *dm = &adev->dm;
1970 	int ret = 0;
1971 
1972 	if (amdgpu_in_reset(adev)) {
1973 		mutex_lock(&dm->dc_lock);
1974 
1975 #if defined(CONFIG_DRM_AMD_DC_DCN)
1976 		dc_allow_idle_optimizations(adev->dm.dc, false);
1977 #endif
1978 
1979 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1980 
1981 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1982 
1983 		amdgpu_dm_commit_zero_streams(dm->dc);
1984 
1985 		amdgpu_dm_irq_suspend(adev);
1986 
1987 		return ret;
1988 	}
1989 
1990 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1991 	amdgpu_dm_crtc_secure_display_suspend(adev);
1992 #endif
1993 	WARN_ON(adev->dm.cached_state);
1994 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1995 
1996 	s3_handle_mst(adev_to_drm(adev), true);
1997 
1998 	amdgpu_dm_irq_suspend(adev);
1999 
2000 
2001 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2002 
2003 	return 0;
2004 }
2005 
2006 static struct amdgpu_dm_connector *
2007 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2008 					     struct drm_crtc *crtc)
2009 {
2010 	uint32_t i;
2011 	struct drm_connector_state *new_con_state;
2012 	struct drm_connector *connector;
2013 	struct drm_crtc *crtc_from_state;
2014 
2015 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2016 		crtc_from_state = new_con_state->crtc;
2017 
2018 		if (crtc_from_state == crtc)
2019 			return to_amdgpu_dm_connector(connector);
2020 	}
2021 
2022 	return NULL;
2023 }
2024 
2025 static void emulated_link_detect(struct dc_link *link)
2026 {
2027 	struct dc_sink_init_data sink_init_data = { 0 };
2028 	struct display_sink_capability sink_caps = { 0 };
2029 	enum dc_edid_status edid_status;
2030 	struct dc_context *dc_ctx = link->ctx;
2031 	struct dc_sink *sink = NULL;
2032 	struct dc_sink *prev_sink = NULL;
2033 
2034 	link->type = dc_connection_none;
2035 	prev_sink = link->local_sink;
2036 
2037 	if (prev_sink)
2038 		dc_sink_release(prev_sink);
2039 
2040 	switch (link->connector_signal) {
2041 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2042 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2043 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2044 		break;
2045 	}
2046 
2047 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2048 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2049 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2050 		break;
2051 	}
2052 
2053 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2054 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2055 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2056 		break;
2057 	}
2058 
2059 	case SIGNAL_TYPE_LVDS: {
2060 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2061 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2062 		break;
2063 	}
2064 
2065 	case SIGNAL_TYPE_EDP: {
2066 		sink_caps.transaction_type =
2067 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2068 		sink_caps.signal = SIGNAL_TYPE_EDP;
2069 		break;
2070 	}
2071 
2072 	case SIGNAL_TYPE_DISPLAY_PORT: {
2073 		sink_caps.transaction_type =
2074 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2075 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2076 		break;
2077 	}
2078 
2079 	default:
2080 		DC_ERROR("Invalid connector type! signal:%d\n",
2081 			link->connector_signal);
2082 		return;
2083 	}
2084 
2085 	sink_init_data.link = link;
2086 	sink_init_data.sink_signal = sink_caps.signal;
2087 
2088 	sink = dc_sink_create(&sink_init_data);
2089 	if (!sink) {
2090 		DC_ERROR("Failed to create sink!\n");
2091 		return;
2092 	}
2093 
2094 	/* dc_sink_create returns a new reference */
2095 	link->local_sink = sink;
2096 
2097 	edid_status = dm_helpers_read_local_edid(
2098 			link->ctx,
2099 			link,
2100 			sink);
2101 
2102 	if (edid_status != EDID_OK)
2103 		DC_ERROR("Failed to read EDID");
2104 
2105 }
2106 
2107 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2108 				     struct amdgpu_display_manager *dm)
2109 {
2110 	struct {
2111 		struct dc_surface_update surface_updates[MAX_SURFACES];
2112 		struct dc_plane_info plane_infos[MAX_SURFACES];
2113 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2114 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2115 		struct dc_stream_update stream_update;
2116 	} * bundle;
2117 	int k, m;
2118 
2119 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2120 
2121 	if (!bundle) {
2122 		dm_error("Failed to allocate update bundle\n");
2123 		goto cleanup;
2124 	}
2125 
2126 	for (k = 0; k < dc_state->stream_count; k++) {
2127 		bundle->stream_update.stream = dc_state->streams[k];
2128 
2129 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2130 			bundle->surface_updates[m].surface =
2131 				dc_state->stream_status->plane_states[m];
2132 			bundle->surface_updates[m].surface->force_full_update =
2133 				true;
2134 		}
2135 		dc_commit_updates_for_stream(
2136 			dm->dc, bundle->surface_updates,
2137 			dc_state->stream_status->plane_count,
2138 			dc_state->streams[k], &bundle->stream_update, dc_state);
2139 	}
2140 
2141 cleanup:
2142 	kfree(bundle);
2143 
2144 	return;
2145 }
2146 
2147 static void dm_set_dpms_off(struct dc_link *link)
2148 {
2149 	struct dc_stream_state *stream_state;
2150 	struct amdgpu_dm_connector *aconnector = link->priv;
2151 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2152 	struct dc_stream_update stream_update;
2153 	bool dpms_off = true;
2154 
2155 	memset(&stream_update, 0, sizeof(stream_update));
2156 	stream_update.dpms_off = &dpms_off;
2157 
2158 	mutex_lock(&adev->dm.dc_lock);
2159 	stream_state = dc_stream_find_from_link(link);
2160 
2161 	if (stream_state == NULL) {
2162 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2163 		mutex_unlock(&adev->dm.dc_lock);
2164 		return;
2165 	}
2166 
2167 	stream_update.stream = stream_state;
2168 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2169 				     stream_state, &stream_update,
2170 				     stream_state->ctx->dc->current_state);
2171 	mutex_unlock(&adev->dm.dc_lock);
2172 }
2173 
2174 static int dm_resume(void *handle)
2175 {
2176 	struct amdgpu_device *adev = handle;
2177 	struct drm_device *ddev = adev_to_drm(adev);
2178 	struct amdgpu_display_manager *dm = &adev->dm;
2179 	struct amdgpu_dm_connector *aconnector;
2180 	struct drm_connector *connector;
2181 	struct drm_connector_list_iter iter;
2182 	struct drm_crtc *crtc;
2183 	struct drm_crtc_state *new_crtc_state;
2184 	struct dm_crtc_state *dm_new_crtc_state;
2185 	struct drm_plane *plane;
2186 	struct drm_plane_state *new_plane_state;
2187 	struct dm_plane_state *dm_new_plane_state;
2188 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2189 	enum dc_connection_type new_connection_type = dc_connection_none;
2190 	struct dc_state *dc_state;
2191 	int i, r, j;
2192 
2193 	if (amdgpu_in_reset(adev)) {
2194 		dc_state = dm->cached_dc_state;
2195 
2196 		r = dm_dmub_hw_init(adev);
2197 		if (r)
2198 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2199 
2200 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2201 		dc_resume(dm->dc);
2202 
2203 		amdgpu_dm_irq_resume_early(adev);
2204 
2205 		for (i = 0; i < dc_state->stream_count; i++) {
2206 			dc_state->streams[i]->mode_changed = true;
2207 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2208 				dc_state->stream_status->plane_states[j]->update_flags.raw
2209 					= 0xffffffff;
2210 			}
2211 		}
2212 
2213 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2214 
2215 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2216 
2217 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2218 
2219 		dc_release_state(dm->cached_dc_state);
2220 		dm->cached_dc_state = NULL;
2221 
2222 		amdgpu_dm_irq_resume_late(adev);
2223 
2224 		mutex_unlock(&dm->dc_lock);
2225 
2226 		return 0;
2227 	}
2228 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2229 	dc_release_state(dm_state->context);
2230 	dm_state->context = dc_create_state(dm->dc);
2231 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2232 	dc_resource_state_construct(dm->dc, dm_state->context);
2233 
2234 	/* Before powering on DC we need to re-initialize DMUB. */
2235 	r = dm_dmub_hw_init(adev);
2236 	if (r)
2237 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2238 
2239 	/* power on hardware */
2240 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2241 
2242 	/* program HPD filter */
2243 	dc_resume(dm->dc);
2244 
2245 	/*
2246 	 * early enable HPD Rx IRQ, should be done before set mode as short
2247 	 * pulse interrupts are used for MST
2248 	 */
2249 	amdgpu_dm_irq_resume_early(adev);
2250 
2251 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2252 	s3_handle_mst(ddev, false);
2253 
2254 	/* Do detection*/
2255 	drm_connector_list_iter_begin(ddev, &iter);
2256 	drm_for_each_connector_iter(connector, &iter) {
2257 		aconnector = to_amdgpu_dm_connector(connector);
2258 
2259 		/*
2260 		 * this is the case when traversing through already created
2261 		 * MST connectors, should be skipped
2262 		 */
2263 		if (aconnector->mst_port)
2264 			continue;
2265 
2266 		mutex_lock(&aconnector->hpd_lock);
2267 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2268 			DRM_ERROR("KMS: Failed to detect connector\n");
2269 
2270 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2271 			emulated_link_detect(aconnector->dc_link);
2272 		else
2273 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2274 
2275 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2276 			aconnector->fake_enable = false;
2277 
2278 		if (aconnector->dc_sink)
2279 			dc_sink_release(aconnector->dc_sink);
2280 		aconnector->dc_sink = NULL;
2281 		amdgpu_dm_update_connector_after_detect(aconnector);
2282 		mutex_unlock(&aconnector->hpd_lock);
2283 	}
2284 	drm_connector_list_iter_end(&iter);
2285 
2286 	/* Force mode set in atomic commit */
2287 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2288 		new_crtc_state->active_changed = true;
2289 
2290 	/*
2291 	 * atomic_check is expected to create the dc states. We need to release
2292 	 * them here, since they were duplicated as part of the suspend
2293 	 * procedure.
2294 	 */
2295 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2296 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2297 		if (dm_new_crtc_state->stream) {
2298 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2299 			dc_stream_release(dm_new_crtc_state->stream);
2300 			dm_new_crtc_state->stream = NULL;
2301 		}
2302 	}
2303 
2304 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2305 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2306 		if (dm_new_plane_state->dc_state) {
2307 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2308 			dc_plane_state_release(dm_new_plane_state->dc_state);
2309 			dm_new_plane_state->dc_state = NULL;
2310 		}
2311 	}
2312 
2313 	drm_atomic_helper_resume(ddev, dm->cached_state);
2314 
2315 	dm->cached_state = NULL;
2316 
2317 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2318 	amdgpu_dm_crtc_secure_display_resume(adev);
2319 #endif
2320 
2321 	amdgpu_dm_irq_resume_late(adev);
2322 
2323 	amdgpu_dm_smu_write_watermarks_table(adev);
2324 
2325 	return 0;
2326 }
2327 
2328 /**
2329  * DOC: DM Lifecycle
2330  *
2331  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2332  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2333  * the base driver's device list to be initialized and torn down accordingly.
2334  *
2335  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2336  */
2337 
2338 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2339 	.name = "dm",
2340 	.early_init = dm_early_init,
2341 	.late_init = dm_late_init,
2342 	.sw_init = dm_sw_init,
2343 	.sw_fini = dm_sw_fini,
2344 	.hw_init = dm_hw_init,
2345 	.hw_fini = dm_hw_fini,
2346 	.suspend = dm_suspend,
2347 	.resume = dm_resume,
2348 	.is_idle = dm_is_idle,
2349 	.wait_for_idle = dm_wait_for_idle,
2350 	.check_soft_reset = dm_check_soft_reset,
2351 	.soft_reset = dm_soft_reset,
2352 	.set_clockgating_state = dm_set_clockgating_state,
2353 	.set_powergating_state = dm_set_powergating_state,
2354 };
2355 
2356 const struct amdgpu_ip_block_version dm_ip_block =
2357 {
2358 	.type = AMD_IP_BLOCK_TYPE_DCE,
2359 	.major = 1,
2360 	.minor = 0,
2361 	.rev = 0,
2362 	.funcs = &amdgpu_dm_funcs,
2363 };
2364 
2365 
2366 /**
2367  * DOC: atomic
2368  *
2369  * *WIP*
2370  */
2371 
2372 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2373 	.fb_create = amdgpu_display_user_framebuffer_create,
2374 	.get_format_info = amd_get_format_info,
2375 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2376 	.atomic_check = amdgpu_dm_atomic_check,
2377 	.atomic_commit = drm_atomic_helper_commit,
2378 };
2379 
2380 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2381 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2382 };
2383 
2384 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2385 {
2386 	u32 max_cll, min_cll, max, min, q, r;
2387 	struct amdgpu_dm_backlight_caps *caps;
2388 	struct amdgpu_display_manager *dm;
2389 	struct drm_connector *conn_base;
2390 	struct amdgpu_device *adev;
2391 	struct dc_link *link = NULL;
2392 	static const u8 pre_computed_values[] = {
2393 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2394 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2395 
2396 	if (!aconnector || !aconnector->dc_link)
2397 		return;
2398 
2399 	link = aconnector->dc_link;
2400 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2401 		return;
2402 
2403 	conn_base = &aconnector->base;
2404 	adev = drm_to_adev(conn_base->dev);
2405 	dm = &adev->dm;
2406 	caps = &dm->backlight_caps;
2407 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2408 	caps->aux_support = false;
2409 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2410 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2411 
2412 	if (caps->ext_caps->bits.oled == 1 ||
2413 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2414 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2415 		caps->aux_support = true;
2416 
2417 	if (amdgpu_backlight == 0)
2418 		caps->aux_support = false;
2419 	else if (amdgpu_backlight == 1)
2420 		caps->aux_support = true;
2421 
2422 	/* From the specification (CTA-861-G), for calculating the maximum
2423 	 * luminance we need to use:
2424 	 *	Luminance = 50*2**(CV/32)
2425 	 * Where CV is a one-byte value.
2426 	 * For calculating this expression we may need float point precision;
2427 	 * to avoid this complexity level, we take advantage that CV is divided
2428 	 * by a constant. From the Euclids division algorithm, we know that CV
2429 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2430 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2431 	 * need to pre-compute the value of r/32. For pre-computing the values
2432 	 * We just used the following Ruby line:
2433 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2434 	 * The results of the above expressions can be verified at
2435 	 * pre_computed_values.
2436 	 */
2437 	q = max_cll >> 5;
2438 	r = max_cll % 32;
2439 	max = (1 << q) * pre_computed_values[r];
2440 
2441 	// min luminance: maxLum * (CV/255)^2 / 100
2442 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2443 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2444 
2445 	caps->aux_max_input_signal = max;
2446 	caps->aux_min_input_signal = min;
2447 }
2448 
2449 void amdgpu_dm_update_connector_after_detect(
2450 		struct amdgpu_dm_connector *aconnector)
2451 {
2452 	struct drm_connector *connector = &aconnector->base;
2453 	struct drm_device *dev = connector->dev;
2454 	struct dc_sink *sink;
2455 
2456 	/* MST handled by drm_mst framework */
2457 	if (aconnector->mst_mgr.mst_state == true)
2458 		return;
2459 
2460 	sink = aconnector->dc_link->local_sink;
2461 	if (sink)
2462 		dc_sink_retain(sink);
2463 
2464 	/*
2465 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2466 	 * the connector sink is set to either fake or physical sink depends on link status.
2467 	 * Skip if already done during boot.
2468 	 */
2469 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2470 			&& aconnector->dc_em_sink) {
2471 
2472 		/*
2473 		 * For S3 resume with headless use eml_sink to fake stream
2474 		 * because on resume connector->sink is set to NULL
2475 		 */
2476 		mutex_lock(&dev->mode_config.mutex);
2477 
2478 		if (sink) {
2479 			if (aconnector->dc_sink) {
2480 				amdgpu_dm_update_freesync_caps(connector, NULL);
2481 				/*
2482 				 * retain and release below are used to
2483 				 * bump up refcount for sink because the link doesn't point
2484 				 * to it anymore after disconnect, so on next crtc to connector
2485 				 * reshuffle by UMD we will get into unwanted dc_sink release
2486 				 */
2487 				dc_sink_release(aconnector->dc_sink);
2488 			}
2489 			aconnector->dc_sink = sink;
2490 			dc_sink_retain(aconnector->dc_sink);
2491 			amdgpu_dm_update_freesync_caps(connector,
2492 					aconnector->edid);
2493 		} else {
2494 			amdgpu_dm_update_freesync_caps(connector, NULL);
2495 			if (!aconnector->dc_sink) {
2496 				aconnector->dc_sink = aconnector->dc_em_sink;
2497 				dc_sink_retain(aconnector->dc_sink);
2498 			}
2499 		}
2500 
2501 		mutex_unlock(&dev->mode_config.mutex);
2502 
2503 		if (sink)
2504 			dc_sink_release(sink);
2505 		return;
2506 	}
2507 
2508 	/*
2509 	 * TODO: temporary guard to look for proper fix
2510 	 * if this sink is MST sink, we should not do anything
2511 	 */
2512 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2513 		dc_sink_release(sink);
2514 		return;
2515 	}
2516 
2517 	if (aconnector->dc_sink == sink) {
2518 		/*
2519 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2520 		 * Do nothing!!
2521 		 */
2522 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2523 				aconnector->connector_id);
2524 		if (sink)
2525 			dc_sink_release(sink);
2526 		return;
2527 	}
2528 
2529 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2530 		aconnector->connector_id, aconnector->dc_sink, sink);
2531 
2532 	mutex_lock(&dev->mode_config.mutex);
2533 
2534 	/*
2535 	 * 1. Update status of the drm connector
2536 	 * 2. Send an event and let userspace tell us what to do
2537 	 */
2538 	if (sink) {
2539 		/*
2540 		 * TODO: check if we still need the S3 mode update workaround.
2541 		 * If yes, put it here.
2542 		 */
2543 		if (aconnector->dc_sink) {
2544 			amdgpu_dm_update_freesync_caps(connector, NULL);
2545 			dc_sink_release(aconnector->dc_sink);
2546 		}
2547 
2548 		aconnector->dc_sink = sink;
2549 		dc_sink_retain(aconnector->dc_sink);
2550 		if (sink->dc_edid.length == 0) {
2551 			aconnector->edid = NULL;
2552 			if (aconnector->dc_link->aux_mode) {
2553 				drm_dp_cec_unset_edid(
2554 					&aconnector->dm_dp_aux.aux);
2555 			}
2556 		} else {
2557 			aconnector->edid =
2558 				(struct edid *)sink->dc_edid.raw_edid;
2559 
2560 			drm_connector_update_edid_property(connector,
2561 							   aconnector->edid);
2562 			if (aconnector->dc_link->aux_mode)
2563 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2564 						    aconnector->edid);
2565 		}
2566 
2567 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2568 		update_connector_ext_caps(aconnector);
2569 	} else {
2570 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2571 		amdgpu_dm_update_freesync_caps(connector, NULL);
2572 		drm_connector_update_edid_property(connector, NULL);
2573 		aconnector->num_modes = 0;
2574 		dc_sink_release(aconnector->dc_sink);
2575 		aconnector->dc_sink = NULL;
2576 		aconnector->edid = NULL;
2577 #ifdef CONFIG_DRM_AMD_DC_HDCP
2578 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2579 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2580 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2581 #endif
2582 	}
2583 
2584 	mutex_unlock(&dev->mode_config.mutex);
2585 
2586 	update_subconnector_property(aconnector);
2587 
2588 	if (sink)
2589 		dc_sink_release(sink);
2590 }
2591 
2592 static void handle_hpd_irq(void *param)
2593 {
2594 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2595 	struct drm_connector *connector = &aconnector->base;
2596 	struct drm_device *dev = connector->dev;
2597 	enum dc_connection_type new_connection_type = dc_connection_none;
2598 	struct amdgpu_device *adev = drm_to_adev(dev);
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2601 #endif
2602 
2603 	if (adev->dm.disable_hpd_irq)
2604 		return;
2605 
2606 	/*
2607 	 * In case of failure or MST no need to update connector status or notify the OS
2608 	 * since (for MST case) MST does this in its own context.
2609 	 */
2610 	mutex_lock(&aconnector->hpd_lock);
2611 
2612 #ifdef CONFIG_DRM_AMD_DC_HDCP
2613 	if (adev->dm.hdcp_workqueue) {
2614 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2615 		dm_con_state->update_hdcp = true;
2616 	}
2617 #endif
2618 	if (aconnector->fake_enable)
2619 		aconnector->fake_enable = false;
2620 
2621 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2622 		DRM_ERROR("KMS: Failed to detect connector\n");
2623 
2624 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2625 		emulated_link_detect(aconnector->dc_link);
2626 
2627 
2628 		drm_modeset_lock_all(dev);
2629 		dm_restore_drm_connector_state(dev, connector);
2630 		drm_modeset_unlock_all(dev);
2631 
2632 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2633 			drm_kms_helper_hotplug_event(dev);
2634 
2635 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2636 		if (new_connection_type == dc_connection_none &&
2637 		    aconnector->dc_link->type == dc_connection_none)
2638 			dm_set_dpms_off(aconnector->dc_link);
2639 
2640 		amdgpu_dm_update_connector_after_detect(aconnector);
2641 
2642 		drm_modeset_lock_all(dev);
2643 		dm_restore_drm_connector_state(dev, connector);
2644 		drm_modeset_unlock_all(dev);
2645 
2646 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2647 			drm_kms_helper_hotplug_event(dev);
2648 	}
2649 	mutex_unlock(&aconnector->hpd_lock);
2650 
2651 }
2652 
2653 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2654 {
2655 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2656 	uint8_t dret;
2657 	bool new_irq_handled = false;
2658 	int dpcd_addr;
2659 	int dpcd_bytes_to_read;
2660 
2661 	const int max_process_count = 30;
2662 	int process_count = 0;
2663 
2664 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2665 
2666 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2667 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2668 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2669 		dpcd_addr = DP_SINK_COUNT;
2670 	} else {
2671 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2672 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2673 		dpcd_addr = DP_SINK_COUNT_ESI;
2674 	}
2675 
2676 	dret = drm_dp_dpcd_read(
2677 		&aconnector->dm_dp_aux.aux,
2678 		dpcd_addr,
2679 		esi,
2680 		dpcd_bytes_to_read);
2681 
2682 	while (dret == dpcd_bytes_to_read &&
2683 		process_count < max_process_count) {
2684 		uint8_t retry;
2685 		dret = 0;
2686 
2687 		process_count++;
2688 
2689 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2690 		/* handle HPD short pulse irq */
2691 		if (aconnector->mst_mgr.mst_state)
2692 			drm_dp_mst_hpd_irq(
2693 				&aconnector->mst_mgr,
2694 				esi,
2695 				&new_irq_handled);
2696 
2697 		if (new_irq_handled) {
2698 			/* ACK at DPCD to notify down stream */
2699 			const int ack_dpcd_bytes_to_write =
2700 				dpcd_bytes_to_read - 1;
2701 
2702 			for (retry = 0; retry < 3; retry++) {
2703 				uint8_t wret;
2704 
2705 				wret = drm_dp_dpcd_write(
2706 					&aconnector->dm_dp_aux.aux,
2707 					dpcd_addr + 1,
2708 					&esi[1],
2709 					ack_dpcd_bytes_to_write);
2710 				if (wret == ack_dpcd_bytes_to_write)
2711 					break;
2712 			}
2713 
2714 			/* check if there is new irq to be handled */
2715 			dret = drm_dp_dpcd_read(
2716 				&aconnector->dm_dp_aux.aux,
2717 				dpcd_addr,
2718 				esi,
2719 				dpcd_bytes_to_read);
2720 
2721 			new_irq_handled = false;
2722 		} else {
2723 			break;
2724 		}
2725 	}
2726 
2727 	if (process_count == max_process_count)
2728 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2729 }
2730 
2731 static void handle_hpd_rx_irq(void *param)
2732 {
2733 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2734 	struct drm_connector *connector = &aconnector->base;
2735 	struct drm_device *dev = connector->dev;
2736 	struct dc_link *dc_link = aconnector->dc_link;
2737 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2738 	bool result = false;
2739 	enum dc_connection_type new_connection_type = dc_connection_none;
2740 	struct amdgpu_device *adev = drm_to_adev(dev);
2741 	union hpd_irq_data hpd_irq_data;
2742 
2743 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2744 
2745 	if (adev->dm.disable_hpd_irq)
2746 		return;
2747 
2748 
2749 	/*
2750 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2751 	 * conflict, after implement i2c helper, this mutex should be
2752 	 * retired.
2753 	 */
2754 	mutex_lock(&aconnector->hpd_lock);
2755 
2756 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2757 
2758 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2759 		(dc_link->type == dc_connection_mst_branch)) {
2760 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2761 			result = true;
2762 			dm_handle_hpd_rx_irq(aconnector);
2763 			goto out;
2764 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2765 			result = false;
2766 			dm_handle_hpd_rx_irq(aconnector);
2767 			goto out;
2768 		}
2769 	}
2770 
2771 	if (!amdgpu_in_reset(adev)) {
2772 		mutex_lock(&adev->dm.dc_lock);
2773 #ifdef CONFIG_DRM_AMD_DC_HDCP
2774 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2775 #else
2776 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2777 #endif
2778 		mutex_unlock(&adev->dm.dc_lock);
2779 	}
2780 
2781 out:
2782 	if (result && !is_mst_root_connector) {
2783 		/* Downstream Port status changed. */
2784 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2785 			DRM_ERROR("KMS: Failed to detect connector\n");
2786 
2787 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2788 			emulated_link_detect(dc_link);
2789 
2790 			if (aconnector->fake_enable)
2791 				aconnector->fake_enable = false;
2792 
2793 			amdgpu_dm_update_connector_after_detect(aconnector);
2794 
2795 
2796 			drm_modeset_lock_all(dev);
2797 			dm_restore_drm_connector_state(dev, connector);
2798 			drm_modeset_unlock_all(dev);
2799 
2800 			drm_kms_helper_hotplug_event(dev);
2801 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2802 
2803 			if (aconnector->fake_enable)
2804 				aconnector->fake_enable = false;
2805 
2806 			amdgpu_dm_update_connector_after_detect(aconnector);
2807 
2808 
2809 			drm_modeset_lock_all(dev);
2810 			dm_restore_drm_connector_state(dev, connector);
2811 			drm_modeset_unlock_all(dev);
2812 
2813 			drm_kms_helper_hotplug_event(dev);
2814 		}
2815 	}
2816 #ifdef CONFIG_DRM_AMD_DC_HDCP
2817 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2818 		if (adev->dm.hdcp_workqueue)
2819 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2820 	}
2821 #endif
2822 
2823 	if (dc_link->type != dc_connection_mst_branch)
2824 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2825 
2826 	mutex_unlock(&aconnector->hpd_lock);
2827 }
2828 
2829 static void register_hpd_handlers(struct amdgpu_device *adev)
2830 {
2831 	struct drm_device *dev = adev_to_drm(adev);
2832 	struct drm_connector *connector;
2833 	struct amdgpu_dm_connector *aconnector;
2834 	const struct dc_link *dc_link;
2835 	struct dc_interrupt_params int_params = {0};
2836 
2837 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2838 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2839 
2840 	list_for_each_entry(connector,
2841 			&dev->mode_config.connector_list, head)	{
2842 
2843 		aconnector = to_amdgpu_dm_connector(connector);
2844 		dc_link = aconnector->dc_link;
2845 
2846 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2847 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2848 			int_params.irq_source = dc_link->irq_source_hpd;
2849 
2850 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2851 					handle_hpd_irq,
2852 					(void *) aconnector);
2853 		}
2854 
2855 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2856 
2857 			/* Also register for DP short pulse (hpd_rx). */
2858 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2859 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2860 
2861 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2862 					handle_hpd_rx_irq,
2863 					(void *) aconnector);
2864 		}
2865 	}
2866 }
2867 
2868 #if defined(CONFIG_DRM_AMD_DC_SI)
2869 /* Register IRQ sources and initialize IRQ callbacks */
2870 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2871 {
2872 	struct dc *dc = adev->dm.dc;
2873 	struct common_irq_params *c_irq_params;
2874 	struct dc_interrupt_params int_params = {0};
2875 	int r;
2876 	int i;
2877 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2878 
2879 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2880 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2881 
2882 	/*
2883 	 * Actions of amdgpu_irq_add_id():
2884 	 * 1. Register a set() function with base driver.
2885 	 *    Base driver will call set() function to enable/disable an
2886 	 *    interrupt in DC hardware.
2887 	 * 2. Register amdgpu_dm_irq_handler().
2888 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2889 	 *    coming from DC hardware.
2890 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2891 	 *    for acknowledging and handling. */
2892 
2893 	/* Use VBLANK interrupt */
2894 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2895 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2896 		if (r) {
2897 			DRM_ERROR("Failed to add crtc irq id!\n");
2898 			return r;
2899 		}
2900 
2901 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2902 		int_params.irq_source =
2903 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2904 
2905 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2906 
2907 		c_irq_params->adev = adev;
2908 		c_irq_params->irq_src = int_params.irq_source;
2909 
2910 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2911 				dm_crtc_high_irq, c_irq_params);
2912 	}
2913 
2914 	/* Use GRPH_PFLIP interrupt */
2915 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2916 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2917 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2918 		if (r) {
2919 			DRM_ERROR("Failed to add page flip irq id!\n");
2920 			return r;
2921 		}
2922 
2923 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2924 		int_params.irq_source =
2925 			dc_interrupt_to_irq_source(dc, i, 0);
2926 
2927 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2928 
2929 		c_irq_params->adev = adev;
2930 		c_irq_params->irq_src = int_params.irq_source;
2931 
2932 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2933 				dm_pflip_high_irq, c_irq_params);
2934 
2935 	}
2936 
2937 	/* HPD */
2938 	r = amdgpu_irq_add_id(adev, client_id,
2939 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2940 	if (r) {
2941 		DRM_ERROR("Failed to add hpd irq id!\n");
2942 		return r;
2943 	}
2944 
2945 	register_hpd_handlers(adev);
2946 
2947 	return 0;
2948 }
2949 #endif
2950 
2951 /* Register IRQ sources and initialize IRQ callbacks */
2952 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2953 {
2954 	struct dc *dc = adev->dm.dc;
2955 	struct common_irq_params *c_irq_params;
2956 	struct dc_interrupt_params int_params = {0};
2957 	int r;
2958 	int i;
2959 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2960 
2961 	if (adev->asic_type >= CHIP_VEGA10)
2962 		client_id = SOC15_IH_CLIENTID_DCE;
2963 
2964 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2965 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2966 
2967 	/*
2968 	 * Actions of amdgpu_irq_add_id():
2969 	 * 1. Register a set() function with base driver.
2970 	 *    Base driver will call set() function to enable/disable an
2971 	 *    interrupt in DC hardware.
2972 	 * 2. Register amdgpu_dm_irq_handler().
2973 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2974 	 *    coming from DC hardware.
2975 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2976 	 *    for acknowledging and handling. */
2977 
2978 	/* Use VBLANK interrupt */
2979 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2980 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2981 		if (r) {
2982 			DRM_ERROR("Failed to add crtc irq id!\n");
2983 			return r;
2984 		}
2985 
2986 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2987 		int_params.irq_source =
2988 			dc_interrupt_to_irq_source(dc, i, 0);
2989 
2990 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2991 
2992 		c_irq_params->adev = adev;
2993 		c_irq_params->irq_src = int_params.irq_source;
2994 
2995 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2996 				dm_crtc_high_irq, c_irq_params);
2997 	}
2998 
2999 	/* Use VUPDATE interrupt */
3000 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3001 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3002 		if (r) {
3003 			DRM_ERROR("Failed to add vupdate irq id!\n");
3004 			return r;
3005 		}
3006 
3007 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3008 		int_params.irq_source =
3009 			dc_interrupt_to_irq_source(dc, i, 0);
3010 
3011 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3012 
3013 		c_irq_params->adev = adev;
3014 		c_irq_params->irq_src = int_params.irq_source;
3015 
3016 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3017 				dm_vupdate_high_irq, c_irq_params);
3018 	}
3019 
3020 	/* Use GRPH_PFLIP interrupt */
3021 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3022 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3023 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3024 		if (r) {
3025 			DRM_ERROR("Failed to add page flip irq id!\n");
3026 			return r;
3027 		}
3028 
3029 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3030 		int_params.irq_source =
3031 			dc_interrupt_to_irq_source(dc, i, 0);
3032 
3033 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3034 
3035 		c_irq_params->adev = adev;
3036 		c_irq_params->irq_src = int_params.irq_source;
3037 
3038 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3039 				dm_pflip_high_irq, c_irq_params);
3040 
3041 	}
3042 
3043 	/* HPD */
3044 	r = amdgpu_irq_add_id(adev, client_id,
3045 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3046 	if (r) {
3047 		DRM_ERROR("Failed to add hpd irq id!\n");
3048 		return r;
3049 	}
3050 
3051 	register_hpd_handlers(adev);
3052 
3053 	return 0;
3054 }
3055 
3056 #if defined(CONFIG_DRM_AMD_DC_DCN)
3057 /* Register IRQ sources and initialize IRQ callbacks */
3058 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3059 {
3060 	struct dc *dc = adev->dm.dc;
3061 	struct common_irq_params *c_irq_params;
3062 	struct dc_interrupt_params int_params = {0};
3063 	int r;
3064 	int i;
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066 	static const unsigned int vrtl_int_srcid[] = {
3067 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3068 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3069 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3070 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3071 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3072 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3073 	};
3074 #endif
3075 
3076 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3077 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3078 
3079 	/*
3080 	 * Actions of amdgpu_irq_add_id():
3081 	 * 1. Register a set() function with base driver.
3082 	 *    Base driver will call set() function to enable/disable an
3083 	 *    interrupt in DC hardware.
3084 	 * 2. Register amdgpu_dm_irq_handler().
3085 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3086 	 *    coming from DC hardware.
3087 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3088 	 *    for acknowledging and handling.
3089 	 */
3090 
3091 	/* Use VSTARTUP interrupt */
3092 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3093 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3094 			i++) {
3095 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3096 
3097 		if (r) {
3098 			DRM_ERROR("Failed to add crtc irq id!\n");
3099 			return r;
3100 		}
3101 
3102 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3103 		int_params.irq_source =
3104 			dc_interrupt_to_irq_source(dc, i, 0);
3105 
3106 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3107 
3108 		c_irq_params->adev = adev;
3109 		c_irq_params->irq_src = int_params.irq_source;
3110 
3111 		amdgpu_dm_irq_register_interrupt(
3112 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3113 	}
3114 
3115 	/* Use otg vertical line interrupt */
3116 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3117 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3118 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3119 				vrtl_int_srcid[i], &adev->vline0_irq);
3120 
3121 		if (r) {
3122 			DRM_ERROR("Failed to add vline0 irq id!\n");
3123 			return r;
3124 		}
3125 
3126 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3127 		int_params.irq_source =
3128 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3129 
3130 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3131 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3132 			break;
3133 		}
3134 
3135 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3136 					- DC_IRQ_SOURCE_DC1_VLINE0];
3137 
3138 		c_irq_params->adev = adev;
3139 		c_irq_params->irq_src = int_params.irq_source;
3140 
3141 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3142 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3143 	}
3144 #endif
3145 
3146 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3147 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3148 	 * to trigger at end of each vblank, regardless of state of the lock,
3149 	 * matching DCE behaviour.
3150 	 */
3151 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3152 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3153 	     i++) {
3154 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3155 
3156 		if (r) {
3157 			DRM_ERROR("Failed to add vupdate irq id!\n");
3158 			return r;
3159 		}
3160 
3161 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3162 		int_params.irq_source =
3163 			dc_interrupt_to_irq_source(dc, i, 0);
3164 
3165 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3166 
3167 		c_irq_params->adev = adev;
3168 		c_irq_params->irq_src = int_params.irq_source;
3169 
3170 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3171 				dm_vupdate_high_irq, c_irq_params);
3172 	}
3173 
3174 	/* Use GRPH_PFLIP interrupt */
3175 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3176 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3177 			i++) {
3178 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3179 		if (r) {
3180 			DRM_ERROR("Failed to add page flip irq id!\n");
3181 			return r;
3182 		}
3183 
3184 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3185 		int_params.irq_source =
3186 			dc_interrupt_to_irq_source(dc, i, 0);
3187 
3188 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3189 
3190 		c_irq_params->adev = adev;
3191 		c_irq_params->irq_src = int_params.irq_source;
3192 
3193 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3194 				dm_pflip_high_irq, c_irq_params);
3195 
3196 	}
3197 
3198 	/* HPD */
3199 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3200 			&adev->hpd_irq);
3201 	if (r) {
3202 		DRM_ERROR("Failed to add hpd irq id!\n");
3203 		return r;
3204 	}
3205 
3206 	register_hpd_handlers(adev);
3207 
3208 	return 0;
3209 }
3210 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3211 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3212 {
3213 	struct dc *dc = adev->dm.dc;
3214 	struct common_irq_params *c_irq_params;
3215 	struct dc_interrupt_params int_params = {0};
3216 	int r, i;
3217 
3218 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3219 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3220 
3221 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3222 			&adev->dmub_outbox_irq);
3223 	if (r) {
3224 		DRM_ERROR("Failed to add outbox irq id!\n");
3225 		return r;
3226 	}
3227 
3228 	if (dc->ctx->dmub_srv) {
3229 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3230 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3231 		int_params.irq_source =
3232 		dc_interrupt_to_irq_source(dc, i, 0);
3233 
3234 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3235 
3236 		c_irq_params->adev = adev;
3237 		c_irq_params->irq_src = int_params.irq_source;
3238 
3239 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3240 				dm_dmub_outbox1_low_irq, c_irq_params);
3241 	}
3242 
3243 	return 0;
3244 }
3245 #endif
3246 
3247 /*
3248  * Acquires the lock for the atomic state object and returns
3249  * the new atomic state.
3250  *
3251  * This should only be called during atomic check.
3252  */
3253 static int dm_atomic_get_state(struct drm_atomic_state *state,
3254 			       struct dm_atomic_state **dm_state)
3255 {
3256 	struct drm_device *dev = state->dev;
3257 	struct amdgpu_device *adev = drm_to_adev(dev);
3258 	struct amdgpu_display_manager *dm = &adev->dm;
3259 	struct drm_private_state *priv_state;
3260 
3261 	if (*dm_state)
3262 		return 0;
3263 
3264 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3265 	if (IS_ERR(priv_state))
3266 		return PTR_ERR(priv_state);
3267 
3268 	*dm_state = to_dm_atomic_state(priv_state);
3269 
3270 	return 0;
3271 }
3272 
3273 static struct dm_atomic_state *
3274 dm_atomic_get_new_state(struct drm_atomic_state *state)
3275 {
3276 	struct drm_device *dev = state->dev;
3277 	struct amdgpu_device *adev = drm_to_adev(dev);
3278 	struct amdgpu_display_manager *dm = &adev->dm;
3279 	struct drm_private_obj *obj;
3280 	struct drm_private_state *new_obj_state;
3281 	int i;
3282 
3283 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3284 		if (obj->funcs == dm->atomic_obj.funcs)
3285 			return to_dm_atomic_state(new_obj_state);
3286 	}
3287 
3288 	return NULL;
3289 }
3290 
3291 static struct drm_private_state *
3292 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3293 {
3294 	struct dm_atomic_state *old_state, *new_state;
3295 
3296 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3297 	if (!new_state)
3298 		return NULL;
3299 
3300 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3301 
3302 	old_state = to_dm_atomic_state(obj->state);
3303 
3304 	if (old_state && old_state->context)
3305 		new_state->context = dc_copy_state(old_state->context);
3306 
3307 	if (!new_state->context) {
3308 		kfree(new_state);
3309 		return NULL;
3310 	}
3311 
3312 	return &new_state->base;
3313 }
3314 
3315 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3316 				    struct drm_private_state *state)
3317 {
3318 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3319 
3320 	if (dm_state && dm_state->context)
3321 		dc_release_state(dm_state->context);
3322 
3323 	kfree(dm_state);
3324 }
3325 
3326 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3327 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3328 	.atomic_destroy_state = dm_atomic_destroy_state,
3329 };
3330 
3331 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3332 {
3333 	struct dm_atomic_state *state;
3334 	int r;
3335 
3336 	adev->mode_info.mode_config_initialized = true;
3337 
3338 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3339 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3340 
3341 	adev_to_drm(adev)->mode_config.max_width = 16384;
3342 	adev_to_drm(adev)->mode_config.max_height = 16384;
3343 
3344 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3345 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3346 	/* indicates support for immediate flip */
3347 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3348 
3349 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3350 
3351 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3352 	if (!state)
3353 		return -ENOMEM;
3354 
3355 	state->context = dc_create_state(adev->dm.dc);
3356 	if (!state->context) {
3357 		kfree(state);
3358 		return -ENOMEM;
3359 	}
3360 
3361 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3362 
3363 	drm_atomic_private_obj_init(adev_to_drm(adev),
3364 				    &adev->dm.atomic_obj,
3365 				    &state->base,
3366 				    &dm_atomic_state_funcs);
3367 
3368 	r = amdgpu_display_modeset_create_props(adev);
3369 	if (r) {
3370 		dc_release_state(state->context);
3371 		kfree(state);
3372 		return r;
3373 	}
3374 
3375 	r = amdgpu_dm_audio_init(adev);
3376 	if (r) {
3377 		dc_release_state(state->context);
3378 		kfree(state);
3379 		return r;
3380 	}
3381 
3382 	return 0;
3383 }
3384 
3385 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3386 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3387 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3388 
3389 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3390 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3391 
3392 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3393 {
3394 #if defined(CONFIG_ACPI)
3395 	struct amdgpu_dm_backlight_caps caps;
3396 
3397 	memset(&caps, 0, sizeof(caps));
3398 
3399 	if (dm->backlight_caps.caps_valid)
3400 		return;
3401 
3402 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3403 	if (caps.caps_valid) {
3404 		dm->backlight_caps.caps_valid = true;
3405 		if (caps.aux_support)
3406 			return;
3407 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3408 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3409 	} else {
3410 		dm->backlight_caps.min_input_signal =
3411 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3412 		dm->backlight_caps.max_input_signal =
3413 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3414 	}
3415 #else
3416 	if (dm->backlight_caps.aux_support)
3417 		return;
3418 
3419 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3420 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3421 #endif
3422 }
3423 
3424 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3425 				unsigned *min, unsigned *max)
3426 {
3427 	if (!caps)
3428 		return 0;
3429 
3430 	if (caps->aux_support) {
3431 		// Firmware limits are in nits, DC API wants millinits.
3432 		*max = 1000 * caps->aux_max_input_signal;
3433 		*min = 1000 * caps->aux_min_input_signal;
3434 	} else {
3435 		// Firmware limits are 8-bit, PWM control is 16-bit.
3436 		*max = 0x101 * caps->max_input_signal;
3437 		*min = 0x101 * caps->min_input_signal;
3438 	}
3439 	return 1;
3440 }
3441 
3442 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3443 					uint32_t brightness)
3444 {
3445 	unsigned min, max;
3446 
3447 	if (!get_brightness_range(caps, &min, &max))
3448 		return brightness;
3449 
3450 	// Rescale 0..255 to min..max
3451 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3452 				       AMDGPU_MAX_BL_LEVEL);
3453 }
3454 
3455 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3456 				      uint32_t brightness)
3457 {
3458 	unsigned min, max;
3459 
3460 	if (!get_brightness_range(caps, &min, &max))
3461 		return brightness;
3462 
3463 	if (brightness < min)
3464 		return 0;
3465 	// Rescale min..max to 0..255
3466 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3467 				 max - min);
3468 }
3469 
3470 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3471 {
3472 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3473 	struct amdgpu_dm_backlight_caps caps;
3474 	struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3475 	u32 brightness;
3476 	bool rc;
3477 	int i;
3478 
3479 	amdgpu_dm_update_backlight_caps(dm);
3480 	caps = dm->backlight_caps;
3481 
3482 	for (i = 0; i < dm->num_of_edps; i++)
3483 		link[i] = (struct dc_link *)dm->backlight_link[i];
3484 
3485 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3486 	// Change brightness based on AUX property
3487 	if (caps.aux_support) {
3488 		for (i = 0; i < dm->num_of_edps; i++) {
3489 			rc = dc_link_set_backlight_level_nits(link[i], true, brightness,
3490 				AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3491 			if (!rc) {
3492 				DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3493 				break;
3494 			}
3495 		}
3496 	} else {
3497 		for (i = 0; i < dm->num_of_edps; i++) {
3498 			rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness, 0);
3499 			if (!rc) {
3500 				DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
3501 				break;
3502 			}
3503 		}
3504 	}
3505 
3506 	return rc ? 0 : 1;
3507 }
3508 
3509 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3510 {
3511 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3512 	struct amdgpu_dm_backlight_caps caps;
3513 
3514 	amdgpu_dm_update_backlight_caps(dm);
3515 	caps = dm->backlight_caps;
3516 
3517 	if (caps.aux_support) {
3518 		struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3519 		u32 avg, peak;
3520 		bool rc;
3521 
3522 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3523 		if (!rc)
3524 			return bd->props.brightness;
3525 		return convert_brightness_to_user(&caps, avg);
3526 	} else {
3527 		int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3528 
3529 		if (ret == DC_ERROR_UNEXPECTED)
3530 			return bd->props.brightness;
3531 		return convert_brightness_to_user(&caps, ret);
3532 	}
3533 }
3534 
3535 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3536 	.options = BL_CORE_SUSPENDRESUME,
3537 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3538 	.update_status	= amdgpu_dm_backlight_update_status,
3539 };
3540 
3541 static void
3542 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3543 {
3544 	char bl_name[16];
3545 	struct backlight_properties props = { 0 };
3546 
3547 	amdgpu_dm_update_backlight_caps(dm);
3548 
3549 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3550 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3551 	props.type = BACKLIGHT_RAW;
3552 
3553 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3554 		 adev_to_drm(dm->adev)->primary->index);
3555 
3556 	dm->backlight_dev = backlight_device_register(bl_name,
3557 						      adev_to_drm(dm->adev)->dev,
3558 						      dm,
3559 						      &amdgpu_dm_backlight_ops,
3560 						      &props);
3561 
3562 	if (IS_ERR(dm->backlight_dev))
3563 		DRM_ERROR("DM: Backlight registration failed!\n");
3564 	else
3565 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3566 }
3567 
3568 #endif
3569 
3570 static int initialize_plane(struct amdgpu_display_manager *dm,
3571 			    struct amdgpu_mode_info *mode_info, int plane_id,
3572 			    enum drm_plane_type plane_type,
3573 			    const struct dc_plane_cap *plane_cap)
3574 {
3575 	struct drm_plane *plane;
3576 	unsigned long possible_crtcs;
3577 	int ret = 0;
3578 
3579 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3580 	if (!plane) {
3581 		DRM_ERROR("KMS: Failed to allocate plane\n");
3582 		return -ENOMEM;
3583 	}
3584 	plane->type = plane_type;
3585 
3586 	/*
3587 	 * HACK: IGT tests expect that the primary plane for a CRTC
3588 	 * can only have one possible CRTC. Only expose support for
3589 	 * any CRTC if they're not going to be used as a primary plane
3590 	 * for a CRTC - like overlay or underlay planes.
3591 	 */
3592 	possible_crtcs = 1 << plane_id;
3593 	if (plane_id >= dm->dc->caps.max_streams)
3594 		possible_crtcs = 0xff;
3595 
3596 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3597 
3598 	if (ret) {
3599 		DRM_ERROR("KMS: Failed to initialize plane\n");
3600 		kfree(plane);
3601 		return ret;
3602 	}
3603 
3604 	if (mode_info)
3605 		mode_info->planes[plane_id] = plane;
3606 
3607 	return ret;
3608 }
3609 
3610 
3611 static void register_backlight_device(struct amdgpu_display_manager *dm,
3612 				      struct dc_link *link)
3613 {
3614 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3615 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3616 
3617 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3618 	    link->type != dc_connection_none) {
3619 		/*
3620 		 * Event if registration failed, we should continue with
3621 		 * DM initialization because not having a backlight control
3622 		 * is better then a black screen.
3623 		 */
3624 		if (!dm->backlight_dev)
3625 			amdgpu_dm_register_backlight_device(dm);
3626 
3627 		if (dm->backlight_dev) {
3628 			dm->backlight_link[dm->num_of_edps] = link;
3629 			dm->num_of_edps++;
3630 		}
3631 	}
3632 #endif
3633 }
3634 
3635 
3636 /*
3637  * In this architecture, the association
3638  * connector -> encoder -> crtc
3639  * id not really requried. The crtc and connector will hold the
3640  * display_index as an abstraction to use with DAL component
3641  *
3642  * Returns 0 on success
3643  */
3644 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3645 {
3646 	struct amdgpu_display_manager *dm = &adev->dm;
3647 	int32_t i;
3648 	struct amdgpu_dm_connector *aconnector = NULL;
3649 	struct amdgpu_encoder *aencoder = NULL;
3650 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3651 	uint32_t link_cnt;
3652 	int32_t primary_planes;
3653 	enum dc_connection_type new_connection_type = dc_connection_none;
3654 	const struct dc_plane_cap *plane;
3655 
3656 	dm->display_indexes_num = dm->dc->caps.max_streams;
3657 	/* Update the actual used number of crtc */
3658 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3659 
3660 	link_cnt = dm->dc->caps.max_links;
3661 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3662 		DRM_ERROR("DM: Failed to initialize mode config\n");
3663 		return -EINVAL;
3664 	}
3665 
3666 	/* There is one primary plane per CRTC */
3667 	primary_planes = dm->dc->caps.max_streams;
3668 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3669 
3670 	/*
3671 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3672 	 * Order is reversed to match iteration order in atomic check.
3673 	 */
3674 	for (i = (primary_planes - 1); i >= 0; i--) {
3675 		plane = &dm->dc->caps.planes[i];
3676 
3677 		if (initialize_plane(dm, mode_info, i,
3678 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3679 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3680 			goto fail;
3681 		}
3682 	}
3683 
3684 	/*
3685 	 * Initialize overlay planes, index starting after primary planes.
3686 	 * These planes have a higher DRM index than the primary planes since
3687 	 * they should be considered as having a higher z-order.
3688 	 * Order is reversed to match iteration order in atomic check.
3689 	 *
3690 	 * Only support DCN for now, and only expose one so we don't encourage
3691 	 * userspace to use up all the pipes.
3692 	 */
3693 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3694 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3695 
3696 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3697 			continue;
3698 
3699 		if (!plane->blends_with_above || !plane->blends_with_below)
3700 			continue;
3701 
3702 		if (!plane->pixel_format_support.argb8888)
3703 			continue;
3704 
3705 		if (initialize_plane(dm, NULL, primary_planes + i,
3706 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3707 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3708 			goto fail;
3709 		}
3710 
3711 		/* Only create one overlay plane. */
3712 		break;
3713 	}
3714 
3715 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3716 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3717 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3718 			goto fail;
3719 		}
3720 
3721 	/* Use Outbox interrupt */
3722 	switch (adev->asic_type) {
3723 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3724 	case CHIP_SIENNA_CICHLID:
3725 	case CHIP_NAVY_FLOUNDER:
3726 #endif
3727 	case CHIP_RENOIR:
3728 		if (register_outbox_irq_handlers(dm->adev)) {
3729 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3730 			goto fail;
3731 		}
3732 		break;
3733 	default:
3734 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3735 	}
3736 
3737 	/* loops over all connectors on the board */
3738 	for (i = 0; i < link_cnt; i++) {
3739 		struct dc_link *link = NULL;
3740 
3741 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3742 			DRM_ERROR(
3743 				"KMS: Cannot support more than %d display indexes\n",
3744 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3745 			continue;
3746 		}
3747 
3748 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3749 		if (!aconnector)
3750 			goto fail;
3751 
3752 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3753 		if (!aencoder)
3754 			goto fail;
3755 
3756 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3757 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3758 			goto fail;
3759 		}
3760 
3761 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3762 			DRM_ERROR("KMS: Failed to initialize connector\n");
3763 			goto fail;
3764 		}
3765 
3766 		link = dc_get_link_at_index(dm->dc, i);
3767 
3768 		if (!dc_link_detect_sink(link, &new_connection_type))
3769 			DRM_ERROR("KMS: Failed to detect connector\n");
3770 
3771 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3772 			emulated_link_detect(link);
3773 			amdgpu_dm_update_connector_after_detect(aconnector);
3774 
3775 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3776 			amdgpu_dm_update_connector_after_detect(aconnector);
3777 			register_backlight_device(dm, link);
3778 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3779 				amdgpu_dm_set_psr_caps(link);
3780 		}
3781 
3782 
3783 	}
3784 
3785 	/* Software is initialized. Now we can register interrupt handlers. */
3786 	switch (adev->asic_type) {
3787 #if defined(CONFIG_DRM_AMD_DC_SI)
3788 	case CHIP_TAHITI:
3789 	case CHIP_PITCAIRN:
3790 	case CHIP_VERDE:
3791 	case CHIP_OLAND:
3792 		if (dce60_register_irq_handlers(dm->adev)) {
3793 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3794 			goto fail;
3795 		}
3796 		break;
3797 #endif
3798 	case CHIP_BONAIRE:
3799 	case CHIP_HAWAII:
3800 	case CHIP_KAVERI:
3801 	case CHIP_KABINI:
3802 	case CHIP_MULLINS:
3803 	case CHIP_TONGA:
3804 	case CHIP_FIJI:
3805 	case CHIP_CARRIZO:
3806 	case CHIP_STONEY:
3807 	case CHIP_POLARIS11:
3808 	case CHIP_POLARIS10:
3809 	case CHIP_POLARIS12:
3810 	case CHIP_VEGAM:
3811 	case CHIP_VEGA10:
3812 	case CHIP_VEGA12:
3813 	case CHIP_VEGA20:
3814 		if (dce110_register_irq_handlers(dm->adev)) {
3815 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3816 			goto fail;
3817 		}
3818 		break;
3819 #if defined(CONFIG_DRM_AMD_DC_DCN)
3820 	case CHIP_RAVEN:
3821 	case CHIP_NAVI12:
3822 	case CHIP_NAVI10:
3823 	case CHIP_NAVI14:
3824 	case CHIP_RENOIR:
3825 	case CHIP_SIENNA_CICHLID:
3826 	case CHIP_NAVY_FLOUNDER:
3827 	case CHIP_DIMGREY_CAVEFISH:
3828 	case CHIP_VANGOGH:
3829 		if (dcn10_register_irq_handlers(dm->adev)) {
3830 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3831 			goto fail;
3832 		}
3833 		break;
3834 #endif
3835 	default:
3836 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3837 		goto fail;
3838 	}
3839 
3840 	return 0;
3841 fail:
3842 	kfree(aencoder);
3843 	kfree(aconnector);
3844 
3845 	return -EINVAL;
3846 }
3847 
3848 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3849 {
3850 	drm_mode_config_cleanup(dm->ddev);
3851 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3852 	return;
3853 }
3854 
3855 /******************************************************************************
3856  * amdgpu_display_funcs functions
3857  *****************************************************************************/
3858 
3859 /*
3860  * dm_bandwidth_update - program display watermarks
3861  *
3862  * @adev: amdgpu_device pointer
3863  *
3864  * Calculate and program the display watermarks and line buffer allocation.
3865  */
3866 static void dm_bandwidth_update(struct amdgpu_device *adev)
3867 {
3868 	/* TODO: implement later */
3869 }
3870 
3871 static const struct amdgpu_display_funcs dm_display_funcs = {
3872 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3873 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3874 	.backlight_set_level = NULL, /* never called for DC */
3875 	.backlight_get_level = NULL, /* never called for DC */
3876 	.hpd_sense = NULL,/* called unconditionally */
3877 	.hpd_set_polarity = NULL, /* called unconditionally */
3878 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3879 	.page_flip_get_scanoutpos =
3880 		dm_crtc_get_scanoutpos,/* called unconditionally */
3881 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3882 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3883 };
3884 
3885 #if defined(CONFIG_DEBUG_KERNEL_DC)
3886 
3887 static ssize_t s3_debug_store(struct device *device,
3888 			      struct device_attribute *attr,
3889 			      const char *buf,
3890 			      size_t count)
3891 {
3892 	int ret;
3893 	int s3_state;
3894 	struct drm_device *drm_dev = dev_get_drvdata(device);
3895 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3896 
3897 	ret = kstrtoint(buf, 0, &s3_state);
3898 
3899 	if (ret == 0) {
3900 		if (s3_state) {
3901 			dm_resume(adev);
3902 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3903 		} else
3904 			dm_suspend(adev);
3905 	}
3906 
3907 	return ret == 0 ? count : 0;
3908 }
3909 
3910 DEVICE_ATTR_WO(s3_debug);
3911 
3912 #endif
3913 
3914 static int dm_early_init(void *handle)
3915 {
3916 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3917 
3918 	switch (adev->asic_type) {
3919 #if defined(CONFIG_DRM_AMD_DC_SI)
3920 	case CHIP_TAHITI:
3921 	case CHIP_PITCAIRN:
3922 	case CHIP_VERDE:
3923 		adev->mode_info.num_crtc = 6;
3924 		adev->mode_info.num_hpd = 6;
3925 		adev->mode_info.num_dig = 6;
3926 		break;
3927 	case CHIP_OLAND:
3928 		adev->mode_info.num_crtc = 2;
3929 		adev->mode_info.num_hpd = 2;
3930 		adev->mode_info.num_dig = 2;
3931 		break;
3932 #endif
3933 	case CHIP_BONAIRE:
3934 	case CHIP_HAWAII:
3935 		adev->mode_info.num_crtc = 6;
3936 		adev->mode_info.num_hpd = 6;
3937 		adev->mode_info.num_dig = 6;
3938 		break;
3939 	case CHIP_KAVERI:
3940 		adev->mode_info.num_crtc = 4;
3941 		adev->mode_info.num_hpd = 6;
3942 		adev->mode_info.num_dig = 7;
3943 		break;
3944 	case CHIP_KABINI:
3945 	case CHIP_MULLINS:
3946 		adev->mode_info.num_crtc = 2;
3947 		adev->mode_info.num_hpd = 6;
3948 		adev->mode_info.num_dig = 6;
3949 		break;
3950 	case CHIP_FIJI:
3951 	case CHIP_TONGA:
3952 		adev->mode_info.num_crtc = 6;
3953 		adev->mode_info.num_hpd = 6;
3954 		adev->mode_info.num_dig = 7;
3955 		break;
3956 	case CHIP_CARRIZO:
3957 		adev->mode_info.num_crtc = 3;
3958 		adev->mode_info.num_hpd = 6;
3959 		adev->mode_info.num_dig = 9;
3960 		break;
3961 	case CHIP_STONEY:
3962 		adev->mode_info.num_crtc = 2;
3963 		adev->mode_info.num_hpd = 6;
3964 		adev->mode_info.num_dig = 9;
3965 		break;
3966 	case CHIP_POLARIS11:
3967 	case CHIP_POLARIS12:
3968 		adev->mode_info.num_crtc = 5;
3969 		adev->mode_info.num_hpd = 5;
3970 		adev->mode_info.num_dig = 5;
3971 		break;
3972 	case CHIP_POLARIS10:
3973 	case CHIP_VEGAM:
3974 		adev->mode_info.num_crtc = 6;
3975 		adev->mode_info.num_hpd = 6;
3976 		adev->mode_info.num_dig = 6;
3977 		break;
3978 	case CHIP_VEGA10:
3979 	case CHIP_VEGA12:
3980 	case CHIP_VEGA20:
3981 		adev->mode_info.num_crtc = 6;
3982 		adev->mode_info.num_hpd = 6;
3983 		adev->mode_info.num_dig = 6;
3984 		break;
3985 #if defined(CONFIG_DRM_AMD_DC_DCN)
3986 	case CHIP_RAVEN:
3987 	case CHIP_RENOIR:
3988 	case CHIP_VANGOGH:
3989 		adev->mode_info.num_crtc = 4;
3990 		adev->mode_info.num_hpd = 4;
3991 		adev->mode_info.num_dig = 4;
3992 		break;
3993 	case CHIP_NAVI10:
3994 	case CHIP_NAVI12:
3995 	case CHIP_SIENNA_CICHLID:
3996 	case CHIP_NAVY_FLOUNDER:
3997 		adev->mode_info.num_crtc = 6;
3998 		adev->mode_info.num_hpd = 6;
3999 		adev->mode_info.num_dig = 6;
4000 		break;
4001 	case CHIP_NAVI14:
4002 	case CHIP_DIMGREY_CAVEFISH:
4003 		adev->mode_info.num_crtc = 5;
4004 		adev->mode_info.num_hpd = 5;
4005 		adev->mode_info.num_dig = 5;
4006 		break;
4007 #endif
4008 	default:
4009 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4010 		return -EINVAL;
4011 	}
4012 
4013 	amdgpu_dm_set_irq_funcs(adev);
4014 
4015 	if (adev->mode_info.funcs == NULL)
4016 		adev->mode_info.funcs = &dm_display_funcs;
4017 
4018 	/*
4019 	 * Note: Do NOT change adev->audio_endpt_rreg and
4020 	 * adev->audio_endpt_wreg because they are initialised in
4021 	 * amdgpu_device_init()
4022 	 */
4023 #if defined(CONFIG_DEBUG_KERNEL_DC)
4024 	device_create_file(
4025 		adev_to_drm(adev)->dev,
4026 		&dev_attr_s3_debug);
4027 #endif
4028 
4029 	return 0;
4030 }
4031 
4032 static bool modeset_required(struct drm_crtc_state *crtc_state,
4033 			     struct dc_stream_state *new_stream,
4034 			     struct dc_stream_state *old_stream)
4035 {
4036 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4037 }
4038 
4039 static bool modereset_required(struct drm_crtc_state *crtc_state)
4040 {
4041 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4042 }
4043 
4044 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4045 {
4046 	drm_encoder_cleanup(encoder);
4047 	kfree(encoder);
4048 }
4049 
4050 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4051 	.destroy = amdgpu_dm_encoder_destroy,
4052 };
4053 
4054 
4055 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4056 					 struct drm_framebuffer *fb,
4057 					 int *min_downscale, int *max_upscale)
4058 {
4059 	struct amdgpu_device *adev = drm_to_adev(dev);
4060 	struct dc *dc = adev->dm.dc;
4061 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4062 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4063 
4064 	switch (fb->format->format) {
4065 	case DRM_FORMAT_P010:
4066 	case DRM_FORMAT_NV12:
4067 	case DRM_FORMAT_NV21:
4068 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4069 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4070 		break;
4071 
4072 	case DRM_FORMAT_XRGB16161616F:
4073 	case DRM_FORMAT_ARGB16161616F:
4074 	case DRM_FORMAT_XBGR16161616F:
4075 	case DRM_FORMAT_ABGR16161616F:
4076 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4077 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4078 		break;
4079 
4080 	default:
4081 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4082 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4083 		break;
4084 	}
4085 
4086 	/*
4087 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4088 	 * scaling factor of 1.0 == 1000 units.
4089 	 */
4090 	if (*max_upscale == 1)
4091 		*max_upscale = 1000;
4092 
4093 	if (*min_downscale == 1)
4094 		*min_downscale = 1000;
4095 }
4096 
4097 
4098 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4099 				struct dc_scaling_info *scaling_info)
4100 {
4101 	int scale_w, scale_h, min_downscale, max_upscale;
4102 
4103 	memset(scaling_info, 0, sizeof(*scaling_info));
4104 
4105 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4106 	scaling_info->src_rect.x = state->src_x >> 16;
4107 	scaling_info->src_rect.y = state->src_y >> 16;
4108 
4109 	/*
4110 	 * For reasons we don't (yet) fully understand a non-zero
4111 	 * src_y coordinate into an NV12 buffer can cause a
4112 	 * system hang. To avoid hangs (and maybe be overly cautious)
4113 	 * let's reject both non-zero src_x and src_y.
4114 	 *
4115 	 * We currently know of only one use-case to reproduce a
4116 	 * scenario with non-zero src_x and src_y for NV12, which
4117 	 * is to gesture the YouTube Android app into full screen
4118 	 * on ChromeOS.
4119 	 */
4120 	if (state->fb &&
4121 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4122 	    (scaling_info->src_rect.x != 0 ||
4123 	     scaling_info->src_rect.y != 0))
4124 		return -EINVAL;
4125 
4126 	scaling_info->src_rect.width = state->src_w >> 16;
4127 	if (scaling_info->src_rect.width == 0)
4128 		return -EINVAL;
4129 
4130 	scaling_info->src_rect.height = state->src_h >> 16;
4131 	if (scaling_info->src_rect.height == 0)
4132 		return -EINVAL;
4133 
4134 	scaling_info->dst_rect.x = state->crtc_x;
4135 	scaling_info->dst_rect.y = state->crtc_y;
4136 
4137 	if (state->crtc_w == 0)
4138 		return -EINVAL;
4139 
4140 	scaling_info->dst_rect.width = state->crtc_w;
4141 
4142 	if (state->crtc_h == 0)
4143 		return -EINVAL;
4144 
4145 	scaling_info->dst_rect.height = state->crtc_h;
4146 
4147 	/* DRM doesn't specify clipping on destination output. */
4148 	scaling_info->clip_rect = scaling_info->dst_rect;
4149 
4150 	/* Validate scaling per-format with DC plane caps */
4151 	if (state->plane && state->plane->dev && state->fb) {
4152 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4153 					     &min_downscale, &max_upscale);
4154 	} else {
4155 		min_downscale = 250;
4156 		max_upscale = 16000;
4157 	}
4158 
4159 	scale_w = scaling_info->dst_rect.width * 1000 /
4160 		  scaling_info->src_rect.width;
4161 
4162 	if (scale_w < min_downscale || scale_w > max_upscale)
4163 		return -EINVAL;
4164 
4165 	scale_h = scaling_info->dst_rect.height * 1000 /
4166 		  scaling_info->src_rect.height;
4167 
4168 	if (scale_h < min_downscale || scale_h > max_upscale)
4169 		return -EINVAL;
4170 
4171 	/*
4172 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4173 	 * assume reasonable defaults based on the format.
4174 	 */
4175 
4176 	return 0;
4177 }
4178 
4179 static void
4180 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4181 				 uint64_t tiling_flags)
4182 {
4183 	/* Fill GFX8 params */
4184 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4185 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4186 
4187 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4188 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4189 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4190 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4191 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4192 
4193 		/* XXX fix me for VI */
4194 		tiling_info->gfx8.num_banks = num_banks;
4195 		tiling_info->gfx8.array_mode =
4196 				DC_ARRAY_2D_TILED_THIN1;
4197 		tiling_info->gfx8.tile_split = tile_split;
4198 		tiling_info->gfx8.bank_width = bankw;
4199 		tiling_info->gfx8.bank_height = bankh;
4200 		tiling_info->gfx8.tile_aspect = mtaspect;
4201 		tiling_info->gfx8.tile_mode =
4202 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4203 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4204 			== DC_ARRAY_1D_TILED_THIN1) {
4205 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4206 	}
4207 
4208 	tiling_info->gfx8.pipe_config =
4209 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4210 }
4211 
4212 static void
4213 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4214 				  union dc_tiling_info *tiling_info)
4215 {
4216 	tiling_info->gfx9.num_pipes =
4217 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4218 	tiling_info->gfx9.num_banks =
4219 		adev->gfx.config.gb_addr_config_fields.num_banks;
4220 	tiling_info->gfx9.pipe_interleave =
4221 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4222 	tiling_info->gfx9.num_shader_engines =
4223 		adev->gfx.config.gb_addr_config_fields.num_se;
4224 	tiling_info->gfx9.max_compressed_frags =
4225 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4226 	tiling_info->gfx9.num_rb_per_se =
4227 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4228 	tiling_info->gfx9.shaderEnable = 1;
4229 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4230 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4231 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4232 	    adev->asic_type == CHIP_VANGOGH)
4233 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4234 }
4235 
4236 static int
4237 validate_dcc(struct amdgpu_device *adev,
4238 	     const enum surface_pixel_format format,
4239 	     const enum dc_rotation_angle rotation,
4240 	     const union dc_tiling_info *tiling_info,
4241 	     const struct dc_plane_dcc_param *dcc,
4242 	     const struct dc_plane_address *address,
4243 	     const struct plane_size *plane_size)
4244 {
4245 	struct dc *dc = adev->dm.dc;
4246 	struct dc_dcc_surface_param input;
4247 	struct dc_surface_dcc_cap output;
4248 
4249 	memset(&input, 0, sizeof(input));
4250 	memset(&output, 0, sizeof(output));
4251 
4252 	if (!dcc->enable)
4253 		return 0;
4254 
4255 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4256 	    !dc->cap_funcs.get_dcc_compression_cap)
4257 		return -EINVAL;
4258 
4259 	input.format = format;
4260 	input.surface_size.width = plane_size->surface_size.width;
4261 	input.surface_size.height = plane_size->surface_size.height;
4262 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4263 
4264 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4265 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4266 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4267 		input.scan = SCAN_DIRECTION_VERTICAL;
4268 
4269 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4270 		return -EINVAL;
4271 
4272 	if (!output.capable)
4273 		return -EINVAL;
4274 
4275 	if (dcc->independent_64b_blks == 0 &&
4276 	    output.grph.rgb.independent_64b_blks != 0)
4277 		return -EINVAL;
4278 
4279 	return 0;
4280 }
4281 
4282 static bool
4283 modifier_has_dcc(uint64_t modifier)
4284 {
4285 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4286 }
4287 
4288 static unsigned
4289 modifier_gfx9_swizzle_mode(uint64_t modifier)
4290 {
4291 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4292 		return 0;
4293 
4294 	return AMD_FMT_MOD_GET(TILE, modifier);
4295 }
4296 
4297 static const struct drm_format_info *
4298 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4299 {
4300 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4301 }
4302 
4303 static void
4304 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4305 				    union dc_tiling_info *tiling_info,
4306 				    uint64_t modifier)
4307 {
4308 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4309 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4310 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4311 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4312 
4313 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4314 
4315 	if (!IS_AMD_FMT_MOD(modifier))
4316 		return;
4317 
4318 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4319 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4320 
4321 	if (adev->family >= AMDGPU_FAMILY_NV) {
4322 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4323 	} else {
4324 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4325 
4326 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4327 	}
4328 }
4329 
4330 enum dm_micro_swizzle {
4331 	MICRO_SWIZZLE_Z = 0,
4332 	MICRO_SWIZZLE_S = 1,
4333 	MICRO_SWIZZLE_D = 2,
4334 	MICRO_SWIZZLE_R = 3
4335 };
4336 
4337 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4338 					  uint32_t format,
4339 					  uint64_t modifier)
4340 {
4341 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4342 	const struct drm_format_info *info = drm_format_info(format);
4343 	int i;
4344 
4345 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4346 
4347 	if (!info)
4348 		return false;
4349 
4350 	/*
4351 	 * We always have to allow these modifiers:
4352 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4353 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4354 	 */
4355 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4356 	    modifier == DRM_FORMAT_MOD_INVALID) {
4357 		return true;
4358 	}
4359 
4360 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4361 	for (i = 0; i < plane->modifier_count; i++) {
4362 		if (modifier == plane->modifiers[i])
4363 			break;
4364 	}
4365 	if (i == plane->modifier_count)
4366 		return false;
4367 
4368 	/*
4369 	 * For D swizzle the canonical modifier depends on the bpp, so check
4370 	 * it here.
4371 	 */
4372 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4373 	    adev->family >= AMDGPU_FAMILY_NV) {
4374 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4375 			return false;
4376 	}
4377 
4378 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4379 	    info->cpp[0] < 8)
4380 		return false;
4381 
4382 	if (modifier_has_dcc(modifier)) {
4383 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4384 		if (info->cpp[0] != 4)
4385 			return false;
4386 		/* We support multi-planar formats, but not when combined with
4387 		 * additional DCC metadata planes. */
4388 		if (info->num_planes > 1)
4389 			return false;
4390 	}
4391 
4392 	return true;
4393 }
4394 
4395 static void
4396 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4397 {
4398 	if (!*mods)
4399 		return;
4400 
4401 	if (*cap - *size < 1) {
4402 		uint64_t new_cap = *cap * 2;
4403 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4404 
4405 		if (!new_mods) {
4406 			kfree(*mods);
4407 			*mods = NULL;
4408 			return;
4409 		}
4410 
4411 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4412 		kfree(*mods);
4413 		*mods = new_mods;
4414 		*cap = new_cap;
4415 	}
4416 
4417 	(*mods)[*size] = mod;
4418 	*size += 1;
4419 }
4420 
4421 static void
4422 add_gfx9_modifiers(const struct amdgpu_device *adev,
4423 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4424 {
4425 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4426 	int pipe_xor_bits = min(8, pipes +
4427 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4428 	int bank_xor_bits = min(8 - pipe_xor_bits,
4429 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4430 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4431 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4432 
4433 
4434 	if (adev->family == AMDGPU_FAMILY_RV) {
4435 		/* Raven2 and later */
4436 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4437 
4438 		/*
4439 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4440 		 * doesn't support _D on DCN
4441 		 */
4442 
4443 		if (has_constant_encode) {
4444 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4445 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4446 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4447 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4448 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4449 				    AMD_FMT_MOD_SET(DCC, 1) |
4450 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4451 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4452 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4453 		}
4454 
4455 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4456 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4457 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4458 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4459 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4460 			    AMD_FMT_MOD_SET(DCC, 1) |
4461 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4462 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4463 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4464 
4465 		if (has_constant_encode) {
4466 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4467 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4468 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4469 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4470 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4471 				    AMD_FMT_MOD_SET(DCC, 1) |
4472 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4473 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4474 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4475 
4476 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4477 				    AMD_FMT_MOD_SET(RB, rb) |
4478 				    AMD_FMT_MOD_SET(PIPE, pipes));
4479 		}
4480 
4481 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4482 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4483 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4484 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4485 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4486 			    AMD_FMT_MOD_SET(DCC, 1) |
4487 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4488 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4489 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4490 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4491 			    AMD_FMT_MOD_SET(RB, rb) |
4492 			    AMD_FMT_MOD_SET(PIPE, pipes));
4493 	}
4494 
4495 	/*
4496 	 * Only supported for 64bpp on Raven, will be filtered on format in
4497 	 * dm_plane_format_mod_supported.
4498 	 */
4499 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4500 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4501 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4502 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4503 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4504 
4505 	if (adev->family == AMDGPU_FAMILY_RV) {
4506 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4507 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4508 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4509 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4510 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4511 	}
4512 
4513 	/*
4514 	 * Only supported for 64bpp on Raven, will be filtered on format in
4515 	 * dm_plane_format_mod_supported.
4516 	 */
4517 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4518 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4519 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4520 
4521 	if (adev->family == AMDGPU_FAMILY_RV) {
4522 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4523 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4524 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4525 	}
4526 }
4527 
4528 static void
4529 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4530 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4531 {
4532 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4533 
4534 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4535 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4536 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4537 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4538 		    AMD_FMT_MOD_SET(DCC, 1) |
4539 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4540 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4541 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4542 
4543 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4544 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4545 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4546 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4547 		    AMD_FMT_MOD_SET(DCC, 1) |
4548 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4549 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4550 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4551 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4552 
4553 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4554 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4555 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4556 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4557 
4558 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4559 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4560 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4561 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4562 
4563 
4564 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4565 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4566 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4567 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4568 
4569 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4570 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4571 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4572 }
4573 
4574 static void
4575 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4576 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4577 {
4578 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4579 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4580 
4581 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4582 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4583 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4584 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4585 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4586 		    AMD_FMT_MOD_SET(DCC, 1) |
4587 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4588 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4589 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4590 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4591 
4592 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4593 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4594 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4595 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4596 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4597 		    AMD_FMT_MOD_SET(DCC, 1) |
4598 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4599 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4600 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4601 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4602 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4603 
4604 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4605 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4606 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4607 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4608 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4609 
4610 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4611 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4612 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4613 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4614 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4615 
4616 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4617 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4618 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4619 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4620 
4621 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4622 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4623 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4624 }
4625 
4626 static int
4627 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4628 {
4629 	uint64_t size = 0, capacity = 128;
4630 	*mods = NULL;
4631 
4632 	/* We have not hooked up any pre-GFX9 modifiers. */
4633 	if (adev->family < AMDGPU_FAMILY_AI)
4634 		return 0;
4635 
4636 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4637 
4638 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4639 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4640 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4641 		return *mods ? 0 : -ENOMEM;
4642 	}
4643 
4644 	switch (adev->family) {
4645 	case AMDGPU_FAMILY_AI:
4646 	case AMDGPU_FAMILY_RV:
4647 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4648 		break;
4649 	case AMDGPU_FAMILY_NV:
4650 	case AMDGPU_FAMILY_VGH:
4651 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4652 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4653 		else
4654 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4655 		break;
4656 	}
4657 
4658 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4659 
4660 	/* INVALID marks the end of the list. */
4661 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4662 
4663 	if (!*mods)
4664 		return -ENOMEM;
4665 
4666 	return 0;
4667 }
4668 
4669 static int
4670 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4671 					  const struct amdgpu_framebuffer *afb,
4672 					  const enum surface_pixel_format format,
4673 					  const enum dc_rotation_angle rotation,
4674 					  const struct plane_size *plane_size,
4675 					  union dc_tiling_info *tiling_info,
4676 					  struct dc_plane_dcc_param *dcc,
4677 					  struct dc_plane_address *address,
4678 					  const bool force_disable_dcc)
4679 {
4680 	const uint64_t modifier = afb->base.modifier;
4681 	int ret;
4682 
4683 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4684 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4685 
4686 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4687 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4688 
4689 		dcc->enable = 1;
4690 		dcc->meta_pitch = afb->base.pitches[1];
4691 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4692 
4693 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4694 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4695 	}
4696 
4697 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4698 	if (ret)
4699 		return ret;
4700 
4701 	return 0;
4702 }
4703 
4704 static int
4705 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4706 			     const struct amdgpu_framebuffer *afb,
4707 			     const enum surface_pixel_format format,
4708 			     const enum dc_rotation_angle rotation,
4709 			     const uint64_t tiling_flags,
4710 			     union dc_tiling_info *tiling_info,
4711 			     struct plane_size *plane_size,
4712 			     struct dc_plane_dcc_param *dcc,
4713 			     struct dc_plane_address *address,
4714 			     bool tmz_surface,
4715 			     bool force_disable_dcc)
4716 {
4717 	const struct drm_framebuffer *fb = &afb->base;
4718 	int ret;
4719 
4720 	memset(tiling_info, 0, sizeof(*tiling_info));
4721 	memset(plane_size, 0, sizeof(*plane_size));
4722 	memset(dcc, 0, sizeof(*dcc));
4723 	memset(address, 0, sizeof(*address));
4724 
4725 	address->tmz_surface = tmz_surface;
4726 
4727 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4728 		uint64_t addr = afb->address + fb->offsets[0];
4729 
4730 		plane_size->surface_size.x = 0;
4731 		plane_size->surface_size.y = 0;
4732 		plane_size->surface_size.width = fb->width;
4733 		plane_size->surface_size.height = fb->height;
4734 		plane_size->surface_pitch =
4735 			fb->pitches[0] / fb->format->cpp[0];
4736 
4737 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4738 		address->grph.addr.low_part = lower_32_bits(addr);
4739 		address->grph.addr.high_part = upper_32_bits(addr);
4740 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4741 		uint64_t luma_addr = afb->address + fb->offsets[0];
4742 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4743 
4744 		plane_size->surface_size.x = 0;
4745 		plane_size->surface_size.y = 0;
4746 		plane_size->surface_size.width = fb->width;
4747 		plane_size->surface_size.height = fb->height;
4748 		plane_size->surface_pitch =
4749 			fb->pitches[0] / fb->format->cpp[0];
4750 
4751 		plane_size->chroma_size.x = 0;
4752 		plane_size->chroma_size.y = 0;
4753 		/* TODO: set these based on surface format */
4754 		plane_size->chroma_size.width = fb->width / 2;
4755 		plane_size->chroma_size.height = fb->height / 2;
4756 
4757 		plane_size->chroma_pitch =
4758 			fb->pitches[1] / fb->format->cpp[1];
4759 
4760 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4761 		address->video_progressive.luma_addr.low_part =
4762 			lower_32_bits(luma_addr);
4763 		address->video_progressive.luma_addr.high_part =
4764 			upper_32_bits(luma_addr);
4765 		address->video_progressive.chroma_addr.low_part =
4766 			lower_32_bits(chroma_addr);
4767 		address->video_progressive.chroma_addr.high_part =
4768 			upper_32_bits(chroma_addr);
4769 	}
4770 
4771 	if (adev->family >= AMDGPU_FAMILY_AI) {
4772 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4773 								rotation, plane_size,
4774 								tiling_info, dcc,
4775 								address,
4776 								force_disable_dcc);
4777 		if (ret)
4778 			return ret;
4779 	} else {
4780 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4781 	}
4782 
4783 	return 0;
4784 }
4785 
4786 static void
4787 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4788 			       bool *per_pixel_alpha, bool *global_alpha,
4789 			       int *global_alpha_value)
4790 {
4791 	*per_pixel_alpha = false;
4792 	*global_alpha = false;
4793 	*global_alpha_value = 0xff;
4794 
4795 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4796 		return;
4797 
4798 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4799 		static const uint32_t alpha_formats[] = {
4800 			DRM_FORMAT_ARGB8888,
4801 			DRM_FORMAT_RGBA8888,
4802 			DRM_FORMAT_ABGR8888,
4803 		};
4804 		uint32_t format = plane_state->fb->format->format;
4805 		unsigned int i;
4806 
4807 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4808 			if (format == alpha_formats[i]) {
4809 				*per_pixel_alpha = true;
4810 				break;
4811 			}
4812 		}
4813 	}
4814 
4815 	if (plane_state->alpha < 0xffff) {
4816 		*global_alpha = true;
4817 		*global_alpha_value = plane_state->alpha >> 8;
4818 	}
4819 }
4820 
4821 static int
4822 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4823 			    const enum surface_pixel_format format,
4824 			    enum dc_color_space *color_space)
4825 {
4826 	bool full_range;
4827 
4828 	*color_space = COLOR_SPACE_SRGB;
4829 
4830 	/* DRM color properties only affect non-RGB formats. */
4831 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4832 		return 0;
4833 
4834 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4835 
4836 	switch (plane_state->color_encoding) {
4837 	case DRM_COLOR_YCBCR_BT601:
4838 		if (full_range)
4839 			*color_space = COLOR_SPACE_YCBCR601;
4840 		else
4841 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4842 		break;
4843 
4844 	case DRM_COLOR_YCBCR_BT709:
4845 		if (full_range)
4846 			*color_space = COLOR_SPACE_YCBCR709;
4847 		else
4848 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4849 		break;
4850 
4851 	case DRM_COLOR_YCBCR_BT2020:
4852 		if (full_range)
4853 			*color_space = COLOR_SPACE_2020_YCBCR;
4854 		else
4855 			return -EINVAL;
4856 		break;
4857 
4858 	default:
4859 		return -EINVAL;
4860 	}
4861 
4862 	return 0;
4863 }
4864 
4865 static int
4866 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4867 			    const struct drm_plane_state *plane_state,
4868 			    const uint64_t tiling_flags,
4869 			    struct dc_plane_info *plane_info,
4870 			    struct dc_plane_address *address,
4871 			    bool tmz_surface,
4872 			    bool force_disable_dcc)
4873 {
4874 	const struct drm_framebuffer *fb = plane_state->fb;
4875 	const struct amdgpu_framebuffer *afb =
4876 		to_amdgpu_framebuffer(plane_state->fb);
4877 	int ret;
4878 
4879 	memset(plane_info, 0, sizeof(*plane_info));
4880 
4881 	switch (fb->format->format) {
4882 	case DRM_FORMAT_C8:
4883 		plane_info->format =
4884 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4885 		break;
4886 	case DRM_FORMAT_RGB565:
4887 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4888 		break;
4889 	case DRM_FORMAT_XRGB8888:
4890 	case DRM_FORMAT_ARGB8888:
4891 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4892 		break;
4893 	case DRM_FORMAT_XRGB2101010:
4894 	case DRM_FORMAT_ARGB2101010:
4895 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4896 		break;
4897 	case DRM_FORMAT_XBGR2101010:
4898 	case DRM_FORMAT_ABGR2101010:
4899 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4900 		break;
4901 	case DRM_FORMAT_XBGR8888:
4902 	case DRM_FORMAT_ABGR8888:
4903 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4904 		break;
4905 	case DRM_FORMAT_NV21:
4906 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4907 		break;
4908 	case DRM_FORMAT_NV12:
4909 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4910 		break;
4911 	case DRM_FORMAT_P010:
4912 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4913 		break;
4914 	case DRM_FORMAT_XRGB16161616F:
4915 	case DRM_FORMAT_ARGB16161616F:
4916 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4917 		break;
4918 	case DRM_FORMAT_XBGR16161616F:
4919 	case DRM_FORMAT_ABGR16161616F:
4920 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4921 		break;
4922 	default:
4923 		DRM_ERROR(
4924 			"Unsupported screen format %p4cc\n",
4925 			&fb->format->format);
4926 		return -EINVAL;
4927 	}
4928 
4929 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4930 	case DRM_MODE_ROTATE_0:
4931 		plane_info->rotation = ROTATION_ANGLE_0;
4932 		break;
4933 	case DRM_MODE_ROTATE_90:
4934 		plane_info->rotation = ROTATION_ANGLE_90;
4935 		break;
4936 	case DRM_MODE_ROTATE_180:
4937 		plane_info->rotation = ROTATION_ANGLE_180;
4938 		break;
4939 	case DRM_MODE_ROTATE_270:
4940 		plane_info->rotation = ROTATION_ANGLE_270;
4941 		break;
4942 	default:
4943 		plane_info->rotation = ROTATION_ANGLE_0;
4944 		break;
4945 	}
4946 
4947 	plane_info->visible = true;
4948 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4949 
4950 	plane_info->layer_index = 0;
4951 
4952 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4953 					  &plane_info->color_space);
4954 	if (ret)
4955 		return ret;
4956 
4957 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4958 					   plane_info->rotation, tiling_flags,
4959 					   &plane_info->tiling_info,
4960 					   &plane_info->plane_size,
4961 					   &plane_info->dcc, address, tmz_surface,
4962 					   force_disable_dcc);
4963 	if (ret)
4964 		return ret;
4965 
4966 	fill_blending_from_plane_state(
4967 		plane_state, &plane_info->per_pixel_alpha,
4968 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4969 
4970 	return 0;
4971 }
4972 
4973 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4974 				    struct dc_plane_state *dc_plane_state,
4975 				    struct drm_plane_state *plane_state,
4976 				    struct drm_crtc_state *crtc_state)
4977 {
4978 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4979 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4980 	struct dc_scaling_info scaling_info;
4981 	struct dc_plane_info plane_info;
4982 	int ret;
4983 	bool force_disable_dcc = false;
4984 
4985 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4986 	if (ret)
4987 		return ret;
4988 
4989 	dc_plane_state->src_rect = scaling_info.src_rect;
4990 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4991 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4992 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4993 
4994 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4995 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4996 					  afb->tiling_flags,
4997 					  &plane_info,
4998 					  &dc_plane_state->address,
4999 					  afb->tmz_surface,
5000 					  force_disable_dcc);
5001 	if (ret)
5002 		return ret;
5003 
5004 	dc_plane_state->format = plane_info.format;
5005 	dc_plane_state->color_space = plane_info.color_space;
5006 	dc_plane_state->format = plane_info.format;
5007 	dc_plane_state->plane_size = plane_info.plane_size;
5008 	dc_plane_state->rotation = plane_info.rotation;
5009 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5010 	dc_plane_state->stereo_format = plane_info.stereo_format;
5011 	dc_plane_state->tiling_info = plane_info.tiling_info;
5012 	dc_plane_state->visible = plane_info.visible;
5013 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5014 	dc_plane_state->global_alpha = plane_info.global_alpha;
5015 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5016 	dc_plane_state->dcc = plane_info.dcc;
5017 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5018 	dc_plane_state->flip_int_enabled = true;
5019 
5020 	/*
5021 	 * Always set input transfer function, since plane state is refreshed
5022 	 * every time.
5023 	 */
5024 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5025 	if (ret)
5026 		return ret;
5027 
5028 	return 0;
5029 }
5030 
5031 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5032 					   const struct dm_connector_state *dm_state,
5033 					   struct dc_stream_state *stream)
5034 {
5035 	enum amdgpu_rmx_type rmx_type;
5036 
5037 	struct rect src = { 0 }; /* viewport in composition space*/
5038 	struct rect dst = { 0 }; /* stream addressable area */
5039 
5040 	/* no mode. nothing to be done */
5041 	if (!mode)
5042 		return;
5043 
5044 	/* Full screen scaling by default */
5045 	src.width = mode->hdisplay;
5046 	src.height = mode->vdisplay;
5047 	dst.width = stream->timing.h_addressable;
5048 	dst.height = stream->timing.v_addressable;
5049 
5050 	if (dm_state) {
5051 		rmx_type = dm_state->scaling;
5052 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5053 			if (src.width * dst.height <
5054 					src.height * dst.width) {
5055 				/* height needs less upscaling/more downscaling */
5056 				dst.width = src.width *
5057 						dst.height / src.height;
5058 			} else {
5059 				/* width needs less upscaling/more downscaling */
5060 				dst.height = src.height *
5061 						dst.width / src.width;
5062 			}
5063 		} else if (rmx_type == RMX_CENTER) {
5064 			dst = src;
5065 		}
5066 
5067 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5068 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5069 
5070 		if (dm_state->underscan_enable) {
5071 			dst.x += dm_state->underscan_hborder / 2;
5072 			dst.y += dm_state->underscan_vborder / 2;
5073 			dst.width -= dm_state->underscan_hborder;
5074 			dst.height -= dm_state->underscan_vborder;
5075 		}
5076 	}
5077 
5078 	stream->src = src;
5079 	stream->dst = dst;
5080 
5081 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5082 		      dst.x, dst.y, dst.width, dst.height);
5083 
5084 }
5085 
5086 static enum dc_color_depth
5087 convert_color_depth_from_display_info(const struct drm_connector *connector,
5088 				      bool is_y420, int requested_bpc)
5089 {
5090 	uint8_t bpc;
5091 
5092 	if (is_y420) {
5093 		bpc = 8;
5094 
5095 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5096 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5097 			bpc = 16;
5098 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5099 			bpc = 12;
5100 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5101 			bpc = 10;
5102 	} else {
5103 		bpc = (uint8_t)connector->display_info.bpc;
5104 		/* Assume 8 bpc by default if no bpc is specified. */
5105 		bpc = bpc ? bpc : 8;
5106 	}
5107 
5108 	if (requested_bpc > 0) {
5109 		/*
5110 		 * Cap display bpc based on the user requested value.
5111 		 *
5112 		 * The value for state->max_bpc may not correctly updated
5113 		 * depending on when the connector gets added to the state
5114 		 * or if this was called outside of atomic check, so it
5115 		 * can't be used directly.
5116 		 */
5117 		bpc = min_t(u8, bpc, requested_bpc);
5118 
5119 		/* Round down to the nearest even number. */
5120 		bpc = bpc - (bpc & 1);
5121 	}
5122 
5123 	switch (bpc) {
5124 	case 0:
5125 		/*
5126 		 * Temporary Work around, DRM doesn't parse color depth for
5127 		 * EDID revision before 1.4
5128 		 * TODO: Fix edid parsing
5129 		 */
5130 		return COLOR_DEPTH_888;
5131 	case 6:
5132 		return COLOR_DEPTH_666;
5133 	case 8:
5134 		return COLOR_DEPTH_888;
5135 	case 10:
5136 		return COLOR_DEPTH_101010;
5137 	case 12:
5138 		return COLOR_DEPTH_121212;
5139 	case 14:
5140 		return COLOR_DEPTH_141414;
5141 	case 16:
5142 		return COLOR_DEPTH_161616;
5143 	default:
5144 		return COLOR_DEPTH_UNDEFINED;
5145 	}
5146 }
5147 
5148 static enum dc_aspect_ratio
5149 get_aspect_ratio(const struct drm_display_mode *mode_in)
5150 {
5151 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5152 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5153 }
5154 
5155 static enum dc_color_space
5156 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5157 {
5158 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5159 
5160 	switch (dc_crtc_timing->pixel_encoding)	{
5161 	case PIXEL_ENCODING_YCBCR422:
5162 	case PIXEL_ENCODING_YCBCR444:
5163 	case PIXEL_ENCODING_YCBCR420:
5164 	{
5165 		/*
5166 		 * 27030khz is the separation point between HDTV and SDTV
5167 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5168 		 * respectively
5169 		 */
5170 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5171 			if (dc_crtc_timing->flags.Y_ONLY)
5172 				color_space =
5173 					COLOR_SPACE_YCBCR709_LIMITED;
5174 			else
5175 				color_space = COLOR_SPACE_YCBCR709;
5176 		} else {
5177 			if (dc_crtc_timing->flags.Y_ONLY)
5178 				color_space =
5179 					COLOR_SPACE_YCBCR601_LIMITED;
5180 			else
5181 				color_space = COLOR_SPACE_YCBCR601;
5182 		}
5183 
5184 	}
5185 	break;
5186 	case PIXEL_ENCODING_RGB:
5187 		color_space = COLOR_SPACE_SRGB;
5188 		break;
5189 
5190 	default:
5191 		WARN_ON(1);
5192 		break;
5193 	}
5194 
5195 	return color_space;
5196 }
5197 
5198 static bool adjust_colour_depth_from_display_info(
5199 	struct dc_crtc_timing *timing_out,
5200 	const struct drm_display_info *info)
5201 {
5202 	enum dc_color_depth depth = timing_out->display_color_depth;
5203 	int normalized_clk;
5204 	do {
5205 		normalized_clk = timing_out->pix_clk_100hz / 10;
5206 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5207 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5208 			normalized_clk /= 2;
5209 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5210 		switch (depth) {
5211 		case COLOR_DEPTH_888:
5212 			break;
5213 		case COLOR_DEPTH_101010:
5214 			normalized_clk = (normalized_clk * 30) / 24;
5215 			break;
5216 		case COLOR_DEPTH_121212:
5217 			normalized_clk = (normalized_clk * 36) / 24;
5218 			break;
5219 		case COLOR_DEPTH_161616:
5220 			normalized_clk = (normalized_clk * 48) / 24;
5221 			break;
5222 		default:
5223 			/* The above depths are the only ones valid for HDMI. */
5224 			return false;
5225 		}
5226 		if (normalized_clk <= info->max_tmds_clock) {
5227 			timing_out->display_color_depth = depth;
5228 			return true;
5229 		}
5230 	} while (--depth > COLOR_DEPTH_666);
5231 	return false;
5232 }
5233 
5234 static void fill_stream_properties_from_drm_display_mode(
5235 	struct dc_stream_state *stream,
5236 	const struct drm_display_mode *mode_in,
5237 	const struct drm_connector *connector,
5238 	const struct drm_connector_state *connector_state,
5239 	const struct dc_stream_state *old_stream,
5240 	int requested_bpc)
5241 {
5242 	struct dc_crtc_timing *timing_out = &stream->timing;
5243 	const struct drm_display_info *info = &connector->display_info;
5244 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5245 	struct hdmi_vendor_infoframe hv_frame;
5246 	struct hdmi_avi_infoframe avi_frame;
5247 
5248 	memset(&hv_frame, 0, sizeof(hv_frame));
5249 	memset(&avi_frame, 0, sizeof(avi_frame));
5250 
5251 	timing_out->h_border_left = 0;
5252 	timing_out->h_border_right = 0;
5253 	timing_out->v_border_top = 0;
5254 	timing_out->v_border_bottom = 0;
5255 	/* TODO: un-hardcode */
5256 	if (drm_mode_is_420_only(info, mode_in)
5257 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5258 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5259 	else if (drm_mode_is_420_also(info, mode_in)
5260 			&& aconnector->force_yuv420_output)
5261 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5262 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5263 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5264 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5265 	else
5266 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5267 
5268 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5269 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5270 		connector,
5271 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5272 		requested_bpc);
5273 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5274 	timing_out->hdmi_vic = 0;
5275 
5276 	if(old_stream) {
5277 		timing_out->vic = old_stream->timing.vic;
5278 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5279 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5280 	} else {
5281 		timing_out->vic = drm_match_cea_mode(mode_in);
5282 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5283 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5284 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5285 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5286 	}
5287 
5288 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5289 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5290 		timing_out->vic = avi_frame.video_code;
5291 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5292 		timing_out->hdmi_vic = hv_frame.vic;
5293 	}
5294 
5295 	if (is_freesync_video_mode(mode_in, aconnector)) {
5296 		timing_out->h_addressable = mode_in->hdisplay;
5297 		timing_out->h_total = mode_in->htotal;
5298 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5299 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5300 		timing_out->v_total = mode_in->vtotal;
5301 		timing_out->v_addressable = mode_in->vdisplay;
5302 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5303 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5304 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5305 	} else {
5306 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5307 		timing_out->h_total = mode_in->crtc_htotal;
5308 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5309 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5310 		timing_out->v_total = mode_in->crtc_vtotal;
5311 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5312 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5313 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5314 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5315 	}
5316 
5317 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5318 
5319 	stream->output_color_space = get_output_color_space(timing_out);
5320 
5321 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5322 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5323 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5324 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5325 		    drm_mode_is_420_also(info, mode_in) &&
5326 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5327 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5328 			adjust_colour_depth_from_display_info(timing_out, info);
5329 		}
5330 	}
5331 }
5332 
5333 static void fill_audio_info(struct audio_info *audio_info,
5334 			    const struct drm_connector *drm_connector,
5335 			    const struct dc_sink *dc_sink)
5336 {
5337 	int i = 0;
5338 	int cea_revision = 0;
5339 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5340 
5341 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5342 	audio_info->product_id = edid_caps->product_id;
5343 
5344 	cea_revision = drm_connector->display_info.cea_rev;
5345 
5346 	strscpy(audio_info->display_name,
5347 		edid_caps->display_name,
5348 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5349 
5350 	if (cea_revision >= 3) {
5351 		audio_info->mode_count = edid_caps->audio_mode_count;
5352 
5353 		for (i = 0; i < audio_info->mode_count; ++i) {
5354 			audio_info->modes[i].format_code =
5355 					(enum audio_format_code)
5356 					(edid_caps->audio_modes[i].format_code);
5357 			audio_info->modes[i].channel_count =
5358 					edid_caps->audio_modes[i].channel_count;
5359 			audio_info->modes[i].sample_rates.all =
5360 					edid_caps->audio_modes[i].sample_rate;
5361 			audio_info->modes[i].sample_size =
5362 					edid_caps->audio_modes[i].sample_size;
5363 		}
5364 	}
5365 
5366 	audio_info->flags.all = edid_caps->speaker_flags;
5367 
5368 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5369 	if (drm_connector->latency_present[0]) {
5370 		audio_info->video_latency = drm_connector->video_latency[0];
5371 		audio_info->audio_latency = drm_connector->audio_latency[0];
5372 	}
5373 
5374 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5375 
5376 }
5377 
5378 static void
5379 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5380 				      struct drm_display_mode *dst_mode)
5381 {
5382 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5383 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5384 	dst_mode->crtc_clock = src_mode->crtc_clock;
5385 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5386 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5387 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5388 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5389 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5390 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5391 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5392 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5393 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5394 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5395 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5396 }
5397 
5398 static void
5399 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5400 					const struct drm_display_mode *native_mode,
5401 					bool scale_enabled)
5402 {
5403 	if (scale_enabled) {
5404 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5405 	} else if (native_mode->clock == drm_mode->clock &&
5406 			native_mode->htotal == drm_mode->htotal &&
5407 			native_mode->vtotal == drm_mode->vtotal) {
5408 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5409 	} else {
5410 		/* no scaling nor amdgpu inserted, no need to patch */
5411 	}
5412 }
5413 
5414 static struct dc_sink *
5415 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5416 {
5417 	struct dc_sink_init_data sink_init_data = { 0 };
5418 	struct dc_sink *sink = NULL;
5419 	sink_init_data.link = aconnector->dc_link;
5420 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5421 
5422 	sink = dc_sink_create(&sink_init_data);
5423 	if (!sink) {
5424 		DRM_ERROR("Failed to create sink!\n");
5425 		return NULL;
5426 	}
5427 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5428 
5429 	return sink;
5430 }
5431 
5432 static void set_multisync_trigger_params(
5433 		struct dc_stream_state *stream)
5434 {
5435 	struct dc_stream_state *master = NULL;
5436 
5437 	if (stream->triggered_crtc_reset.enabled) {
5438 		master = stream->triggered_crtc_reset.event_source;
5439 		stream->triggered_crtc_reset.event =
5440 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5441 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5442 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5443 	}
5444 }
5445 
5446 static void set_master_stream(struct dc_stream_state *stream_set[],
5447 			      int stream_count)
5448 {
5449 	int j, highest_rfr = 0, master_stream = 0;
5450 
5451 	for (j = 0;  j < stream_count; j++) {
5452 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5453 			int refresh_rate = 0;
5454 
5455 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5456 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5457 			if (refresh_rate > highest_rfr) {
5458 				highest_rfr = refresh_rate;
5459 				master_stream = j;
5460 			}
5461 		}
5462 	}
5463 	for (j = 0;  j < stream_count; j++) {
5464 		if (stream_set[j])
5465 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5466 	}
5467 }
5468 
5469 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5470 {
5471 	int i = 0;
5472 	struct dc_stream_state *stream;
5473 
5474 	if (context->stream_count < 2)
5475 		return;
5476 	for (i = 0; i < context->stream_count ; i++) {
5477 		if (!context->streams[i])
5478 			continue;
5479 		/*
5480 		 * TODO: add a function to read AMD VSDB bits and set
5481 		 * crtc_sync_master.multi_sync_enabled flag
5482 		 * For now it's set to false
5483 		 */
5484 	}
5485 
5486 	set_master_stream(context->streams, context->stream_count);
5487 
5488 	for (i = 0; i < context->stream_count ; i++) {
5489 		stream = context->streams[i];
5490 
5491 		if (!stream)
5492 			continue;
5493 
5494 		set_multisync_trigger_params(stream);
5495 	}
5496 }
5497 
5498 static struct drm_display_mode *
5499 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5500 			  bool use_probed_modes)
5501 {
5502 	struct drm_display_mode *m, *m_pref = NULL;
5503 	u16 current_refresh, highest_refresh;
5504 	struct list_head *list_head = use_probed_modes ?
5505 						    &aconnector->base.probed_modes :
5506 						    &aconnector->base.modes;
5507 
5508 	if (aconnector->freesync_vid_base.clock != 0)
5509 		return &aconnector->freesync_vid_base;
5510 
5511 	/* Find the preferred mode */
5512 	list_for_each_entry (m, list_head, head) {
5513 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5514 			m_pref = m;
5515 			break;
5516 		}
5517 	}
5518 
5519 	if (!m_pref) {
5520 		/* Probably an EDID with no preferred mode. Fallback to first entry */
5521 		m_pref = list_first_entry_or_null(
5522 			&aconnector->base.modes, struct drm_display_mode, head);
5523 		if (!m_pref) {
5524 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5525 			return NULL;
5526 		}
5527 	}
5528 
5529 	highest_refresh = drm_mode_vrefresh(m_pref);
5530 
5531 	/*
5532 	 * Find the mode with highest refresh rate with same resolution.
5533 	 * For some monitors, preferred mode is not the mode with highest
5534 	 * supported refresh rate.
5535 	 */
5536 	list_for_each_entry (m, list_head, head) {
5537 		current_refresh  = drm_mode_vrefresh(m);
5538 
5539 		if (m->hdisplay == m_pref->hdisplay &&
5540 		    m->vdisplay == m_pref->vdisplay &&
5541 		    highest_refresh < current_refresh) {
5542 			highest_refresh = current_refresh;
5543 			m_pref = m;
5544 		}
5545 	}
5546 
5547 	aconnector->freesync_vid_base = *m_pref;
5548 	return m_pref;
5549 }
5550 
5551 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5552 				   struct amdgpu_dm_connector *aconnector)
5553 {
5554 	struct drm_display_mode *high_mode;
5555 	int timing_diff;
5556 
5557 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5558 	if (!high_mode || !mode)
5559 		return false;
5560 
5561 	timing_diff = high_mode->vtotal - mode->vtotal;
5562 
5563 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5564 	    high_mode->hdisplay != mode->hdisplay ||
5565 	    high_mode->vdisplay != mode->vdisplay ||
5566 	    high_mode->hsync_start != mode->hsync_start ||
5567 	    high_mode->hsync_end != mode->hsync_end ||
5568 	    high_mode->htotal != mode->htotal ||
5569 	    high_mode->hskew != mode->hskew ||
5570 	    high_mode->vscan != mode->vscan ||
5571 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5572 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5573 		return false;
5574 	else
5575 		return true;
5576 }
5577 
5578 static struct dc_stream_state *
5579 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5580 		       const struct drm_display_mode *drm_mode,
5581 		       const struct dm_connector_state *dm_state,
5582 		       const struct dc_stream_state *old_stream,
5583 		       int requested_bpc)
5584 {
5585 	struct drm_display_mode *preferred_mode = NULL;
5586 	struct drm_connector *drm_connector;
5587 	const struct drm_connector_state *con_state =
5588 		dm_state ? &dm_state->base : NULL;
5589 	struct dc_stream_state *stream = NULL;
5590 	struct drm_display_mode mode = *drm_mode;
5591 	struct drm_display_mode saved_mode;
5592 	struct drm_display_mode *freesync_mode = NULL;
5593 	bool native_mode_found = false;
5594 	bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5595 	int mode_refresh;
5596 	int preferred_refresh = 0;
5597 #if defined(CONFIG_DRM_AMD_DC_DCN)
5598 	struct dsc_dec_dpcd_caps dsc_caps;
5599 	uint32_t link_bandwidth_kbps;
5600 #endif
5601 	struct dc_sink *sink = NULL;
5602 
5603 	memset(&saved_mode, 0, sizeof(saved_mode));
5604 
5605 	if (aconnector == NULL) {
5606 		DRM_ERROR("aconnector is NULL!\n");
5607 		return stream;
5608 	}
5609 
5610 	drm_connector = &aconnector->base;
5611 
5612 	if (!aconnector->dc_sink) {
5613 		sink = create_fake_sink(aconnector);
5614 		if (!sink)
5615 			return stream;
5616 	} else {
5617 		sink = aconnector->dc_sink;
5618 		dc_sink_retain(sink);
5619 	}
5620 
5621 	stream = dc_create_stream_for_sink(sink);
5622 
5623 	if (stream == NULL) {
5624 		DRM_ERROR("Failed to create stream for sink!\n");
5625 		goto finish;
5626 	}
5627 
5628 	stream->dm_stream_context = aconnector;
5629 
5630 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5631 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5632 
5633 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5634 		/* Search for preferred mode */
5635 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5636 			native_mode_found = true;
5637 			break;
5638 		}
5639 	}
5640 	if (!native_mode_found)
5641 		preferred_mode = list_first_entry_or_null(
5642 				&aconnector->base.modes,
5643 				struct drm_display_mode,
5644 				head);
5645 
5646 	mode_refresh = drm_mode_vrefresh(&mode);
5647 
5648 	if (preferred_mode == NULL) {
5649 		/*
5650 		 * This may not be an error, the use case is when we have no
5651 		 * usermode calls to reset and set mode upon hotplug. In this
5652 		 * case, we call set mode ourselves to restore the previous mode
5653 		 * and the modelist may not be filled in in time.
5654 		 */
5655 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5656 	} else {
5657 		recalculate_timing |= amdgpu_freesync_vid_mode &&
5658 				 is_freesync_video_mode(&mode, aconnector);
5659 		if (recalculate_timing) {
5660 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5661 			saved_mode = mode;
5662 			mode = *freesync_mode;
5663 		} else {
5664 			decide_crtc_timing_for_drm_display_mode(
5665 				&mode, preferred_mode,
5666 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5667 		}
5668 
5669 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5670 	}
5671 
5672 	if (recalculate_timing)
5673 		drm_mode_set_crtcinfo(&saved_mode, 0);
5674 	else if (!dm_state)
5675 		drm_mode_set_crtcinfo(&mode, 0);
5676 
5677        /*
5678 	* If scaling is enabled and refresh rate didn't change
5679 	* we copy the vic and polarities of the old timings
5680 	*/
5681 	if (!recalculate_timing || mode_refresh != preferred_refresh)
5682 		fill_stream_properties_from_drm_display_mode(
5683 			stream, &mode, &aconnector->base, con_state, NULL,
5684 			requested_bpc);
5685 	else
5686 		fill_stream_properties_from_drm_display_mode(
5687 			stream, &mode, &aconnector->base, con_state, old_stream,
5688 			requested_bpc);
5689 
5690 	stream->timing.flags.DSC = 0;
5691 
5692 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5693 #if defined(CONFIG_DRM_AMD_DC_DCN)
5694 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5695 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5696 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5697 				      &dsc_caps);
5698 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5699 							     dc_link_get_link_cap(aconnector->dc_link));
5700 
5701 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5702 			/* Set DSC policy according to dsc_clock_en */
5703 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5704 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5705 
5706 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5707 						  &dsc_caps,
5708 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5709 						  0,
5710 						  link_bandwidth_kbps,
5711 						  &stream->timing,
5712 						  &stream->timing.dsc_cfg))
5713 				stream->timing.flags.DSC = 1;
5714 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5715 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5716 				stream->timing.flags.DSC = 1;
5717 
5718 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5719 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5720 
5721 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5722 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5723 
5724 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5725 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5726 		}
5727 #endif
5728 	}
5729 
5730 	update_stream_scaling_settings(&mode, dm_state, stream);
5731 
5732 	fill_audio_info(
5733 		&stream->audio_info,
5734 		drm_connector,
5735 		sink);
5736 
5737 	update_stream_signal(stream, sink);
5738 
5739 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5740 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5741 
5742 	if (stream->link->psr_settings.psr_feature_enabled) {
5743 		//
5744 		// should decide stream support vsc sdp colorimetry capability
5745 		// before building vsc info packet
5746 		//
5747 		stream->use_vsc_sdp_for_colorimetry = false;
5748 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5749 			stream->use_vsc_sdp_for_colorimetry =
5750 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5751 		} else {
5752 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5753 				stream->use_vsc_sdp_for_colorimetry = true;
5754 		}
5755 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5756 	}
5757 finish:
5758 	dc_sink_release(sink);
5759 
5760 	return stream;
5761 }
5762 
5763 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5764 {
5765 	drm_crtc_cleanup(crtc);
5766 	kfree(crtc);
5767 }
5768 
5769 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5770 				  struct drm_crtc_state *state)
5771 {
5772 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5773 
5774 	/* TODO Destroy dc_stream objects are stream object is flattened */
5775 	if (cur->stream)
5776 		dc_stream_release(cur->stream);
5777 
5778 
5779 	__drm_atomic_helper_crtc_destroy_state(state);
5780 
5781 
5782 	kfree(state);
5783 }
5784 
5785 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5786 {
5787 	struct dm_crtc_state *state;
5788 
5789 	if (crtc->state)
5790 		dm_crtc_destroy_state(crtc, crtc->state);
5791 
5792 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5793 	if (WARN_ON(!state))
5794 		return;
5795 
5796 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5797 }
5798 
5799 static struct drm_crtc_state *
5800 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5801 {
5802 	struct dm_crtc_state *state, *cur;
5803 
5804 	cur = to_dm_crtc_state(crtc->state);
5805 
5806 	if (WARN_ON(!crtc->state))
5807 		return NULL;
5808 
5809 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5810 	if (!state)
5811 		return NULL;
5812 
5813 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5814 
5815 	if (cur->stream) {
5816 		state->stream = cur->stream;
5817 		dc_stream_retain(state->stream);
5818 	}
5819 
5820 	state->active_planes = cur->active_planes;
5821 	state->vrr_infopacket = cur->vrr_infopacket;
5822 	state->abm_level = cur->abm_level;
5823 	state->vrr_supported = cur->vrr_supported;
5824 	state->freesync_config = cur->freesync_config;
5825 	state->cm_has_degamma = cur->cm_has_degamma;
5826 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5827 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
5828 
5829 	return &state->base;
5830 }
5831 
5832 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5833 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5834 {
5835 	crtc_debugfs_init(crtc);
5836 
5837 	return 0;
5838 }
5839 #endif
5840 
5841 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5842 {
5843 	enum dc_irq_source irq_source;
5844 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5845 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5846 	int rc;
5847 
5848 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5849 
5850 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5851 
5852 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5853 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
5854 	return rc;
5855 }
5856 
5857 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5858 {
5859 	enum dc_irq_source irq_source;
5860 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5861 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5862 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5863 #if defined(CONFIG_DRM_AMD_DC_DCN)
5864 	struct amdgpu_display_manager *dm = &adev->dm;
5865 	unsigned long flags;
5866 #endif
5867 	int rc = 0;
5868 
5869 	if (enable) {
5870 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5871 		if (amdgpu_dm_vrr_active(acrtc_state))
5872 			rc = dm_set_vupdate_irq(crtc, true);
5873 	} else {
5874 		/* vblank irq off -> vupdate irq off */
5875 		rc = dm_set_vupdate_irq(crtc, false);
5876 	}
5877 
5878 	if (rc)
5879 		return rc;
5880 
5881 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5882 
5883 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5884 		return -EBUSY;
5885 
5886 	if (amdgpu_in_reset(adev))
5887 		return 0;
5888 
5889 #if defined(CONFIG_DRM_AMD_DC_DCN)
5890 	spin_lock_irqsave(&dm->vblank_lock, flags);
5891 	dm->vblank_workqueue->dm = dm;
5892 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5893 	dm->vblank_workqueue->enable = enable;
5894 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
5895 	schedule_work(&dm->vblank_workqueue->mall_work);
5896 #endif
5897 
5898 	return 0;
5899 }
5900 
5901 static int dm_enable_vblank(struct drm_crtc *crtc)
5902 {
5903 	return dm_set_vblank(crtc, true);
5904 }
5905 
5906 static void dm_disable_vblank(struct drm_crtc *crtc)
5907 {
5908 	dm_set_vblank(crtc, false);
5909 }
5910 
5911 /* Implemented only the options currently availible for the driver */
5912 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5913 	.reset = dm_crtc_reset_state,
5914 	.destroy = amdgpu_dm_crtc_destroy,
5915 	.set_config = drm_atomic_helper_set_config,
5916 	.page_flip = drm_atomic_helper_page_flip,
5917 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5918 	.atomic_destroy_state = dm_crtc_destroy_state,
5919 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5920 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5921 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5922 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5923 	.enable_vblank = dm_enable_vblank,
5924 	.disable_vblank = dm_disable_vblank,
5925 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5926 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5927 	.late_register = amdgpu_dm_crtc_late_register,
5928 #endif
5929 };
5930 
5931 static enum drm_connector_status
5932 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5933 {
5934 	bool connected;
5935 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5936 
5937 	/*
5938 	 * Notes:
5939 	 * 1. This interface is NOT called in context of HPD irq.
5940 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5941 	 * makes it a bad place for *any* MST-related activity.
5942 	 */
5943 
5944 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5945 	    !aconnector->fake_enable)
5946 		connected = (aconnector->dc_sink != NULL);
5947 	else
5948 		connected = (aconnector->base.force == DRM_FORCE_ON);
5949 
5950 	update_subconnector_property(aconnector);
5951 
5952 	return (connected ? connector_status_connected :
5953 			connector_status_disconnected);
5954 }
5955 
5956 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5957 					    struct drm_connector_state *connector_state,
5958 					    struct drm_property *property,
5959 					    uint64_t val)
5960 {
5961 	struct drm_device *dev = connector->dev;
5962 	struct amdgpu_device *adev = drm_to_adev(dev);
5963 	struct dm_connector_state *dm_old_state =
5964 		to_dm_connector_state(connector->state);
5965 	struct dm_connector_state *dm_new_state =
5966 		to_dm_connector_state(connector_state);
5967 
5968 	int ret = -EINVAL;
5969 
5970 	if (property == dev->mode_config.scaling_mode_property) {
5971 		enum amdgpu_rmx_type rmx_type;
5972 
5973 		switch (val) {
5974 		case DRM_MODE_SCALE_CENTER:
5975 			rmx_type = RMX_CENTER;
5976 			break;
5977 		case DRM_MODE_SCALE_ASPECT:
5978 			rmx_type = RMX_ASPECT;
5979 			break;
5980 		case DRM_MODE_SCALE_FULLSCREEN:
5981 			rmx_type = RMX_FULL;
5982 			break;
5983 		case DRM_MODE_SCALE_NONE:
5984 		default:
5985 			rmx_type = RMX_OFF;
5986 			break;
5987 		}
5988 
5989 		if (dm_old_state->scaling == rmx_type)
5990 			return 0;
5991 
5992 		dm_new_state->scaling = rmx_type;
5993 		ret = 0;
5994 	} else if (property == adev->mode_info.underscan_hborder_property) {
5995 		dm_new_state->underscan_hborder = val;
5996 		ret = 0;
5997 	} else if (property == adev->mode_info.underscan_vborder_property) {
5998 		dm_new_state->underscan_vborder = val;
5999 		ret = 0;
6000 	} else if (property == adev->mode_info.underscan_property) {
6001 		dm_new_state->underscan_enable = val;
6002 		ret = 0;
6003 	} else if (property == adev->mode_info.abm_level_property) {
6004 		dm_new_state->abm_level = val;
6005 		ret = 0;
6006 	}
6007 
6008 	return ret;
6009 }
6010 
6011 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6012 					    const struct drm_connector_state *state,
6013 					    struct drm_property *property,
6014 					    uint64_t *val)
6015 {
6016 	struct drm_device *dev = connector->dev;
6017 	struct amdgpu_device *adev = drm_to_adev(dev);
6018 	struct dm_connector_state *dm_state =
6019 		to_dm_connector_state(state);
6020 	int ret = -EINVAL;
6021 
6022 	if (property == dev->mode_config.scaling_mode_property) {
6023 		switch (dm_state->scaling) {
6024 		case RMX_CENTER:
6025 			*val = DRM_MODE_SCALE_CENTER;
6026 			break;
6027 		case RMX_ASPECT:
6028 			*val = DRM_MODE_SCALE_ASPECT;
6029 			break;
6030 		case RMX_FULL:
6031 			*val = DRM_MODE_SCALE_FULLSCREEN;
6032 			break;
6033 		case RMX_OFF:
6034 		default:
6035 			*val = DRM_MODE_SCALE_NONE;
6036 			break;
6037 		}
6038 		ret = 0;
6039 	} else if (property == adev->mode_info.underscan_hborder_property) {
6040 		*val = dm_state->underscan_hborder;
6041 		ret = 0;
6042 	} else if (property == adev->mode_info.underscan_vborder_property) {
6043 		*val = dm_state->underscan_vborder;
6044 		ret = 0;
6045 	} else if (property == adev->mode_info.underscan_property) {
6046 		*val = dm_state->underscan_enable;
6047 		ret = 0;
6048 	} else if (property == adev->mode_info.abm_level_property) {
6049 		*val = dm_state->abm_level;
6050 		ret = 0;
6051 	}
6052 
6053 	return ret;
6054 }
6055 
6056 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6057 {
6058 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6059 
6060 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6061 }
6062 
6063 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6064 {
6065 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6066 	const struct dc_link *link = aconnector->dc_link;
6067 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6068 	struct amdgpu_display_manager *dm = &adev->dm;
6069 
6070 	/*
6071 	 * Call only if mst_mgr was iniitalized before since it's not done
6072 	 * for all connector types.
6073 	 */
6074 	if (aconnector->mst_mgr.dev)
6075 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6076 
6077 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6078 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6079 
6080 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6081 	    link->type != dc_connection_none &&
6082 	    dm->backlight_dev) {
6083 		backlight_device_unregister(dm->backlight_dev);
6084 		dm->backlight_dev = NULL;
6085 	}
6086 #endif
6087 
6088 	if (aconnector->dc_em_sink)
6089 		dc_sink_release(aconnector->dc_em_sink);
6090 	aconnector->dc_em_sink = NULL;
6091 	if (aconnector->dc_sink)
6092 		dc_sink_release(aconnector->dc_sink);
6093 	aconnector->dc_sink = NULL;
6094 
6095 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6096 	drm_connector_unregister(connector);
6097 	drm_connector_cleanup(connector);
6098 	if (aconnector->i2c) {
6099 		i2c_del_adapter(&aconnector->i2c->base);
6100 		kfree(aconnector->i2c);
6101 	}
6102 	kfree(aconnector->dm_dp_aux.aux.name);
6103 
6104 	kfree(connector);
6105 }
6106 
6107 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6108 {
6109 	struct dm_connector_state *state =
6110 		to_dm_connector_state(connector->state);
6111 
6112 	if (connector->state)
6113 		__drm_atomic_helper_connector_destroy_state(connector->state);
6114 
6115 	kfree(state);
6116 
6117 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6118 
6119 	if (state) {
6120 		state->scaling = RMX_OFF;
6121 		state->underscan_enable = false;
6122 		state->underscan_hborder = 0;
6123 		state->underscan_vborder = 0;
6124 		state->base.max_requested_bpc = 8;
6125 		state->vcpi_slots = 0;
6126 		state->pbn = 0;
6127 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6128 			state->abm_level = amdgpu_dm_abm_level;
6129 
6130 		__drm_atomic_helper_connector_reset(connector, &state->base);
6131 	}
6132 }
6133 
6134 struct drm_connector_state *
6135 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6136 {
6137 	struct dm_connector_state *state =
6138 		to_dm_connector_state(connector->state);
6139 
6140 	struct dm_connector_state *new_state =
6141 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6142 
6143 	if (!new_state)
6144 		return NULL;
6145 
6146 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6147 
6148 	new_state->freesync_capable = state->freesync_capable;
6149 	new_state->abm_level = state->abm_level;
6150 	new_state->scaling = state->scaling;
6151 	new_state->underscan_enable = state->underscan_enable;
6152 	new_state->underscan_hborder = state->underscan_hborder;
6153 	new_state->underscan_vborder = state->underscan_vborder;
6154 	new_state->vcpi_slots = state->vcpi_slots;
6155 	new_state->pbn = state->pbn;
6156 	return &new_state->base;
6157 }
6158 
6159 static int
6160 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6161 {
6162 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6163 		to_amdgpu_dm_connector(connector);
6164 	int r;
6165 
6166 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6167 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6168 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6169 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6170 		if (r)
6171 			return r;
6172 	}
6173 
6174 #if defined(CONFIG_DEBUG_FS)
6175 	connector_debugfs_init(amdgpu_dm_connector);
6176 #endif
6177 
6178 	return 0;
6179 }
6180 
6181 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6182 	.reset = amdgpu_dm_connector_funcs_reset,
6183 	.detect = amdgpu_dm_connector_detect,
6184 	.fill_modes = drm_helper_probe_single_connector_modes,
6185 	.destroy = amdgpu_dm_connector_destroy,
6186 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6187 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6188 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6189 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6190 	.late_register = amdgpu_dm_connector_late_register,
6191 	.early_unregister = amdgpu_dm_connector_unregister
6192 };
6193 
6194 static int get_modes(struct drm_connector *connector)
6195 {
6196 	return amdgpu_dm_connector_get_modes(connector);
6197 }
6198 
6199 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6200 {
6201 	struct dc_sink_init_data init_params = {
6202 			.link = aconnector->dc_link,
6203 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6204 	};
6205 	struct edid *edid;
6206 
6207 	if (!aconnector->base.edid_blob_ptr) {
6208 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6209 				aconnector->base.name);
6210 
6211 		aconnector->base.force = DRM_FORCE_OFF;
6212 		aconnector->base.override_edid = false;
6213 		return;
6214 	}
6215 
6216 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6217 
6218 	aconnector->edid = edid;
6219 
6220 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6221 		aconnector->dc_link,
6222 		(uint8_t *)edid,
6223 		(edid->extensions + 1) * EDID_LENGTH,
6224 		&init_params);
6225 
6226 	if (aconnector->base.force == DRM_FORCE_ON) {
6227 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6228 		aconnector->dc_link->local_sink :
6229 		aconnector->dc_em_sink;
6230 		dc_sink_retain(aconnector->dc_sink);
6231 	}
6232 }
6233 
6234 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6235 {
6236 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6237 
6238 	/*
6239 	 * In case of headless boot with force on for DP managed connector
6240 	 * Those settings have to be != 0 to get initial modeset
6241 	 */
6242 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6243 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6244 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6245 	}
6246 
6247 
6248 	aconnector->base.override_edid = true;
6249 	create_eml_sink(aconnector);
6250 }
6251 
6252 static struct dc_stream_state *
6253 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6254 				const struct drm_display_mode *drm_mode,
6255 				const struct dm_connector_state *dm_state,
6256 				const struct dc_stream_state *old_stream)
6257 {
6258 	struct drm_connector *connector = &aconnector->base;
6259 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6260 	struct dc_stream_state *stream;
6261 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6262 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6263 	enum dc_status dc_result = DC_OK;
6264 
6265 	do {
6266 		stream = create_stream_for_sink(aconnector, drm_mode,
6267 						dm_state, old_stream,
6268 						requested_bpc);
6269 		if (stream == NULL) {
6270 			DRM_ERROR("Failed to create stream for sink!\n");
6271 			break;
6272 		}
6273 
6274 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6275 
6276 		if (dc_result != DC_OK) {
6277 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6278 				      drm_mode->hdisplay,
6279 				      drm_mode->vdisplay,
6280 				      drm_mode->clock,
6281 				      dc_result,
6282 				      dc_status_to_str(dc_result));
6283 
6284 			dc_stream_release(stream);
6285 			stream = NULL;
6286 			requested_bpc -= 2; /* lower bpc to retry validation */
6287 		}
6288 
6289 	} while (stream == NULL && requested_bpc >= 6);
6290 
6291 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6292 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6293 
6294 		aconnector->force_yuv420_output = true;
6295 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6296 						dm_state, old_stream);
6297 		aconnector->force_yuv420_output = false;
6298 	}
6299 
6300 	return stream;
6301 }
6302 
6303 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6304 				   struct drm_display_mode *mode)
6305 {
6306 	int result = MODE_ERROR;
6307 	struct dc_sink *dc_sink;
6308 	/* TODO: Unhardcode stream count */
6309 	struct dc_stream_state *stream;
6310 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6311 
6312 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6313 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6314 		return result;
6315 
6316 	/*
6317 	 * Only run this the first time mode_valid is called to initilialize
6318 	 * EDID mgmt
6319 	 */
6320 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6321 		!aconnector->dc_em_sink)
6322 		handle_edid_mgmt(aconnector);
6323 
6324 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6325 
6326 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6327 				aconnector->base.force != DRM_FORCE_ON) {
6328 		DRM_ERROR("dc_sink is NULL!\n");
6329 		goto fail;
6330 	}
6331 
6332 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6333 	if (stream) {
6334 		dc_stream_release(stream);
6335 		result = MODE_OK;
6336 	}
6337 
6338 fail:
6339 	/* TODO: error handling*/
6340 	return result;
6341 }
6342 
6343 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6344 				struct dc_info_packet *out)
6345 {
6346 	struct hdmi_drm_infoframe frame;
6347 	unsigned char buf[30]; /* 26 + 4 */
6348 	ssize_t len;
6349 	int ret, i;
6350 
6351 	memset(out, 0, sizeof(*out));
6352 
6353 	if (!state->hdr_output_metadata)
6354 		return 0;
6355 
6356 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6357 	if (ret)
6358 		return ret;
6359 
6360 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6361 	if (len < 0)
6362 		return (int)len;
6363 
6364 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6365 	if (len != 30)
6366 		return -EINVAL;
6367 
6368 	/* Prepare the infopacket for DC. */
6369 	switch (state->connector->connector_type) {
6370 	case DRM_MODE_CONNECTOR_HDMIA:
6371 		out->hb0 = 0x87; /* type */
6372 		out->hb1 = 0x01; /* version */
6373 		out->hb2 = 0x1A; /* length */
6374 		out->sb[0] = buf[3]; /* checksum */
6375 		i = 1;
6376 		break;
6377 
6378 	case DRM_MODE_CONNECTOR_DisplayPort:
6379 	case DRM_MODE_CONNECTOR_eDP:
6380 		out->hb0 = 0x00; /* sdp id, zero */
6381 		out->hb1 = 0x87; /* type */
6382 		out->hb2 = 0x1D; /* payload len - 1 */
6383 		out->hb3 = (0x13 << 2); /* sdp version */
6384 		out->sb[0] = 0x01; /* version */
6385 		out->sb[1] = 0x1A; /* length */
6386 		i = 2;
6387 		break;
6388 
6389 	default:
6390 		return -EINVAL;
6391 	}
6392 
6393 	memcpy(&out->sb[i], &buf[4], 26);
6394 	out->valid = true;
6395 
6396 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6397 		       sizeof(out->sb), false);
6398 
6399 	return 0;
6400 }
6401 
6402 static bool
6403 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6404 			  const struct drm_connector_state *new_state)
6405 {
6406 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6407 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6408 
6409 	if (old_blob != new_blob) {
6410 		if (old_blob && new_blob &&
6411 		    old_blob->length == new_blob->length)
6412 			return memcmp(old_blob->data, new_blob->data,
6413 				      old_blob->length);
6414 
6415 		return true;
6416 	}
6417 
6418 	return false;
6419 }
6420 
6421 static int
6422 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6423 				 struct drm_atomic_state *state)
6424 {
6425 	struct drm_connector_state *new_con_state =
6426 		drm_atomic_get_new_connector_state(state, conn);
6427 	struct drm_connector_state *old_con_state =
6428 		drm_atomic_get_old_connector_state(state, conn);
6429 	struct drm_crtc *crtc = new_con_state->crtc;
6430 	struct drm_crtc_state *new_crtc_state;
6431 	int ret;
6432 
6433 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6434 
6435 	if (!crtc)
6436 		return 0;
6437 
6438 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6439 		struct dc_info_packet hdr_infopacket;
6440 
6441 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6442 		if (ret)
6443 			return ret;
6444 
6445 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6446 		if (IS_ERR(new_crtc_state))
6447 			return PTR_ERR(new_crtc_state);
6448 
6449 		/*
6450 		 * DC considers the stream backends changed if the
6451 		 * static metadata changes. Forcing the modeset also
6452 		 * gives a simple way for userspace to switch from
6453 		 * 8bpc to 10bpc when setting the metadata to enter
6454 		 * or exit HDR.
6455 		 *
6456 		 * Changing the static metadata after it's been
6457 		 * set is permissible, however. So only force a
6458 		 * modeset if we're entering or exiting HDR.
6459 		 */
6460 		new_crtc_state->mode_changed =
6461 			!old_con_state->hdr_output_metadata ||
6462 			!new_con_state->hdr_output_metadata;
6463 	}
6464 
6465 	return 0;
6466 }
6467 
6468 static const struct drm_connector_helper_funcs
6469 amdgpu_dm_connector_helper_funcs = {
6470 	/*
6471 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6472 	 * modes will be filtered by drm_mode_validate_size(), and those modes
6473 	 * are missing after user start lightdm. So we need to renew modes list.
6474 	 * in get_modes call back, not just return the modes count
6475 	 */
6476 	.get_modes = get_modes,
6477 	.mode_valid = amdgpu_dm_connector_mode_valid,
6478 	.atomic_check = amdgpu_dm_connector_atomic_check,
6479 };
6480 
6481 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6482 {
6483 }
6484 
6485 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6486 {
6487 	struct drm_atomic_state *state = new_crtc_state->state;
6488 	struct drm_plane *plane;
6489 	int num_active = 0;
6490 
6491 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6492 		struct drm_plane_state *new_plane_state;
6493 
6494 		/* Cursor planes are "fake". */
6495 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6496 			continue;
6497 
6498 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6499 
6500 		if (!new_plane_state) {
6501 			/*
6502 			 * The plane is enable on the CRTC and hasn't changed
6503 			 * state. This means that it previously passed
6504 			 * validation and is therefore enabled.
6505 			 */
6506 			num_active += 1;
6507 			continue;
6508 		}
6509 
6510 		/* We need a framebuffer to be considered enabled. */
6511 		num_active += (new_plane_state->fb != NULL);
6512 	}
6513 
6514 	return num_active;
6515 }
6516 
6517 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6518 					 struct drm_crtc_state *new_crtc_state)
6519 {
6520 	struct dm_crtc_state *dm_new_crtc_state =
6521 		to_dm_crtc_state(new_crtc_state);
6522 
6523 	dm_new_crtc_state->active_planes = 0;
6524 
6525 	if (!dm_new_crtc_state->stream)
6526 		return;
6527 
6528 	dm_new_crtc_state->active_planes =
6529 		count_crtc_active_planes(new_crtc_state);
6530 }
6531 
6532 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6533 				       struct drm_atomic_state *state)
6534 {
6535 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6536 									  crtc);
6537 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6538 	struct dc *dc = adev->dm.dc;
6539 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6540 	int ret = -EINVAL;
6541 
6542 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6543 
6544 	dm_update_crtc_active_planes(crtc, crtc_state);
6545 
6546 	if (unlikely(!dm_crtc_state->stream &&
6547 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6548 		WARN_ON(1);
6549 		return ret;
6550 	}
6551 
6552 	/*
6553 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6554 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6555 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6556 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6557 	 */
6558 	if (crtc_state->enable &&
6559 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6560 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6561 		return -EINVAL;
6562 	}
6563 
6564 	/* In some use cases, like reset, no stream is attached */
6565 	if (!dm_crtc_state->stream)
6566 		return 0;
6567 
6568 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6569 		return 0;
6570 
6571 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6572 	return ret;
6573 }
6574 
6575 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6576 				      const struct drm_display_mode *mode,
6577 				      struct drm_display_mode *adjusted_mode)
6578 {
6579 	return true;
6580 }
6581 
6582 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6583 	.disable = dm_crtc_helper_disable,
6584 	.atomic_check = dm_crtc_helper_atomic_check,
6585 	.mode_fixup = dm_crtc_helper_mode_fixup,
6586 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6587 };
6588 
6589 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6590 {
6591 
6592 }
6593 
6594 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6595 {
6596 	switch (display_color_depth) {
6597 		case COLOR_DEPTH_666:
6598 			return 6;
6599 		case COLOR_DEPTH_888:
6600 			return 8;
6601 		case COLOR_DEPTH_101010:
6602 			return 10;
6603 		case COLOR_DEPTH_121212:
6604 			return 12;
6605 		case COLOR_DEPTH_141414:
6606 			return 14;
6607 		case COLOR_DEPTH_161616:
6608 			return 16;
6609 		default:
6610 			break;
6611 		}
6612 	return 0;
6613 }
6614 
6615 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6616 					  struct drm_crtc_state *crtc_state,
6617 					  struct drm_connector_state *conn_state)
6618 {
6619 	struct drm_atomic_state *state = crtc_state->state;
6620 	struct drm_connector *connector = conn_state->connector;
6621 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6622 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6623 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6624 	struct drm_dp_mst_topology_mgr *mst_mgr;
6625 	struct drm_dp_mst_port *mst_port;
6626 	enum dc_color_depth color_depth;
6627 	int clock, bpp = 0;
6628 	bool is_y420 = false;
6629 
6630 	if (!aconnector->port || !aconnector->dc_sink)
6631 		return 0;
6632 
6633 	mst_port = aconnector->port;
6634 	mst_mgr = &aconnector->mst_port->mst_mgr;
6635 
6636 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6637 		return 0;
6638 
6639 	if (!state->duplicated) {
6640 		int max_bpc = conn_state->max_requested_bpc;
6641 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6642 				aconnector->force_yuv420_output;
6643 		color_depth = convert_color_depth_from_display_info(connector,
6644 								    is_y420,
6645 								    max_bpc);
6646 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6647 		clock = adjusted_mode->clock;
6648 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6649 	}
6650 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6651 									   mst_mgr,
6652 									   mst_port,
6653 									   dm_new_connector_state->pbn,
6654 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6655 	if (dm_new_connector_state->vcpi_slots < 0) {
6656 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6657 		return dm_new_connector_state->vcpi_slots;
6658 	}
6659 	return 0;
6660 }
6661 
6662 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6663 	.disable = dm_encoder_helper_disable,
6664 	.atomic_check = dm_encoder_helper_atomic_check
6665 };
6666 
6667 #if defined(CONFIG_DRM_AMD_DC_DCN)
6668 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6669 					    struct dc_state *dc_state)
6670 {
6671 	struct dc_stream_state *stream = NULL;
6672 	struct drm_connector *connector;
6673 	struct drm_connector_state *new_con_state;
6674 	struct amdgpu_dm_connector *aconnector;
6675 	struct dm_connector_state *dm_conn_state;
6676 	int i, j, clock, bpp;
6677 	int vcpi, pbn_div, pbn = 0;
6678 
6679 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6680 
6681 		aconnector = to_amdgpu_dm_connector(connector);
6682 
6683 		if (!aconnector->port)
6684 			continue;
6685 
6686 		if (!new_con_state || !new_con_state->crtc)
6687 			continue;
6688 
6689 		dm_conn_state = to_dm_connector_state(new_con_state);
6690 
6691 		for (j = 0; j < dc_state->stream_count; j++) {
6692 			stream = dc_state->streams[j];
6693 			if (!stream)
6694 				continue;
6695 
6696 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6697 				break;
6698 
6699 			stream = NULL;
6700 		}
6701 
6702 		if (!stream)
6703 			continue;
6704 
6705 		if (stream->timing.flags.DSC != 1) {
6706 			drm_dp_mst_atomic_enable_dsc(state,
6707 						     aconnector->port,
6708 						     dm_conn_state->pbn,
6709 						     0,
6710 						     false);
6711 			continue;
6712 		}
6713 
6714 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6715 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6716 		clock = stream->timing.pix_clk_100hz / 10;
6717 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6718 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6719 						    aconnector->port,
6720 						    pbn, pbn_div,
6721 						    true);
6722 		if (vcpi < 0)
6723 			return vcpi;
6724 
6725 		dm_conn_state->pbn = pbn;
6726 		dm_conn_state->vcpi_slots = vcpi;
6727 	}
6728 	return 0;
6729 }
6730 #endif
6731 
6732 static void dm_drm_plane_reset(struct drm_plane *plane)
6733 {
6734 	struct dm_plane_state *amdgpu_state = NULL;
6735 
6736 	if (plane->state)
6737 		plane->funcs->atomic_destroy_state(plane, plane->state);
6738 
6739 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6740 	WARN_ON(amdgpu_state == NULL);
6741 
6742 	if (amdgpu_state)
6743 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6744 }
6745 
6746 static struct drm_plane_state *
6747 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6748 {
6749 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6750 
6751 	old_dm_plane_state = to_dm_plane_state(plane->state);
6752 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6753 	if (!dm_plane_state)
6754 		return NULL;
6755 
6756 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6757 
6758 	if (old_dm_plane_state->dc_state) {
6759 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6760 		dc_plane_state_retain(dm_plane_state->dc_state);
6761 	}
6762 
6763 	return &dm_plane_state->base;
6764 }
6765 
6766 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6767 				struct drm_plane_state *state)
6768 {
6769 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6770 
6771 	if (dm_plane_state->dc_state)
6772 		dc_plane_state_release(dm_plane_state->dc_state);
6773 
6774 	drm_atomic_helper_plane_destroy_state(plane, state);
6775 }
6776 
6777 static const struct drm_plane_funcs dm_plane_funcs = {
6778 	.update_plane	= drm_atomic_helper_update_plane,
6779 	.disable_plane	= drm_atomic_helper_disable_plane,
6780 	.destroy	= drm_primary_helper_destroy,
6781 	.reset = dm_drm_plane_reset,
6782 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6783 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6784 	.format_mod_supported = dm_plane_format_mod_supported,
6785 };
6786 
6787 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6788 				      struct drm_plane_state *new_state)
6789 {
6790 	struct amdgpu_framebuffer *afb;
6791 	struct drm_gem_object *obj;
6792 	struct amdgpu_device *adev;
6793 	struct amdgpu_bo *rbo;
6794 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6795 	struct list_head list;
6796 	struct ttm_validate_buffer tv;
6797 	struct ww_acquire_ctx ticket;
6798 	uint32_t domain;
6799 	int r;
6800 
6801 	if (!new_state->fb) {
6802 		DRM_DEBUG_KMS("No FB bound\n");
6803 		return 0;
6804 	}
6805 
6806 	afb = to_amdgpu_framebuffer(new_state->fb);
6807 	obj = new_state->fb->obj[0];
6808 	rbo = gem_to_amdgpu_bo(obj);
6809 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6810 	INIT_LIST_HEAD(&list);
6811 
6812 	tv.bo = &rbo->tbo;
6813 	tv.num_shared = 1;
6814 	list_add(&tv.head, &list);
6815 
6816 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6817 	if (r) {
6818 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6819 		return r;
6820 	}
6821 
6822 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6823 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6824 	else
6825 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6826 
6827 	r = amdgpu_bo_pin(rbo, domain);
6828 	if (unlikely(r != 0)) {
6829 		if (r != -ERESTARTSYS)
6830 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6831 		ttm_eu_backoff_reservation(&ticket, &list);
6832 		return r;
6833 	}
6834 
6835 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6836 	if (unlikely(r != 0)) {
6837 		amdgpu_bo_unpin(rbo);
6838 		ttm_eu_backoff_reservation(&ticket, &list);
6839 		DRM_ERROR("%p bind failed\n", rbo);
6840 		return r;
6841 	}
6842 
6843 	ttm_eu_backoff_reservation(&ticket, &list);
6844 
6845 	afb->address = amdgpu_bo_gpu_offset(rbo);
6846 
6847 	amdgpu_bo_ref(rbo);
6848 
6849 	/**
6850 	 * We don't do surface updates on planes that have been newly created,
6851 	 * but we also don't have the afb->address during atomic check.
6852 	 *
6853 	 * Fill in buffer attributes depending on the address here, but only on
6854 	 * newly created planes since they're not being used by DC yet and this
6855 	 * won't modify global state.
6856 	 */
6857 	dm_plane_state_old = to_dm_plane_state(plane->state);
6858 	dm_plane_state_new = to_dm_plane_state(new_state);
6859 
6860 	if (dm_plane_state_new->dc_state &&
6861 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6862 		struct dc_plane_state *plane_state =
6863 			dm_plane_state_new->dc_state;
6864 		bool force_disable_dcc = !plane_state->dcc.enable;
6865 
6866 		fill_plane_buffer_attributes(
6867 			adev, afb, plane_state->format, plane_state->rotation,
6868 			afb->tiling_flags,
6869 			&plane_state->tiling_info, &plane_state->plane_size,
6870 			&plane_state->dcc, &plane_state->address,
6871 			afb->tmz_surface, force_disable_dcc);
6872 	}
6873 
6874 	return 0;
6875 }
6876 
6877 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6878 				       struct drm_plane_state *old_state)
6879 {
6880 	struct amdgpu_bo *rbo;
6881 	int r;
6882 
6883 	if (!old_state->fb)
6884 		return;
6885 
6886 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6887 	r = amdgpu_bo_reserve(rbo, false);
6888 	if (unlikely(r)) {
6889 		DRM_ERROR("failed to reserve rbo before unpin\n");
6890 		return;
6891 	}
6892 
6893 	amdgpu_bo_unpin(rbo);
6894 	amdgpu_bo_unreserve(rbo);
6895 	amdgpu_bo_unref(&rbo);
6896 }
6897 
6898 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6899 				       struct drm_crtc_state *new_crtc_state)
6900 {
6901 	struct drm_framebuffer *fb = state->fb;
6902 	int min_downscale, max_upscale;
6903 	int min_scale = 0;
6904 	int max_scale = INT_MAX;
6905 
6906 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6907 	if (fb && state->crtc) {
6908 		/* Validate viewport to cover the case when only the position changes */
6909 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6910 			int viewport_width = state->crtc_w;
6911 			int viewport_height = state->crtc_h;
6912 
6913 			if (state->crtc_x < 0)
6914 				viewport_width += state->crtc_x;
6915 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6916 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6917 
6918 			if (state->crtc_y < 0)
6919 				viewport_height += state->crtc_y;
6920 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6921 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6922 
6923 			if (viewport_width < 0 || viewport_height < 0) {
6924 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6925 				return -EINVAL;
6926 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6927 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6928 				return -EINVAL;
6929 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
6930 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6931 				return -EINVAL;
6932 			}
6933 
6934 		}
6935 
6936 		/* Get min/max allowed scaling factors from plane caps. */
6937 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6938 					     &min_downscale, &max_upscale);
6939 		/*
6940 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6941 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6942 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6943 		 */
6944 		min_scale = (1000 << 16) / max_upscale;
6945 		max_scale = (1000 << 16) / min_downscale;
6946 	}
6947 
6948 	return drm_atomic_helper_check_plane_state(
6949 		state, new_crtc_state, min_scale, max_scale, true, true);
6950 }
6951 
6952 static int dm_plane_atomic_check(struct drm_plane *plane,
6953 				 struct drm_atomic_state *state)
6954 {
6955 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6956 										 plane);
6957 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6958 	struct dc *dc = adev->dm.dc;
6959 	struct dm_plane_state *dm_plane_state;
6960 	struct dc_scaling_info scaling_info;
6961 	struct drm_crtc_state *new_crtc_state;
6962 	int ret;
6963 
6964 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6965 
6966 	dm_plane_state = to_dm_plane_state(new_plane_state);
6967 
6968 	if (!dm_plane_state->dc_state)
6969 		return 0;
6970 
6971 	new_crtc_state =
6972 		drm_atomic_get_new_crtc_state(state,
6973 					      new_plane_state->crtc);
6974 	if (!new_crtc_state)
6975 		return -EINVAL;
6976 
6977 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6978 	if (ret)
6979 		return ret;
6980 
6981 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6982 	if (ret)
6983 		return ret;
6984 
6985 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6986 		return 0;
6987 
6988 	return -EINVAL;
6989 }
6990 
6991 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6992 				       struct drm_atomic_state *state)
6993 {
6994 	/* Only support async updates on cursor planes. */
6995 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6996 		return -EINVAL;
6997 
6998 	return 0;
6999 }
7000 
7001 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7002 					 struct drm_atomic_state *state)
7003 {
7004 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7005 									   plane);
7006 	struct drm_plane_state *old_state =
7007 		drm_atomic_get_old_plane_state(state, plane);
7008 
7009 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7010 
7011 	swap(plane->state->fb, new_state->fb);
7012 
7013 	plane->state->src_x = new_state->src_x;
7014 	plane->state->src_y = new_state->src_y;
7015 	plane->state->src_w = new_state->src_w;
7016 	plane->state->src_h = new_state->src_h;
7017 	plane->state->crtc_x = new_state->crtc_x;
7018 	plane->state->crtc_y = new_state->crtc_y;
7019 	plane->state->crtc_w = new_state->crtc_w;
7020 	plane->state->crtc_h = new_state->crtc_h;
7021 
7022 	handle_cursor_update(plane, old_state);
7023 }
7024 
7025 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7026 	.prepare_fb = dm_plane_helper_prepare_fb,
7027 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7028 	.atomic_check = dm_plane_atomic_check,
7029 	.atomic_async_check = dm_plane_atomic_async_check,
7030 	.atomic_async_update = dm_plane_atomic_async_update
7031 };
7032 
7033 /*
7034  * TODO: these are currently initialized to rgb formats only.
7035  * For future use cases we should either initialize them dynamically based on
7036  * plane capabilities, or initialize this array to all formats, so internal drm
7037  * check will succeed, and let DC implement proper check
7038  */
7039 static const uint32_t rgb_formats[] = {
7040 	DRM_FORMAT_XRGB8888,
7041 	DRM_FORMAT_ARGB8888,
7042 	DRM_FORMAT_RGBA8888,
7043 	DRM_FORMAT_XRGB2101010,
7044 	DRM_FORMAT_XBGR2101010,
7045 	DRM_FORMAT_ARGB2101010,
7046 	DRM_FORMAT_ABGR2101010,
7047 	DRM_FORMAT_XBGR8888,
7048 	DRM_FORMAT_ABGR8888,
7049 	DRM_FORMAT_RGB565,
7050 };
7051 
7052 static const uint32_t overlay_formats[] = {
7053 	DRM_FORMAT_XRGB8888,
7054 	DRM_FORMAT_ARGB8888,
7055 	DRM_FORMAT_RGBA8888,
7056 	DRM_FORMAT_XBGR8888,
7057 	DRM_FORMAT_ABGR8888,
7058 	DRM_FORMAT_RGB565
7059 };
7060 
7061 static const u32 cursor_formats[] = {
7062 	DRM_FORMAT_ARGB8888
7063 };
7064 
7065 static int get_plane_formats(const struct drm_plane *plane,
7066 			     const struct dc_plane_cap *plane_cap,
7067 			     uint32_t *formats, int max_formats)
7068 {
7069 	int i, num_formats = 0;
7070 
7071 	/*
7072 	 * TODO: Query support for each group of formats directly from
7073 	 * DC plane caps. This will require adding more formats to the
7074 	 * caps list.
7075 	 */
7076 
7077 	switch (plane->type) {
7078 	case DRM_PLANE_TYPE_PRIMARY:
7079 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7080 			if (num_formats >= max_formats)
7081 				break;
7082 
7083 			formats[num_formats++] = rgb_formats[i];
7084 		}
7085 
7086 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7087 			formats[num_formats++] = DRM_FORMAT_NV12;
7088 		if (plane_cap && plane_cap->pixel_format_support.p010)
7089 			formats[num_formats++] = DRM_FORMAT_P010;
7090 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7091 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7092 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7093 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7094 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7095 		}
7096 		break;
7097 
7098 	case DRM_PLANE_TYPE_OVERLAY:
7099 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7100 			if (num_formats >= max_formats)
7101 				break;
7102 
7103 			formats[num_formats++] = overlay_formats[i];
7104 		}
7105 		break;
7106 
7107 	case DRM_PLANE_TYPE_CURSOR:
7108 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7109 			if (num_formats >= max_formats)
7110 				break;
7111 
7112 			formats[num_formats++] = cursor_formats[i];
7113 		}
7114 		break;
7115 	}
7116 
7117 	return num_formats;
7118 }
7119 
7120 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7121 				struct drm_plane *plane,
7122 				unsigned long possible_crtcs,
7123 				const struct dc_plane_cap *plane_cap)
7124 {
7125 	uint32_t formats[32];
7126 	int num_formats;
7127 	int res = -EPERM;
7128 	unsigned int supported_rotations;
7129 	uint64_t *modifiers = NULL;
7130 
7131 	num_formats = get_plane_formats(plane, plane_cap, formats,
7132 					ARRAY_SIZE(formats));
7133 
7134 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7135 	if (res)
7136 		return res;
7137 
7138 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7139 				       &dm_plane_funcs, formats, num_formats,
7140 				       modifiers, plane->type, NULL);
7141 	kfree(modifiers);
7142 	if (res)
7143 		return res;
7144 
7145 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7146 	    plane_cap && plane_cap->per_pixel_alpha) {
7147 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7148 					  BIT(DRM_MODE_BLEND_PREMULTI);
7149 
7150 		drm_plane_create_alpha_property(plane);
7151 		drm_plane_create_blend_mode_property(plane, blend_caps);
7152 	}
7153 
7154 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7155 	    plane_cap &&
7156 	    (plane_cap->pixel_format_support.nv12 ||
7157 	     plane_cap->pixel_format_support.p010)) {
7158 		/* This only affects YUV formats. */
7159 		drm_plane_create_color_properties(
7160 			plane,
7161 			BIT(DRM_COLOR_YCBCR_BT601) |
7162 			BIT(DRM_COLOR_YCBCR_BT709) |
7163 			BIT(DRM_COLOR_YCBCR_BT2020),
7164 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7165 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7166 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7167 	}
7168 
7169 	supported_rotations =
7170 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7171 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7172 
7173 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7174 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7175 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7176 						   supported_rotations);
7177 
7178 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7179 
7180 	/* Create (reset) the plane state */
7181 	if (plane->funcs->reset)
7182 		plane->funcs->reset(plane);
7183 
7184 	return 0;
7185 }
7186 
7187 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7188 			       struct drm_plane *plane,
7189 			       uint32_t crtc_index)
7190 {
7191 	struct amdgpu_crtc *acrtc = NULL;
7192 	struct drm_plane *cursor_plane;
7193 
7194 	int res = -ENOMEM;
7195 
7196 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7197 	if (!cursor_plane)
7198 		goto fail;
7199 
7200 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7201 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7202 
7203 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7204 	if (!acrtc)
7205 		goto fail;
7206 
7207 	res = drm_crtc_init_with_planes(
7208 			dm->ddev,
7209 			&acrtc->base,
7210 			plane,
7211 			cursor_plane,
7212 			&amdgpu_dm_crtc_funcs, NULL);
7213 
7214 	if (res)
7215 		goto fail;
7216 
7217 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7218 
7219 	/* Create (reset) the plane state */
7220 	if (acrtc->base.funcs->reset)
7221 		acrtc->base.funcs->reset(&acrtc->base);
7222 
7223 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7224 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7225 
7226 	acrtc->crtc_id = crtc_index;
7227 	acrtc->base.enabled = false;
7228 	acrtc->otg_inst = -1;
7229 
7230 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7231 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7232 				   true, MAX_COLOR_LUT_ENTRIES);
7233 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7234 
7235 	return 0;
7236 
7237 fail:
7238 	kfree(acrtc);
7239 	kfree(cursor_plane);
7240 	return res;
7241 }
7242 
7243 
7244 static int to_drm_connector_type(enum signal_type st)
7245 {
7246 	switch (st) {
7247 	case SIGNAL_TYPE_HDMI_TYPE_A:
7248 		return DRM_MODE_CONNECTOR_HDMIA;
7249 	case SIGNAL_TYPE_EDP:
7250 		return DRM_MODE_CONNECTOR_eDP;
7251 	case SIGNAL_TYPE_LVDS:
7252 		return DRM_MODE_CONNECTOR_LVDS;
7253 	case SIGNAL_TYPE_RGB:
7254 		return DRM_MODE_CONNECTOR_VGA;
7255 	case SIGNAL_TYPE_DISPLAY_PORT:
7256 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7257 		return DRM_MODE_CONNECTOR_DisplayPort;
7258 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7259 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7260 		return DRM_MODE_CONNECTOR_DVID;
7261 	case SIGNAL_TYPE_VIRTUAL:
7262 		return DRM_MODE_CONNECTOR_VIRTUAL;
7263 
7264 	default:
7265 		return DRM_MODE_CONNECTOR_Unknown;
7266 	}
7267 }
7268 
7269 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7270 {
7271 	struct drm_encoder *encoder;
7272 
7273 	/* There is only one encoder per connector */
7274 	drm_connector_for_each_possible_encoder(connector, encoder)
7275 		return encoder;
7276 
7277 	return NULL;
7278 }
7279 
7280 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7281 {
7282 	struct drm_encoder *encoder;
7283 	struct amdgpu_encoder *amdgpu_encoder;
7284 
7285 	encoder = amdgpu_dm_connector_to_encoder(connector);
7286 
7287 	if (encoder == NULL)
7288 		return;
7289 
7290 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7291 
7292 	amdgpu_encoder->native_mode.clock = 0;
7293 
7294 	if (!list_empty(&connector->probed_modes)) {
7295 		struct drm_display_mode *preferred_mode = NULL;
7296 
7297 		list_for_each_entry(preferred_mode,
7298 				    &connector->probed_modes,
7299 				    head) {
7300 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7301 				amdgpu_encoder->native_mode = *preferred_mode;
7302 
7303 			break;
7304 		}
7305 
7306 	}
7307 }
7308 
7309 static struct drm_display_mode *
7310 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7311 			     char *name,
7312 			     int hdisplay, int vdisplay)
7313 {
7314 	struct drm_device *dev = encoder->dev;
7315 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7316 	struct drm_display_mode *mode = NULL;
7317 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7318 
7319 	mode = drm_mode_duplicate(dev, native_mode);
7320 
7321 	if (mode == NULL)
7322 		return NULL;
7323 
7324 	mode->hdisplay = hdisplay;
7325 	mode->vdisplay = vdisplay;
7326 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7327 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7328 
7329 	return mode;
7330 
7331 }
7332 
7333 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7334 						 struct drm_connector *connector)
7335 {
7336 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7337 	struct drm_display_mode *mode = NULL;
7338 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7339 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7340 				to_amdgpu_dm_connector(connector);
7341 	int i;
7342 	int n;
7343 	struct mode_size {
7344 		char name[DRM_DISPLAY_MODE_LEN];
7345 		int w;
7346 		int h;
7347 	} common_modes[] = {
7348 		{  "640x480",  640,  480},
7349 		{  "800x600",  800,  600},
7350 		{ "1024x768", 1024,  768},
7351 		{ "1280x720", 1280,  720},
7352 		{ "1280x800", 1280,  800},
7353 		{"1280x1024", 1280, 1024},
7354 		{ "1440x900", 1440,  900},
7355 		{"1680x1050", 1680, 1050},
7356 		{"1600x1200", 1600, 1200},
7357 		{"1920x1080", 1920, 1080},
7358 		{"1920x1200", 1920, 1200}
7359 	};
7360 
7361 	n = ARRAY_SIZE(common_modes);
7362 
7363 	for (i = 0; i < n; i++) {
7364 		struct drm_display_mode *curmode = NULL;
7365 		bool mode_existed = false;
7366 
7367 		if (common_modes[i].w > native_mode->hdisplay ||
7368 		    common_modes[i].h > native_mode->vdisplay ||
7369 		   (common_modes[i].w == native_mode->hdisplay &&
7370 		    common_modes[i].h == native_mode->vdisplay))
7371 			continue;
7372 
7373 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7374 			if (common_modes[i].w == curmode->hdisplay &&
7375 			    common_modes[i].h == curmode->vdisplay) {
7376 				mode_existed = true;
7377 				break;
7378 			}
7379 		}
7380 
7381 		if (mode_existed)
7382 			continue;
7383 
7384 		mode = amdgpu_dm_create_common_mode(encoder,
7385 				common_modes[i].name, common_modes[i].w,
7386 				common_modes[i].h);
7387 		drm_mode_probed_add(connector, mode);
7388 		amdgpu_dm_connector->num_modes++;
7389 	}
7390 }
7391 
7392 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7393 					      struct edid *edid)
7394 {
7395 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7396 			to_amdgpu_dm_connector(connector);
7397 
7398 	if (edid) {
7399 		/* empty probed_modes */
7400 		INIT_LIST_HEAD(&connector->probed_modes);
7401 		amdgpu_dm_connector->num_modes =
7402 				drm_add_edid_modes(connector, edid);
7403 
7404 		/* sorting the probed modes before calling function
7405 		 * amdgpu_dm_get_native_mode() since EDID can have
7406 		 * more than one preferred mode. The modes that are
7407 		 * later in the probed mode list could be of higher
7408 		 * and preferred resolution. For example, 3840x2160
7409 		 * resolution in base EDID preferred timing and 4096x2160
7410 		 * preferred resolution in DID extension block later.
7411 		 */
7412 		drm_mode_sort(&connector->probed_modes);
7413 		amdgpu_dm_get_native_mode(connector);
7414 
7415 		/* Freesync capabilities are reset by calling
7416 		 * drm_add_edid_modes() and need to be
7417 		 * restored here.
7418 		 */
7419 		amdgpu_dm_update_freesync_caps(connector, edid);
7420 	} else {
7421 		amdgpu_dm_connector->num_modes = 0;
7422 	}
7423 }
7424 
7425 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7426 			      struct drm_display_mode *mode)
7427 {
7428 	struct drm_display_mode *m;
7429 
7430 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7431 		if (drm_mode_equal(m, mode))
7432 			return true;
7433 	}
7434 
7435 	return false;
7436 }
7437 
7438 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7439 {
7440 	const struct drm_display_mode *m;
7441 	struct drm_display_mode *new_mode;
7442 	uint i;
7443 	uint32_t new_modes_count = 0;
7444 
7445 	/* Standard FPS values
7446 	 *
7447 	 * 23.976   - TV/NTSC
7448 	 * 24 	    - Cinema
7449 	 * 25 	    - TV/PAL
7450 	 * 29.97    - TV/NTSC
7451 	 * 30 	    - TV/NTSC
7452 	 * 48 	    - Cinema HFR
7453 	 * 50 	    - TV/PAL
7454 	 * 60 	    - Commonly used
7455 	 * 48,72,96 - Multiples of 24
7456 	 */
7457 	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7458 					 48000, 50000, 60000, 72000, 96000 };
7459 
7460 	/*
7461 	 * Find mode with highest refresh rate with the same resolution
7462 	 * as the preferred mode. Some monitors report a preferred mode
7463 	 * with lower resolution than the highest refresh rate supported.
7464 	 */
7465 
7466 	m = get_highest_refresh_rate_mode(aconnector, true);
7467 	if (!m)
7468 		return 0;
7469 
7470 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7471 		uint64_t target_vtotal, target_vtotal_diff;
7472 		uint64_t num, den;
7473 
7474 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7475 			continue;
7476 
7477 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7478 		    common_rates[i] > aconnector->max_vfreq * 1000)
7479 			continue;
7480 
7481 		num = (unsigned long long)m->clock * 1000 * 1000;
7482 		den = common_rates[i] * (unsigned long long)m->htotal;
7483 		target_vtotal = div_u64(num, den);
7484 		target_vtotal_diff = target_vtotal - m->vtotal;
7485 
7486 		/* Check for illegal modes */
7487 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7488 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7489 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7490 			continue;
7491 
7492 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7493 		if (!new_mode)
7494 			goto out;
7495 
7496 		new_mode->vtotal += (u16)target_vtotal_diff;
7497 		new_mode->vsync_start += (u16)target_vtotal_diff;
7498 		new_mode->vsync_end += (u16)target_vtotal_diff;
7499 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7500 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7501 
7502 		if (!is_duplicate_mode(aconnector, new_mode)) {
7503 			drm_mode_probed_add(&aconnector->base, new_mode);
7504 			new_modes_count += 1;
7505 		} else
7506 			drm_mode_destroy(aconnector->base.dev, new_mode);
7507 	}
7508  out:
7509 	return new_modes_count;
7510 }
7511 
7512 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7513 						   struct edid *edid)
7514 {
7515 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7516 		to_amdgpu_dm_connector(connector);
7517 
7518 	if (!(amdgpu_freesync_vid_mode && edid))
7519 		return;
7520 
7521 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7522 		amdgpu_dm_connector->num_modes +=
7523 			add_fs_modes(amdgpu_dm_connector);
7524 }
7525 
7526 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7527 {
7528 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7529 			to_amdgpu_dm_connector(connector);
7530 	struct drm_encoder *encoder;
7531 	struct edid *edid = amdgpu_dm_connector->edid;
7532 
7533 	encoder = amdgpu_dm_connector_to_encoder(connector);
7534 
7535 	if (!drm_edid_is_valid(edid)) {
7536 		amdgpu_dm_connector->num_modes =
7537 				drm_add_modes_noedid(connector, 640, 480);
7538 	} else {
7539 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7540 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7541 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7542 	}
7543 	amdgpu_dm_fbc_init(connector);
7544 
7545 	return amdgpu_dm_connector->num_modes;
7546 }
7547 
7548 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7549 				     struct amdgpu_dm_connector *aconnector,
7550 				     int connector_type,
7551 				     struct dc_link *link,
7552 				     int link_index)
7553 {
7554 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7555 
7556 	/*
7557 	 * Some of the properties below require access to state, like bpc.
7558 	 * Allocate some default initial connector state with our reset helper.
7559 	 */
7560 	if (aconnector->base.funcs->reset)
7561 		aconnector->base.funcs->reset(&aconnector->base);
7562 
7563 	aconnector->connector_id = link_index;
7564 	aconnector->dc_link = link;
7565 	aconnector->base.interlace_allowed = false;
7566 	aconnector->base.doublescan_allowed = false;
7567 	aconnector->base.stereo_allowed = false;
7568 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7569 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7570 	aconnector->audio_inst = -1;
7571 	mutex_init(&aconnector->hpd_lock);
7572 
7573 	/*
7574 	 * configure support HPD hot plug connector_>polled default value is 0
7575 	 * which means HPD hot plug not supported
7576 	 */
7577 	switch (connector_type) {
7578 	case DRM_MODE_CONNECTOR_HDMIA:
7579 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7580 		aconnector->base.ycbcr_420_allowed =
7581 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7582 		break;
7583 	case DRM_MODE_CONNECTOR_DisplayPort:
7584 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7585 		aconnector->base.ycbcr_420_allowed =
7586 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
7587 		break;
7588 	case DRM_MODE_CONNECTOR_DVID:
7589 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7590 		break;
7591 	default:
7592 		break;
7593 	}
7594 
7595 	drm_object_attach_property(&aconnector->base.base,
7596 				dm->ddev->mode_config.scaling_mode_property,
7597 				DRM_MODE_SCALE_NONE);
7598 
7599 	drm_object_attach_property(&aconnector->base.base,
7600 				adev->mode_info.underscan_property,
7601 				UNDERSCAN_OFF);
7602 	drm_object_attach_property(&aconnector->base.base,
7603 				adev->mode_info.underscan_hborder_property,
7604 				0);
7605 	drm_object_attach_property(&aconnector->base.base,
7606 				adev->mode_info.underscan_vborder_property,
7607 				0);
7608 
7609 	if (!aconnector->mst_port)
7610 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7611 
7612 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7613 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7614 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7615 
7616 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7617 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7618 		drm_object_attach_property(&aconnector->base.base,
7619 				adev->mode_info.abm_level_property, 0);
7620 	}
7621 
7622 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7623 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7624 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7625 		drm_object_attach_property(
7626 			&aconnector->base.base,
7627 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7628 
7629 		if (!aconnector->mst_port)
7630 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7631 
7632 #ifdef CONFIG_DRM_AMD_DC_HDCP
7633 		if (adev->dm.hdcp_workqueue)
7634 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7635 #endif
7636 	}
7637 }
7638 
7639 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7640 			      struct i2c_msg *msgs, int num)
7641 {
7642 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7643 	struct ddc_service *ddc_service = i2c->ddc_service;
7644 	struct i2c_command cmd;
7645 	int i;
7646 	int result = -EIO;
7647 
7648 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7649 
7650 	if (!cmd.payloads)
7651 		return result;
7652 
7653 	cmd.number_of_payloads = num;
7654 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7655 	cmd.speed = 100;
7656 
7657 	for (i = 0; i < num; i++) {
7658 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7659 		cmd.payloads[i].address = msgs[i].addr;
7660 		cmd.payloads[i].length = msgs[i].len;
7661 		cmd.payloads[i].data = msgs[i].buf;
7662 	}
7663 
7664 	if (dc_submit_i2c(
7665 			ddc_service->ctx->dc,
7666 			ddc_service->ddc_pin->hw_info.ddc_channel,
7667 			&cmd))
7668 		result = num;
7669 
7670 	kfree(cmd.payloads);
7671 	return result;
7672 }
7673 
7674 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7675 {
7676 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7677 }
7678 
7679 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7680 	.master_xfer = amdgpu_dm_i2c_xfer,
7681 	.functionality = amdgpu_dm_i2c_func,
7682 };
7683 
7684 static struct amdgpu_i2c_adapter *
7685 create_i2c(struct ddc_service *ddc_service,
7686 	   int link_index,
7687 	   int *res)
7688 {
7689 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7690 	struct amdgpu_i2c_adapter *i2c;
7691 
7692 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7693 	if (!i2c)
7694 		return NULL;
7695 	i2c->base.owner = THIS_MODULE;
7696 	i2c->base.class = I2C_CLASS_DDC;
7697 	i2c->base.dev.parent = &adev->pdev->dev;
7698 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7699 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7700 	i2c_set_adapdata(&i2c->base, i2c);
7701 	i2c->ddc_service = ddc_service;
7702 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7703 
7704 	return i2c;
7705 }
7706 
7707 
7708 /*
7709  * Note: this function assumes that dc_link_detect() was called for the
7710  * dc_link which will be represented by this aconnector.
7711  */
7712 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7713 				    struct amdgpu_dm_connector *aconnector,
7714 				    uint32_t link_index,
7715 				    struct amdgpu_encoder *aencoder)
7716 {
7717 	int res = 0;
7718 	int connector_type;
7719 	struct dc *dc = dm->dc;
7720 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7721 	struct amdgpu_i2c_adapter *i2c;
7722 
7723 	link->priv = aconnector;
7724 
7725 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7726 
7727 	i2c = create_i2c(link->ddc, link->link_index, &res);
7728 	if (!i2c) {
7729 		DRM_ERROR("Failed to create i2c adapter data\n");
7730 		return -ENOMEM;
7731 	}
7732 
7733 	aconnector->i2c = i2c;
7734 	res = i2c_add_adapter(&i2c->base);
7735 
7736 	if (res) {
7737 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7738 		goto out_free;
7739 	}
7740 
7741 	connector_type = to_drm_connector_type(link->connector_signal);
7742 
7743 	res = drm_connector_init_with_ddc(
7744 			dm->ddev,
7745 			&aconnector->base,
7746 			&amdgpu_dm_connector_funcs,
7747 			connector_type,
7748 			&i2c->base);
7749 
7750 	if (res) {
7751 		DRM_ERROR("connector_init failed\n");
7752 		aconnector->connector_id = -1;
7753 		goto out_free;
7754 	}
7755 
7756 	drm_connector_helper_add(
7757 			&aconnector->base,
7758 			&amdgpu_dm_connector_helper_funcs);
7759 
7760 	amdgpu_dm_connector_init_helper(
7761 		dm,
7762 		aconnector,
7763 		connector_type,
7764 		link,
7765 		link_index);
7766 
7767 	drm_connector_attach_encoder(
7768 		&aconnector->base, &aencoder->base);
7769 
7770 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7771 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7772 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7773 
7774 out_free:
7775 	if (res) {
7776 		kfree(i2c);
7777 		aconnector->i2c = NULL;
7778 	}
7779 	return res;
7780 }
7781 
7782 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7783 {
7784 	switch (adev->mode_info.num_crtc) {
7785 	case 1:
7786 		return 0x1;
7787 	case 2:
7788 		return 0x3;
7789 	case 3:
7790 		return 0x7;
7791 	case 4:
7792 		return 0xf;
7793 	case 5:
7794 		return 0x1f;
7795 	case 6:
7796 	default:
7797 		return 0x3f;
7798 	}
7799 }
7800 
7801 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7802 				  struct amdgpu_encoder *aencoder,
7803 				  uint32_t link_index)
7804 {
7805 	struct amdgpu_device *adev = drm_to_adev(dev);
7806 
7807 	int res = drm_encoder_init(dev,
7808 				   &aencoder->base,
7809 				   &amdgpu_dm_encoder_funcs,
7810 				   DRM_MODE_ENCODER_TMDS,
7811 				   NULL);
7812 
7813 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7814 
7815 	if (!res)
7816 		aencoder->encoder_id = link_index;
7817 	else
7818 		aencoder->encoder_id = -1;
7819 
7820 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7821 
7822 	return res;
7823 }
7824 
7825 static void manage_dm_interrupts(struct amdgpu_device *adev,
7826 				 struct amdgpu_crtc *acrtc,
7827 				 bool enable)
7828 {
7829 	/*
7830 	 * We have no guarantee that the frontend index maps to the same
7831 	 * backend index - some even map to more than one.
7832 	 *
7833 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7834 	 */
7835 	int irq_type =
7836 		amdgpu_display_crtc_idx_to_irq_type(
7837 			adev,
7838 			acrtc->crtc_id);
7839 
7840 	if (enable) {
7841 		drm_crtc_vblank_on(&acrtc->base);
7842 		amdgpu_irq_get(
7843 			adev,
7844 			&adev->pageflip_irq,
7845 			irq_type);
7846 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7847 		amdgpu_irq_get(
7848 			adev,
7849 			&adev->vline0_irq,
7850 			irq_type);
7851 #endif
7852 	} else {
7853 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7854 		amdgpu_irq_put(
7855 			adev,
7856 			&adev->vline0_irq,
7857 			irq_type);
7858 #endif
7859 		amdgpu_irq_put(
7860 			adev,
7861 			&adev->pageflip_irq,
7862 			irq_type);
7863 		drm_crtc_vblank_off(&acrtc->base);
7864 	}
7865 }
7866 
7867 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7868 				      struct amdgpu_crtc *acrtc)
7869 {
7870 	int irq_type =
7871 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7872 
7873 	/**
7874 	 * This reads the current state for the IRQ and force reapplies
7875 	 * the setting to hardware.
7876 	 */
7877 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7878 }
7879 
7880 static bool
7881 is_scaling_state_different(const struct dm_connector_state *dm_state,
7882 			   const struct dm_connector_state *old_dm_state)
7883 {
7884 	if (dm_state->scaling != old_dm_state->scaling)
7885 		return true;
7886 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7887 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7888 			return true;
7889 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7890 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7891 			return true;
7892 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7893 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7894 		return true;
7895 	return false;
7896 }
7897 
7898 #ifdef CONFIG_DRM_AMD_DC_HDCP
7899 static bool is_content_protection_different(struct drm_connector_state *state,
7900 					    const struct drm_connector_state *old_state,
7901 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7902 {
7903 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7904 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7905 
7906 	/* Handle: Type0/1 change */
7907 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7908 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7909 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7910 		return true;
7911 	}
7912 
7913 	/* CP is being re enabled, ignore this
7914 	 *
7915 	 * Handles:	ENABLED -> DESIRED
7916 	 */
7917 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7918 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7919 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7920 		return false;
7921 	}
7922 
7923 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7924 	 *
7925 	 * Handles:	UNDESIRED -> ENABLED
7926 	 */
7927 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7928 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7929 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7930 
7931 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
7932 	 * hot-plug, headless s3, dpms
7933 	 *
7934 	 * Handles:	DESIRED -> DESIRED (Special case)
7935 	 */
7936 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7937 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7938 		dm_con_state->update_hdcp = false;
7939 		return true;
7940 	}
7941 
7942 	/*
7943 	 * Handles:	UNDESIRED -> UNDESIRED
7944 	 *		DESIRED -> DESIRED
7945 	 *		ENABLED -> ENABLED
7946 	 */
7947 	if (old_state->content_protection == state->content_protection)
7948 		return false;
7949 
7950 	/*
7951 	 * Handles:	UNDESIRED -> DESIRED
7952 	 *		DESIRED -> UNDESIRED
7953 	 *		ENABLED -> UNDESIRED
7954 	 */
7955 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7956 		return true;
7957 
7958 	/*
7959 	 * Handles:	DESIRED -> ENABLED
7960 	 */
7961 	return false;
7962 }
7963 
7964 #endif
7965 static void remove_stream(struct amdgpu_device *adev,
7966 			  struct amdgpu_crtc *acrtc,
7967 			  struct dc_stream_state *stream)
7968 {
7969 	/* this is the update mode case */
7970 
7971 	acrtc->otg_inst = -1;
7972 	acrtc->enabled = false;
7973 }
7974 
7975 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7976 			       struct dc_cursor_position *position)
7977 {
7978 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7979 	int x, y;
7980 	int xorigin = 0, yorigin = 0;
7981 
7982 	if (!crtc || !plane->state->fb)
7983 		return 0;
7984 
7985 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7986 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7987 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7988 			  __func__,
7989 			  plane->state->crtc_w,
7990 			  plane->state->crtc_h);
7991 		return -EINVAL;
7992 	}
7993 
7994 	x = plane->state->crtc_x;
7995 	y = plane->state->crtc_y;
7996 
7997 	if (x <= -amdgpu_crtc->max_cursor_width ||
7998 	    y <= -amdgpu_crtc->max_cursor_height)
7999 		return 0;
8000 
8001 	if (x < 0) {
8002 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8003 		x = 0;
8004 	}
8005 	if (y < 0) {
8006 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8007 		y = 0;
8008 	}
8009 	position->enable = true;
8010 	position->translate_by_source = true;
8011 	position->x = x;
8012 	position->y = y;
8013 	position->x_hotspot = xorigin;
8014 	position->y_hotspot = yorigin;
8015 
8016 	return 0;
8017 }
8018 
8019 static void handle_cursor_update(struct drm_plane *plane,
8020 				 struct drm_plane_state *old_plane_state)
8021 {
8022 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8023 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8024 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8025 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8026 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8027 	uint64_t address = afb ? afb->address : 0;
8028 	struct dc_cursor_position position = {0};
8029 	struct dc_cursor_attributes attributes;
8030 	int ret;
8031 
8032 	if (!plane->state->fb && !old_plane_state->fb)
8033 		return;
8034 
8035 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8036 		      __func__,
8037 		      amdgpu_crtc->crtc_id,
8038 		      plane->state->crtc_w,
8039 		      plane->state->crtc_h);
8040 
8041 	ret = get_cursor_position(plane, crtc, &position);
8042 	if (ret)
8043 		return;
8044 
8045 	if (!position.enable) {
8046 		/* turn off cursor */
8047 		if (crtc_state && crtc_state->stream) {
8048 			mutex_lock(&adev->dm.dc_lock);
8049 			dc_stream_set_cursor_position(crtc_state->stream,
8050 						      &position);
8051 			mutex_unlock(&adev->dm.dc_lock);
8052 		}
8053 		return;
8054 	}
8055 
8056 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8057 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8058 
8059 	memset(&attributes, 0, sizeof(attributes));
8060 	attributes.address.high_part = upper_32_bits(address);
8061 	attributes.address.low_part  = lower_32_bits(address);
8062 	attributes.width             = plane->state->crtc_w;
8063 	attributes.height            = plane->state->crtc_h;
8064 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8065 	attributes.rotation_angle    = 0;
8066 	attributes.attribute_flags.value = 0;
8067 
8068 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8069 
8070 	if (crtc_state->stream) {
8071 		mutex_lock(&adev->dm.dc_lock);
8072 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8073 							 &attributes))
8074 			DRM_ERROR("DC failed to set cursor attributes\n");
8075 
8076 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8077 						   &position))
8078 			DRM_ERROR("DC failed to set cursor position\n");
8079 		mutex_unlock(&adev->dm.dc_lock);
8080 	}
8081 }
8082 
8083 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8084 {
8085 
8086 	assert_spin_locked(&acrtc->base.dev->event_lock);
8087 	WARN_ON(acrtc->event);
8088 
8089 	acrtc->event = acrtc->base.state->event;
8090 
8091 	/* Set the flip status */
8092 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8093 
8094 	/* Mark this event as consumed */
8095 	acrtc->base.state->event = NULL;
8096 
8097 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8098 		     acrtc->crtc_id);
8099 }
8100 
8101 static void update_freesync_state_on_stream(
8102 	struct amdgpu_display_manager *dm,
8103 	struct dm_crtc_state *new_crtc_state,
8104 	struct dc_stream_state *new_stream,
8105 	struct dc_plane_state *surface,
8106 	u32 flip_timestamp_in_us)
8107 {
8108 	struct mod_vrr_params vrr_params;
8109 	struct dc_info_packet vrr_infopacket = {0};
8110 	struct amdgpu_device *adev = dm->adev;
8111 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8112 	unsigned long flags;
8113 	bool pack_sdp_v1_3 = false;
8114 
8115 	if (!new_stream)
8116 		return;
8117 
8118 	/*
8119 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8120 	 * For now it's sufficient to just guard against these conditions.
8121 	 */
8122 
8123 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8124 		return;
8125 
8126 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8127         vrr_params = acrtc->dm_irq_params.vrr_params;
8128 
8129 	if (surface) {
8130 		mod_freesync_handle_preflip(
8131 			dm->freesync_module,
8132 			surface,
8133 			new_stream,
8134 			flip_timestamp_in_us,
8135 			&vrr_params);
8136 
8137 		if (adev->family < AMDGPU_FAMILY_AI &&
8138 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8139 			mod_freesync_handle_v_update(dm->freesync_module,
8140 						     new_stream, &vrr_params);
8141 
8142 			/* Need to call this before the frame ends. */
8143 			dc_stream_adjust_vmin_vmax(dm->dc,
8144 						   new_crtc_state->stream,
8145 						   &vrr_params.adjust);
8146 		}
8147 	}
8148 
8149 	mod_freesync_build_vrr_infopacket(
8150 		dm->freesync_module,
8151 		new_stream,
8152 		&vrr_params,
8153 		PACKET_TYPE_VRR,
8154 		TRANSFER_FUNC_UNKNOWN,
8155 		&vrr_infopacket,
8156 		pack_sdp_v1_3);
8157 
8158 	new_crtc_state->freesync_timing_changed |=
8159 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8160 			&vrr_params.adjust,
8161 			sizeof(vrr_params.adjust)) != 0);
8162 
8163 	new_crtc_state->freesync_vrr_info_changed |=
8164 		(memcmp(&new_crtc_state->vrr_infopacket,
8165 			&vrr_infopacket,
8166 			sizeof(vrr_infopacket)) != 0);
8167 
8168 	acrtc->dm_irq_params.vrr_params = vrr_params;
8169 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8170 
8171 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8172 	new_stream->vrr_infopacket = vrr_infopacket;
8173 
8174 	if (new_crtc_state->freesync_vrr_info_changed)
8175 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8176 			      new_crtc_state->base.crtc->base.id,
8177 			      (int)new_crtc_state->base.vrr_enabled,
8178 			      (int)vrr_params.state);
8179 
8180 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8181 }
8182 
8183 static void update_stream_irq_parameters(
8184 	struct amdgpu_display_manager *dm,
8185 	struct dm_crtc_state *new_crtc_state)
8186 {
8187 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8188 	struct mod_vrr_params vrr_params;
8189 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8190 	struct amdgpu_device *adev = dm->adev;
8191 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8192 	unsigned long flags;
8193 
8194 	if (!new_stream)
8195 		return;
8196 
8197 	/*
8198 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8199 	 * For now it's sufficient to just guard against these conditions.
8200 	 */
8201 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8202 		return;
8203 
8204 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8205 	vrr_params = acrtc->dm_irq_params.vrr_params;
8206 
8207 	if (new_crtc_state->vrr_supported &&
8208 	    config.min_refresh_in_uhz &&
8209 	    config.max_refresh_in_uhz) {
8210 		/*
8211 		 * if freesync compatible mode was set, config.state will be set
8212 		 * in atomic check
8213 		 */
8214 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8215 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8216 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8217 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8218 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8219 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8220 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8221 		} else {
8222 			config.state = new_crtc_state->base.vrr_enabled ?
8223 						     VRR_STATE_ACTIVE_VARIABLE :
8224 						     VRR_STATE_INACTIVE;
8225 		}
8226 	} else {
8227 		config.state = VRR_STATE_UNSUPPORTED;
8228 	}
8229 
8230 	mod_freesync_build_vrr_params(dm->freesync_module,
8231 				      new_stream,
8232 				      &config, &vrr_params);
8233 
8234 	new_crtc_state->freesync_timing_changed |=
8235 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8236 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8237 
8238 	new_crtc_state->freesync_config = config;
8239 	/* Copy state for access from DM IRQ handler */
8240 	acrtc->dm_irq_params.freesync_config = config;
8241 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8242 	acrtc->dm_irq_params.vrr_params = vrr_params;
8243 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8244 }
8245 
8246 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8247 					    struct dm_crtc_state *new_state)
8248 {
8249 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8250 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8251 
8252 	if (!old_vrr_active && new_vrr_active) {
8253 		/* Transition VRR inactive -> active:
8254 		 * While VRR is active, we must not disable vblank irq, as a
8255 		 * reenable after disable would compute bogus vblank/pflip
8256 		 * timestamps if it likely happened inside display front-porch.
8257 		 *
8258 		 * We also need vupdate irq for the actual core vblank handling
8259 		 * at end of vblank.
8260 		 */
8261 		dm_set_vupdate_irq(new_state->base.crtc, true);
8262 		drm_crtc_vblank_get(new_state->base.crtc);
8263 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8264 				 __func__, new_state->base.crtc->base.id);
8265 	} else if (old_vrr_active && !new_vrr_active) {
8266 		/* Transition VRR active -> inactive:
8267 		 * Allow vblank irq disable again for fixed refresh rate.
8268 		 */
8269 		dm_set_vupdate_irq(new_state->base.crtc, false);
8270 		drm_crtc_vblank_put(new_state->base.crtc);
8271 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8272 				 __func__, new_state->base.crtc->base.id);
8273 	}
8274 }
8275 
8276 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8277 {
8278 	struct drm_plane *plane;
8279 	struct drm_plane_state *old_plane_state;
8280 	int i;
8281 
8282 	/*
8283 	 * TODO: Make this per-stream so we don't issue redundant updates for
8284 	 * commits with multiple streams.
8285 	 */
8286 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8287 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8288 			handle_cursor_update(plane, old_plane_state);
8289 }
8290 
8291 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8292 				    struct dc_state *dc_state,
8293 				    struct drm_device *dev,
8294 				    struct amdgpu_display_manager *dm,
8295 				    struct drm_crtc *pcrtc,
8296 				    bool wait_for_vblank)
8297 {
8298 	uint32_t i;
8299 	uint64_t timestamp_ns;
8300 	struct drm_plane *plane;
8301 	struct drm_plane_state *old_plane_state, *new_plane_state;
8302 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8303 	struct drm_crtc_state *new_pcrtc_state =
8304 			drm_atomic_get_new_crtc_state(state, pcrtc);
8305 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8306 	struct dm_crtc_state *dm_old_crtc_state =
8307 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8308 	int planes_count = 0, vpos, hpos;
8309 	long r;
8310 	unsigned long flags;
8311 	struct amdgpu_bo *abo;
8312 	uint32_t target_vblank, last_flip_vblank;
8313 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8314 	bool pflip_present = false;
8315 	struct {
8316 		struct dc_surface_update surface_updates[MAX_SURFACES];
8317 		struct dc_plane_info plane_infos[MAX_SURFACES];
8318 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8319 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8320 		struct dc_stream_update stream_update;
8321 	} *bundle;
8322 
8323 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8324 
8325 	if (!bundle) {
8326 		dm_error("Failed to allocate update bundle\n");
8327 		goto cleanup;
8328 	}
8329 
8330 	/*
8331 	 * Disable the cursor first if we're disabling all the planes.
8332 	 * It'll remain on the screen after the planes are re-enabled
8333 	 * if we don't.
8334 	 */
8335 	if (acrtc_state->active_planes == 0)
8336 		amdgpu_dm_commit_cursors(state);
8337 
8338 	/* update planes when needed */
8339 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8340 		struct drm_crtc *crtc = new_plane_state->crtc;
8341 		struct drm_crtc_state *new_crtc_state;
8342 		struct drm_framebuffer *fb = new_plane_state->fb;
8343 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8344 		bool plane_needs_flip;
8345 		struct dc_plane_state *dc_plane;
8346 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8347 
8348 		/* Cursor plane is handled after stream updates */
8349 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8350 			continue;
8351 
8352 		if (!fb || !crtc || pcrtc != crtc)
8353 			continue;
8354 
8355 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8356 		if (!new_crtc_state->active)
8357 			continue;
8358 
8359 		dc_plane = dm_new_plane_state->dc_state;
8360 
8361 		bundle->surface_updates[planes_count].surface = dc_plane;
8362 		if (new_pcrtc_state->color_mgmt_changed) {
8363 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8364 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8365 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8366 		}
8367 
8368 		fill_dc_scaling_info(new_plane_state,
8369 				     &bundle->scaling_infos[planes_count]);
8370 
8371 		bundle->surface_updates[planes_count].scaling_info =
8372 			&bundle->scaling_infos[planes_count];
8373 
8374 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8375 
8376 		pflip_present = pflip_present || plane_needs_flip;
8377 
8378 		if (!plane_needs_flip) {
8379 			planes_count += 1;
8380 			continue;
8381 		}
8382 
8383 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8384 
8385 		/*
8386 		 * Wait for all fences on this FB. Do limited wait to avoid
8387 		 * deadlock during GPU reset when this fence will not signal
8388 		 * but we hold reservation lock for the BO.
8389 		 */
8390 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8391 							false,
8392 							msecs_to_jiffies(5000));
8393 		if (unlikely(r <= 0))
8394 			DRM_ERROR("Waiting for fences timed out!");
8395 
8396 		fill_dc_plane_info_and_addr(
8397 			dm->adev, new_plane_state,
8398 			afb->tiling_flags,
8399 			&bundle->plane_infos[planes_count],
8400 			&bundle->flip_addrs[planes_count].address,
8401 			afb->tmz_surface, false);
8402 
8403 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8404 				 new_plane_state->plane->index,
8405 				 bundle->plane_infos[planes_count].dcc.enable);
8406 
8407 		bundle->surface_updates[planes_count].plane_info =
8408 			&bundle->plane_infos[planes_count];
8409 
8410 		/*
8411 		 * Only allow immediate flips for fast updates that don't
8412 		 * change FB pitch, DCC state, rotation or mirroing.
8413 		 */
8414 		bundle->flip_addrs[planes_count].flip_immediate =
8415 			crtc->state->async_flip &&
8416 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8417 
8418 		timestamp_ns = ktime_get_ns();
8419 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8420 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8421 		bundle->surface_updates[planes_count].surface = dc_plane;
8422 
8423 		if (!bundle->surface_updates[planes_count].surface) {
8424 			DRM_ERROR("No surface for CRTC: id=%d\n",
8425 					acrtc_attach->crtc_id);
8426 			continue;
8427 		}
8428 
8429 		if (plane == pcrtc->primary)
8430 			update_freesync_state_on_stream(
8431 				dm,
8432 				acrtc_state,
8433 				acrtc_state->stream,
8434 				dc_plane,
8435 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8436 
8437 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8438 				 __func__,
8439 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8440 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8441 
8442 		planes_count += 1;
8443 
8444 	}
8445 
8446 	if (pflip_present) {
8447 		if (!vrr_active) {
8448 			/* Use old throttling in non-vrr fixed refresh rate mode
8449 			 * to keep flip scheduling based on target vblank counts
8450 			 * working in a backwards compatible way, e.g., for
8451 			 * clients using the GLX_OML_sync_control extension or
8452 			 * DRI3/Present extension with defined target_msc.
8453 			 */
8454 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8455 		}
8456 		else {
8457 			/* For variable refresh rate mode only:
8458 			 * Get vblank of last completed flip to avoid > 1 vrr
8459 			 * flips per video frame by use of throttling, but allow
8460 			 * flip programming anywhere in the possibly large
8461 			 * variable vrr vblank interval for fine-grained flip
8462 			 * timing control and more opportunity to avoid stutter
8463 			 * on late submission of flips.
8464 			 */
8465 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8466 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8467 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8468 		}
8469 
8470 		target_vblank = last_flip_vblank + wait_for_vblank;
8471 
8472 		/*
8473 		 * Wait until we're out of the vertical blank period before the one
8474 		 * targeted by the flip
8475 		 */
8476 		while ((acrtc_attach->enabled &&
8477 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8478 							    0, &vpos, &hpos, NULL,
8479 							    NULL, &pcrtc->hwmode)
8480 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8481 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8482 			(int)(target_vblank -
8483 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8484 			usleep_range(1000, 1100);
8485 		}
8486 
8487 		/**
8488 		 * Prepare the flip event for the pageflip interrupt to handle.
8489 		 *
8490 		 * This only works in the case where we've already turned on the
8491 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
8492 		 * from 0 -> n planes we have to skip a hardware generated event
8493 		 * and rely on sending it from software.
8494 		 */
8495 		if (acrtc_attach->base.state->event &&
8496 		    acrtc_state->active_planes > 0) {
8497 			drm_crtc_vblank_get(pcrtc);
8498 
8499 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8500 
8501 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8502 			prepare_flip_isr(acrtc_attach);
8503 
8504 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8505 		}
8506 
8507 		if (acrtc_state->stream) {
8508 			if (acrtc_state->freesync_vrr_info_changed)
8509 				bundle->stream_update.vrr_infopacket =
8510 					&acrtc_state->stream->vrr_infopacket;
8511 		}
8512 	}
8513 
8514 	/* Update the planes if changed or disable if we don't have any. */
8515 	if ((planes_count || acrtc_state->active_planes == 0) &&
8516 		acrtc_state->stream) {
8517 		bundle->stream_update.stream = acrtc_state->stream;
8518 		if (new_pcrtc_state->mode_changed) {
8519 			bundle->stream_update.src = acrtc_state->stream->src;
8520 			bundle->stream_update.dst = acrtc_state->stream->dst;
8521 		}
8522 
8523 		if (new_pcrtc_state->color_mgmt_changed) {
8524 			/*
8525 			 * TODO: This isn't fully correct since we've actually
8526 			 * already modified the stream in place.
8527 			 */
8528 			bundle->stream_update.gamut_remap =
8529 				&acrtc_state->stream->gamut_remap_matrix;
8530 			bundle->stream_update.output_csc_transform =
8531 				&acrtc_state->stream->csc_color_matrix;
8532 			bundle->stream_update.out_transfer_func =
8533 				acrtc_state->stream->out_transfer_func;
8534 		}
8535 
8536 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8537 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8538 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8539 
8540 		/*
8541 		 * If FreeSync state on the stream has changed then we need to
8542 		 * re-adjust the min/max bounds now that DC doesn't handle this
8543 		 * as part of commit.
8544 		 */
8545 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8546 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8547 			dc_stream_adjust_vmin_vmax(
8548 				dm->dc, acrtc_state->stream,
8549 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8550 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8551 		}
8552 		mutex_lock(&dm->dc_lock);
8553 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8554 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8555 			amdgpu_dm_psr_disable(acrtc_state->stream);
8556 
8557 		dc_commit_updates_for_stream(dm->dc,
8558 						     bundle->surface_updates,
8559 						     planes_count,
8560 						     acrtc_state->stream,
8561 						     &bundle->stream_update,
8562 						     dc_state);
8563 
8564 		/**
8565 		 * Enable or disable the interrupts on the backend.
8566 		 *
8567 		 * Most pipes are put into power gating when unused.
8568 		 *
8569 		 * When power gating is enabled on a pipe we lose the
8570 		 * interrupt enablement state when power gating is disabled.
8571 		 *
8572 		 * So we need to update the IRQ control state in hardware
8573 		 * whenever the pipe turns on (since it could be previously
8574 		 * power gated) or off (since some pipes can't be power gated
8575 		 * on some ASICs).
8576 		 */
8577 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8578 			dm_update_pflip_irq_state(drm_to_adev(dev),
8579 						  acrtc_attach);
8580 
8581 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8582 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8583 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8584 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8585 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8586 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8587 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8588 			amdgpu_dm_psr_enable(acrtc_state->stream);
8589 		}
8590 
8591 		mutex_unlock(&dm->dc_lock);
8592 	}
8593 
8594 	/*
8595 	 * Update cursor state *after* programming all the planes.
8596 	 * This avoids redundant programming in the case where we're going
8597 	 * to be disabling a single plane - those pipes are being disabled.
8598 	 */
8599 	if (acrtc_state->active_planes)
8600 		amdgpu_dm_commit_cursors(state);
8601 
8602 cleanup:
8603 	kfree(bundle);
8604 }
8605 
8606 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8607 				   struct drm_atomic_state *state)
8608 {
8609 	struct amdgpu_device *adev = drm_to_adev(dev);
8610 	struct amdgpu_dm_connector *aconnector;
8611 	struct drm_connector *connector;
8612 	struct drm_connector_state *old_con_state, *new_con_state;
8613 	struct drm_crtc_state *new_crtc_state;
8614 	struct dm_crtc_state *new_dm_crtc_state;
8615 	const struct dc_stream_status *status;
8616 	int i, inst;
8617 
8618 	/* Notify device removals. */
8619 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8620 		if (old_con_state->crtc != new_con_state->crtc) {
8621 			/* CRTC changes require notification. */
8622 			goto notify;
8623 		}
8624 
8625 		if (!new_con_state->crtc)
8626 			continue;
8627 
8628 		new_crtc_state = drm_atomic_get_new_crtc_state(
8629 			state, new_con_state->crtc);
8630 
8631 		if (!new_crtc_state)
8632 			continue;
8633 
8634 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8635 			continue;
8636 
8637 	notify:
8638 		aconnector = to_amdgpu_dm_connector(connector);
8639 
8640 		mutex_lock(&adev->dm.audio_lock);
8641 		inst = aconnector->audio_inst;
8642 		aconnector->audio_inst = -1;
8643 		mutex_unlock(&adev->dm.audio_lock);
8644 
8645 		amdgpu_dm_audio_eld_notify(adev, inst);
8646 	}
8647 
8648 	/* Notify audio device additions. */
8649 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8650 		if (!new_con_state->crtc)
8651 			continue;
8652 
8653 		new_crtc_state = drm_atomic_get_new_crtc_state(
8654 			state, new_con_state->crtc);
8655 
8656 		if (!new_crtc_state)
8657 			continue;
8658 
8659 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8660 			continue;
8661 
8662 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8663 		if (!new_dm_crtc_state->stream)
8664 			continue;
8665 
8666 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8667 		if (!status)
8668 			continue;
8669 
8670 		aconnector = to_amdgpu_dm_connector(connector);
8671 
8672 		mutex_lock(&adev->dm.audio_lock);
8673 		inst = status->audio_inst;
8674 		aconnector->audio_inst = inst;
8675 		mutex_unlock(&adev->dm.audio_lock);
8676 
8677 		amdgpu_dm_audio_eld_notify(adev, inst);
8678 	}
8679 }
8680 
8681 /*
8682  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8683  * @crtc_state: the DRM CRTC state
8684  * @stream_state: the DC stream state.
8685  *
8686  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8687  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8688  */
8689 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8690 						struct dc_stream_state *stream_state)
8691 {
8692 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8693 }
8694 
8695 /**
8696  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8697  * @state: The atomic state to commit
8698  *
8699  * This will tell DC to commit the constructed DC state from atomic_check,
8700  * programming the hardware. Any failures here implies a hardware failure, since
8701  * atomic check should have filtered anything non-kosher.
8702  */
8703 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8704 {
8705 	struct drm_device *dev = state->dev;
8706 	struct amdgpu_device *adev = drm_to_adev(dev);
8707 	struct amdgpu_display_manager *dm = &adev->dm;
8708 	struct dm_atomic_state *dm_state;
8709 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8710 	uint32_t i, j;
8711 	struct drm_crtc *crtc;
8712 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8713 	unsigned long flags;
8714 	bool wait_for_vblank = true;
8715 	struct drm_connector *connector;
8716 	struct drm_connector_state *old_con_state, *new_con_state;
8717 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8718 	int crtc_disable_count = 0;
8719 	bool mode_set_reset_required = false;
8720 
8721 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8722 
8723 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8724 
8725 	dm_state = dm_atomic_get_new_state(state);
8726 	if (dm_state && dm_state->context) {
8727 		dc_state = dm_state->context;
8728 	} else {
8729 		/* No state changes, retain current state. */
8730 		dc_state_temp = dc_create_state(dm->dc);
8731 		ASSERT(dc_state_temp);
8732 		dc_state = dc_state_temp;
8733 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8734 	}
8735 
8736 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8737 				       new_crtc_state, i) {
8738 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8739 
8740 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8741 
8742 		if (old_crtc_state->active &&
8743 		    (!new_crtc_state->active ||
8744 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8745 			manage_dm_interrupts(adev, acrtc, false);
8746 			dc_stream_release(dm_old_crtc_state->stream);
8747 		}
8748 	}
8749 
8750 	drm_atomic_helper_calc_timestamping_constants(state);
8751 
8752 	/* update changed items */
8753 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8754 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8755 
8756 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8757 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8758 
8759 		DRM_DEBUG_ATOMIC(
8760 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8761 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8762 			"connectors_changed:%d\n",
8763 			acrtc->crtc_id,
8764 			new_crtc_state->enable,
8765 			new_crtc_state->active,
8766 			new_crtc_state->planes_changed,
8767 			new_crtc_state->mode_changed,
8768 			new_crtc_state->active_changed,
8769 			new_crtc_state->connectors_changed);
8770 
8771 		/* Disable cursor if disabling crtc */
8772 		if (old_crtc_state->active && !new_crtc_state->active) {
8773 			struct dc_cursor_position position;
8774 
8775 			memset(&position, 0, sizeof(position));
8776 			mutex_lock(&dm->dc_lock);
8777 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8778 			mutex_unlock(&dm->dc_lock);
8779 		}
8780 
8781 		/* Copy all transient state flags into dc state */
8782 		if (dm_new_crtc_state->stream) {
8783 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8784 							    dm_new_crtc_state->stream);
8785 		}
8786 
8787 		/* handles headless hotplug case, updating new_state and
8788 		 * aconnector as needed
8789 		 */
8790 
8791 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8792 
8793 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8794 
8795 			if (!dm_new_crtc_state->stream) {
8796 				/*
8797 				 * this could happen because of issues with
8798 				 * userspace notifications delivery.
8799 				 * In this case userspace tries to set mode on
8800 				 * display which is disconnected in fact.
8801 				 * dc_sink is NULL in this case on aconnector.
8802 				 * We expect reset mode will come soon.
8803 				 *
8804 				 * This can also happen when unplug is done
8805 				 * during resume sequence ended
8806 				 *
8807 				 * In this case, we want to pretend we still
8808 				 * have a sink to keep the pipe running so that
8809 				 * hw state is consistent with the sw state
8810 				 */
8811 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8812 						__func__, acrtc->base.base.id);
8813 				continue;
8814 			}
8815 
8816 			if (dm_old_crtc_state->stream)
8817 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8818 
8819 			pm_runtime_get_noresume(dev->dev);
8820 
8821 			acrtc->enabled = true;
8822 			acrtc->hw_mode = new_crtc_state->mode;
8823 			crtc->hwmode = new_crtc_state->mode;
8824 			mode_set_reset_required = true;
8825 		} else if (modereset_required(new_crtc_state)) {
8826 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8827 			/* i.e. reset mode */
8828 			if (dm_old_crtc_state->stream)
8829 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8830 
8831 			mode_set_reset_required = true;
8832 		}
8833 	} /* for_each_crtc_in_state() */
8834 
8835 	if (dc_state) {
8836 		/* if there mode set or reset, disable eDP PSR */
8837 		if (mode_set_reset_required)
8838 			amdgpu_dm_psr_disable_all(dm);
8839 
8840 		dm_enable_per_frame_crtc_master_sync(dc_state);
8841 		mutex_lock(&dm->dc_lock);
8842 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8843 #if defined(CONFIG_DRM_AMD_DC_DCN)
8844                /* Allow idle optimization when vblank count is 0 for display off */
8845                if (dm->active_vblank_irq_count == 0)
8846                    dc_allow_idle_optimizations(dm->dc,true);
8847 #endif
8848 		mutex_unlock(&dm->dc_lock);
8849 	}
8850 
8851 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8852 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8853 
8854 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8855 
8856 		if (dm_new_crtc_state->stream != NULL) {
8857 			const struct dc_stream_status *status =
8858 					dc_stream_get_status(dm_new_crtc_state->stream);
8859 
8860 			if (!status)
8861 				status = dc_stream_get_status_from_state(dc_state,
8862 									 dm_new_crtc_state->stream);
8863 			if (!status)
8864 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8865 			else
8866 				acrtc->otg_inst = status->primary_otg_inst;
8867 		}
8868 	}
8869 #ifdef CONFIG_DRM_AMD_DC_HDCP
8870 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8871 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8872 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8873 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8874 
8875 		new_crtc_state = NULL;
8876 
8877 		if (acrtc)
8878 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8879 
8880 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8881 
8882 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8883 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8884 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8885 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8886 			dm_new_con_state->update_hdcp = true;
8887 			continue;
8888 		}
8889 
8890 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8891 			hdcp_update_display(
8892 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8893 				new_con_state->hdcp_content_type,
8894 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8895 	}
8896 #endif
8897 
8898 	/* Handle connector state changes */
8899 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8900 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8901 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8902 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8903 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8904 		struct dc_stream_update stream_update;
8905 		struct dc_info_packet hdr_packet;
8906 		struct dc_stream_status *status = NULL;
8907 		bool abm_changed, hdr_changed, scaling_changed;
8908 
8909 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8910 		memset(&stream_update, 0, sizeof(stream_update));
8911 
8912 		if (acrtc) {
8913 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8914 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8915 		}
8916 
8917 		/* Skip any modesets/resets */
8918 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8919 			continue;
8920 
8921 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8922 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8923 
8924 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8925 							     dm_old_con_state);
8926 
8927 		abm_changed = dm_new_crtc_state->abm_level !=
8928 			      dm_old_crtc_state->abm_level;
8929 
8930 		hdr_changed =
8931 			is_hdr_metadata_different(old_con_state, new_con_state);
8932 
8933 		if (!scaling_changed && !abm_changed && !hdr_changed)
8934 			continue;
8935 
8936 		stream_update.stream = dm_new_crtc_state->stream;
8937 		if (scaling_changed) {
8938 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8939 					dm_new_con_state, dm_new_crtc_state->stream);
8940 
8941 			stream_update.src = dm_new_crtc_state->stream->src;
8942 			stream_update.dst = dm_new_crtc_state->stream->dst;
8943 		}
8944 
8945 		if (abm_changed) {
8946 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8947 
8948 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8949 		}
8950 
8951 		if (hdr_changed) {
8952 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8953 			stream_update.hdr_static_metadata = &hdr_packet;
8954 		}
8955 
8956 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8957 		WARN_ON(!status);
8958 		WARN_ON(!status->plane_count);
8959 
8960 		/*
8961 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8962 		 * Here we create an empty update on each plane.
8963 		 * To fix this, DC should permit updating only stream properties.
8964 		 */
8965 		for (j = 0; j < status->plane_count; j++)
8966 			dummy_updates[j].surface = status->plane_states[0];
8967 
8968 
8969 		mutex_lock(&dm->dc_lock);
8970 		dc_commit_updates_for_stream(dm->dc,
8971 						     dummy_updates,
8972 						     status->plane_count,
8973 						     dm_new_crtc_state->stream,
8974 						     &stream_update,
8975 						     dc_state);
8976 		mutex_unlock(&dm->dc_lock);
8977 	}
8978 
8979 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8980 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8981 				      new_crtc_state, i) {
8982 		if (old_crtc_state->active && !new_crtc_state->active)
8983 			crtc_disable_count++;
8984 
8985 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8986 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8987 
8988 		/* For freesync config update on crtc state and params for irq */
8989 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8990 
8991 		/* Handle vrr on->off / off->on transitions */
8992 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8993 						dm_new_crtc_state);
8994 	}
8995 
8996 	/**
8997 	 * Enable interrupts for CRTCs that are newly enabled or went through
8998 	 * a modeset. It was intentionally deferred until after the front end
8999 	 * state was modified to wait until the OTG was on and so the IRQ
9000 	 * handlers didn't access stale or invalid state.
9001 	 */
9002 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9003 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9004 #ifdef CONFIG_DEBUG_FS
9005 		bool configure_crc = false;
9006 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9007 #endif
9008 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9009 
9010 		if (new_crtc_state->active &&
9011 		    (!old_crtc_state->active ||
9012 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9013 			dc_stream_retain(dm_new_crtc_state->stream);
9014 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9015 			manage_dm_interrupts(adev, acrtc, true);
9016 
9017 #ifdef CONFIG_DEBUG_FS
9018 			/**
9019 			 * Frontend may have changed so reapply the CRC capture
9020 			 * settings for the stream.
9021 			 */
9022 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9023 			spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9024 			cur_crc_src = acrtc->dm_irq_params.crc_src;
9025 			spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9026 
9027 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9028 				configure_crc = true;
9029 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9030 				if (amdgpu_dm_crc_window_is_activated(crtc))
9031 					configure_crc = false;
9032 #endif
9033 			}
9034 
9035 			if (configure_crc)
9036 				amdgpu_dm_crtc_configure_crc_source(
9037 					crtc, dm_new_crtc_state, cur_crc_src);
9038 #endif
9039 		}
9040 	}
9041 
9042 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9043 		if (new_crtc_state->async_flip)
9044 			wait_for_vblank = false;
9045 
9046 	/* update planes when needed per crtc*/
9047 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9048 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9049 
9050 		if (dm_new_crtc_state->stream)
9051 			amdgpu_dm_commit_planes(state, dc_state, dev,
9052 						dm, crtc, wait_for_vblank);
9053 	}
9054 
9055 	/* Update audio instances for each connector. */
9056 	amdgpu_dm_commit_audio(dev, state);
9057 
9058 	/*
9059 	 * send vblank event on all events not handled in flip and
9060 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9061 	 */
9062 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9063 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9064 
9065 		if (new_crtc_state->event)
9066 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9067 
9068 		new_crtc_state->event = NULL;
9069 	}
9070 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9071 
9072 	/* Signal HW programming completion */
9073 	drm_atomic_helper_commit_hw_done(state);
9074 
9075 	if (wait_for_vblank)
9076 		drm_atomic_helper_wait_for_flip_done(dev, state);
9077 
9078 	drm_atomic_helper_cleanup_planes(dev, state);
9079 
9080 	/* return the stolen vga memory back to VRAM */
9081 	if (!adev->mman.keep_stolen_vga_memory)
9082 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9083 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9084 
9085 	/*
9086 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9087 	 * so we can put the GPU into runtime suspend if we're not driving any
9088 	 * displays anymore
9089 	 */
9090 	for (i = 0; i < crtc_disable_count; i++)
9091 		pm_runtime_put_autosuspend(dev->dev);
9092 	pm_runtime_mark_last_busy(dev->dev);
9093 
9094 	if (dc_state_temp)
9095 		dc_release_state(dc_state_temp);
9096 }
9097 
9098 
9099 static int dm_force_atomic_commit(struct drm_connector *connector)
9100 {
9101 	int ret = 0;
9102 	struct drm_device *ddev = connector->dev;
9103 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9104 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9105 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9106 	struct drm_connector_state *conn_state;
9107 	struct drm_crtc_state *crtc_state;
9108 	struct drm_plane_state *plane_state;
9109 
9110 	if (!state)
9111 		return -ENOMEM;
9112 
9113 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9114 
9115 	/* Construct an atomic state to restore previous display setting */
9116 
9117 	/*
9118 	 * Attach connectors to drm_atomic_state
9119 	 */
9120 	conn_state = drm_atomic_get_connector_state(state, connector);
9121 
9122 	ret = PTR_ERR_OR_ZERO(conn_state);
9123 	if (ret)
9124 		goto out;
9125 
9126 	/* Attach crtc to drm_atomic_state*/
9127 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9128 
9129 	ret = PTR_ERR_OR_ZERO(crtc_state);
9130 	if (ret)
9131 		goto out;
9132 
9133 	/* force a restore */
9134 	crtc_state->mode_changed = true;
9135 
9136 	/* Attach plane to drm_atomic_state */
9137 	plane_state = drm_atomic_get_plane_state(state, plane);
9138 
9139 	ret = PTR_ERR_OR_ZERO(plane_state);
9140 	if (ret)
9141 		goto out;
9142 
9143 	/* Call commit internally with the state we just constructed */
9144 	ret = drm_atomic_commit(state);
9145 
9146 out:
9147 	drm_atomic_state_put(state);
9148 	if (ret)
9149 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9150 
9151 	return ret;
9152 }
9153 
9154 /*
9155  * This function handles all cases when set mode does not come upon hotplug.
9156  * This includes when a display is unplugged then plugged back into the
9157  * same port and when running without usermode desktop manager supprot
9158  */
9159 void dm_restore_drm_connector_state(struct drm_device *dev,
9160 				    struct drm_connector *connector)
9161 {
9162 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9163 	struct amdgpu_crtc *disconnected_acrtc;
9164 	struct dm_crtc_state *acrtc_state;
9165 
9166 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9167 		return;
9168 
9169 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9170 	if (!disconnected_acrtc)
9171 		return;
9172 
9173 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9174 	if (!acrtc_state->stream)
9175 		return;
9176 
9177 	/*
9178 	 * If the previous sink is not released and different from the current,
9179 	 * we deduce we are in a state where we can not rely on usermode call
9180 	 * to turn on the display, so we do it here
9181 	 */
9182 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9183 		dm_force_atomic_commit(&aconnector->base);
9184 }
9185 
9186 /*
9187  * Grabs all modesetting locks to serialize against any blocking commits,
9188  * Waits for completion of all non blocking commits.
9189  */
9190 static int do_aquire_global_lock(struct drm_device *dev,
9191 				 struct drm_atomic_state *state)
9192 {
9193 	struct drm_crtc *crtc;
9194 	struct drm_crtc_commit *commit;
9195 	long ret;
9196 
9197 	/*
9198 	 * Adding all modeset locks to aquire_ctx will
9199 	 * ensure that when the framework release it the
9200 	 * extra locks we are locking here will get released to
9201 	 */
9202 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9203 	if (ret)
9204 		return ret;
9205 
9206 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9207 		spin_lock(&crtc->commit_lock);
9208 		commit = list_first_entry_or_null(&crtc->commit_list,
9209 				struct drm_crtc_commit, commit_entry);
9210 		if (commit)
9211 			drm_crtc_commit_get(commit);
9212 		spin_unlock(&crtc->commit_lock);
9213 
9214 		if (!commit)
9215 			continue;
9216 
9217 		/*
9218 		 * Make sure all pending HW programming completed and
9219 		 * page flips done
9220 		 */
9221 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9222 
9223 		if (ret > 0)
9224 			ret = wait_for_completion_interruptible_timeout(
9225 					&commit->flip_done, 10*HZ);
9226 
9227 		if (ret == 0)
9228 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9229 				  "timed out\n", crtc->base.id, crtc->name);
9230 
9231 		drm_crtc_commit_put(commit);
9232 	}
9233 
9234 	return ret < 0 ? ret : 0;
9235 }
9236 
9237 static void get_freesync_config_for_crtc(
9238 	struct dm_crtc_state *new_crtc_state,
9239 	struct dm_connector_state *new_con_state)
9240 {
9241 	struct mod_freesync_config config = {0};
9242 	struct amdgpu_dm_connector *aconnector =
9243 			to_amdgpu_dm_connector(new_con_state->base.connector);
9244 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9245 	int vrefresh = drm_mode_vrefresh(mode);
9246 	bool fs_vid_mode = false;
9247 
9248 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9249 					vrefresh >= aconnector->min_vfreq &&
9250 					vrefresh <= aconnector->max_vfreq;
9251 
9252 	if (new_crtc_state->vrr_supported) {
9253 		new_crtc_state->stream->ignore_msa_timing_param = true;
9254 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9255 
9256 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9257 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9258 		config.vsif_supported = true;
9259 		config.btr = true;
9260 
9261 		if (fs_vid_mode) {
9262 			config.state = VRR_STATE_ACTIVE_FIXED;
9263 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9264 			goto out;
9265 		} else if (new_crtc_state->base.vrr_enabled) {
9266 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9267 		} else {
9268 			config.state = VRR_STATE_INACTIVE;
9269 		}
9270 	}
9271 out:
9272 	new_crtc_state->freesync_config = config;
9273 }
9274 
9275 static void reset_freesync_config_for_crtc(
9276 	struct dm_crtc_state *new_crtc_state)
9277 {
9278 	new_crtc_state->vrr_supported = false;
9279 
9280 	memset(&new_crtc_state->vrr_infopacket, 0,
9281 	       sizeof(new_crtc_state->vrr_infopacket));
9282 }
9283 
9284 static bool
9285 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9286 				 struct drm_crtc_state *new_crtc_state)
9287 {
9288 	struct drm_display_mode old_mode, new_mode;
9289 
9290 	if (!old_crtc_state || !new_crtc_state)
9291 		return false;
9292 
9293 	old_mode = old_crtc_state->mode;
9294 	new_mode = new_crtc_state->mode;
9295 
9296 	if (old_mode.clock       == new_mode.clock &&
9297 	    old_mode.hdisplay    == new_mode.hdisplay &&
9298 	    old_mode.vdisplay    == new_mode.vdisplay &&
9299 	    old_mode.htotal      == new_mode.htotal &&
9300 	    old_mode.vtotal      != new_mode.vtotal &&
9301 	    old_mode.hsync_start == new_mode.hsync_start &&
9302 	    old_mode.vsync_start != new_mode.vsync_start &&
9303 	    old_mode.hsync_end   == new_mode.hsync_end &&
9304 	    old_mode.vsync_end   != new_mode.vsync_end &&
9305 	    old_mode.hskew       == new_mode.hskew &&
9306 	    old_mode.vscan       == new_mode.vscan &&
9307 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9308 	    (new_mode.vsync_end - new_mode.vsync_start))
9309 		return true;
9310 
9311 	return false;
9312 }
9313 
9314 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9315 	uint64_t num, den, res;
9316 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9317 
9318 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9319 
9320 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9321 	den = (unsigned long long)new_crtc_state->mode.htotal *
9322 	      (unsigned long long)new_crtc_state->mode.vtotal;
9323 
9324 	res = div_u64(num, den);
9325 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9326 }
9327 
9328 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9329 				struct drm_atomic_state *state,
9330 				struct drm_crtc *crtc,
9331 				struct drm_crtc_state *old_crtc_state,
9332 				struct drm_crtc_state *new_crtc_state,
9333 				bool enable,
9334 				bool *lock_and_validation_needed)
9335 {
9336 	struct dm_atomic_state *dm_state = NULL;
9337 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9338 	struct dc_stream_state *new_stream;
9339 	int ret = 0;
9340 
9341 	/*
9342 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9343 	 * update changed items
9344 	 */
9345 	struct amdgpu_crtc *acrtc = NULL;
9346 	struct amdgpu_dm_connector *aconnector = NULL;
9347 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9348 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9349 
9350 	new_stream = NULL;
9351 
9352 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9353 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9354 	acrtc = to_amdgpu_crtc(crtc);
9355 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9356 
9357 	/* TODO This hack should go away */
9358 	if (aconnector && enable) {
9359 		/* Make sure fake sink is created in plug-in scenario */
9360 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9361 							    &aconnector->base);
9362 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9363 							    &aconnector->base);
9364 
9365 		if (IS_ERR(drm_new_conn_state)) {
9366 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9367 			goto fail;
9368 		}
9369 
9370 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9371 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9372 
9373 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9374 			goto skip_modeset;
9375 
9376 		new_stream = create_validate_stream_for_sink(aconnector,
9377 							     &new_crtc_state->mode,
9378 							     dm_new_conn_state,
9379 							     dm_old_crtc_state->stream);
9380 
9381 		/*
9382 		 * we can have no stream on ACTION_SET if a display
9383 		 * was disconnected during S3, in this case it is not an
9384 		 * error, the OS will be updated after detection, and
9385 		 * will do the right thing on next atomic commit
9386 		 */
9387 
9388 		if (!new_stream) {
9389 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9390 					__func__, acrtc->base.base.id);
9391 			ret = -ENOMEM;
9392 			goto fail;
9393 		}
9394 
9395 		/*
9396 		 * TODO: Check VSDB bits to decide whether this should
9397 		 * be enabled or not.
9398 		 */
9399 		new_stream->triggered_crtc_reset.enabled =
9400 			dm->force_timing_sync;
9401 
9402 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9403 
9404 		ret = fill_hdr_info_packet(drm_new_conn_state,
9405 					   &new_stream->hdr_static_metadata);
9406 		if (ret)
9407 			goto fail;
9408 
9409 		/*
9410 		 * If we already removed the old stream from the context
9411 		 * (and set the new stream to NULL) then we can't reuse
9412 		 * the old stream even if the stream and scaling are unchanged.
9413 		 * We'll hit the BUG_ON and black screen.
9414 		 *
9415 		 * TODO: Refactor this function to allow this check to work
9416 		 * in all conditions.
9417 		 */
9418 		if (amdgpu_freesync_vid_mode &&
9419 		    dm_new_crtc_state->stream &&
9420 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9421 			goto skip_modeset;
9422 
9423 		if (dm_new_crtc_state->stream &&
9424 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9425 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9426 			new_crtc_state->mode_changed = false;
9427 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9428 					 new_crtc_state->mode_changed);
9429 		}
9430 	}
9431 
9432 	/* mode_changed flag may get updated above, need to check again */
9433 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9434 		goto skip_modeset;
9435 
9436 	DRM_DEBUG_ATOMIC(
9437 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9438 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9439 		"connectors_changed:%d\n",
9440 		acrtc->crtc_id,
9441 		new_crtc_state->enable,
9442 		new_crtc_state->active,
9443 		new_crtc_state->planes_changed,
9444 		new_crtc_state->mode_changed,
9445 		new_crtc_state->active_changed,
9446 		new_crtc_state->connectors_changed);
9447 
9448 	/* Remove stream for any changed/disabled CRTC */
9449 	if (!enable) {
9450 
9451 		if (!dm_old_crtc_state->stream)
9452 			goto skip_modeset;
9453 
9454 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9455 		    is_timing_unchanged_for_freesync(new_crtc_state,
9456 						     old_crtc_state)) {
9457 			new_crtc_state->mode_changed = false;
9458 			DRM_DEBUG_DRIVER(
9459 				"Mode change not required for front porch change, "
9460 				"setting mode_changed to %d",
9461 				new_crtc_state->mode_changed);
9462 
9463 			set_freesync_fixed_config(dm_new_crtc_state);
9464 
9465 			goto skip_modeset;
9466 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9467 			   is_freesync_video_mode(&new_crtc_state->mode,
9468 						  aconnector)) {
9469 			set_freesync_fixed_config(dm_new_crtc_state);
9470 		}
9471 
9472 		ret = dm_atomic_get_state(state, &dm_state);
9473 		if (ret)
9474 			goto fail;
9475 
9476 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9477 				crtc->base.id);
9478 
9479 		/* i.e. reset mode */
9480 		if (dc_remove_stream_from_ctx(
9481 				dm->dc,
9482 				dm_state->context,
9483 				dm_old_crtc_state->stream) != DC_OK) {
9484 			ret = -EINVAL;
9485 			goto fail;
9486 		}
9487 
9488 		dc_stream_release(dm_old_crtc_state->stream);
9489 		dm_new_crtc_state->stream = NULL;
9490 
9491 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9492 
9493 		*lock_and_validation_needed = true;
9494 
9495 	} else {/* Add stream for any updated/enabled CRTC */
9496 		/*
9497 		 * Quick fix to prevent NULL pointer on new_stream when
9498 		 * added MST connectors not found in existing crtc_state in the chained mode
9499 		 * TODO: need to dig out the root cause of that
9500 		 */
9501 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9502 			goto skip_modeset;
9503 
9504 		if (modereset_required(new_crtc_state))
9505 			goto skip_modeset;
9506 
9507 		if (modeset_required(new_crtc_state, new_stream,
9508 				     dm_old_crtc_state->stream)) {
9509 
9510 			WARN_ON(dm_new_crtc_state->stream);
9511 
9512 			ret = dm_atomic_get_state(state, &dm_state);
9513 			if (ret)
9514 				goto fail;
9515 
9516 			dm_new_crtc_state->stream = new_stream;
9517 
9518 			dc_stream_retain(new_stream);
9519 
9520 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9521 					 crtc->base.id);
9522 
9523 			if (dc_add_stream_to_ctx(
9524 					dm->dc,
9525 					dm_state->context,
9526 					dm_new_crtc_state->stream) != DC_OK) {
9527 				ret = -EINVAL;
9528 				goto fail;
9529 			}
9530 
9531 			*lock_and_validation_needed = true;
9532 		}
9533 	}
9534 
9535 skip_modeset:
9536 	/* Release extra reference */
9537 	if (new_stream)
9538 		 dc_stream_release(new_stream);
9539 
9540 	/*
9541 	 * We want to do dc stream updates that do not require a
9542 	 * full modeset below.
9543 	 */
9544 	if (!(enable && aconnector && new_crtc_state->active))
9545 		return 0;
9546 	/*
9547 	 * Given above conditions, the dc state cannot be NULL because:
9548 	 * 1. We're in the process of enabling CRTCs (just been added
9549 	 *    to the dc context, or already is on the context)
9550 	 * 2. Has a valid connector attached, and
9551 	 * 3. Is currently active and enabled.
9552 	 * => The dc stream state currently exists.
9553 	 */
9554 	BUG_ON(dm_new_crtc_state->stream == NULL);
9555 
9556 	/* Scaling or underscan settings */
9557 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9558 		update_stream_scaling_settings(
9559 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9560 
9561 	/* ABM settings */
9562 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9563 
9564 	/*
9565 	 * Color management settings. We also update color properties
9566 	 * when a modeset is needed, to ensure it gets reprogrammed.
9567 	 */
9568 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9569 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9570 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9571 		if (ret)
9572 			goto fail;
9573 	}
9574 
9575 	/* Update Freesync settings. */
9576 	get_freesync_config_for_crtc(dm_new_crtc_state,
9577 				     dm_new_conn_state);
9578 
9579 	return ret;
9580 
9581 fail:
9582 	if (new_stream)
9583 		dc_stream_release(new_stream);
9584 	return ret;
9585 }
9586 
9587 static bool should_reset_plane(struct drm_atomic_state *state,
9588 			       struct drm_plane *plane,
9589 			       struct drm_plane_state *old_plane_state,
9590 			       struct drm_plane_state *new_plane_state)
9591 {
9592 	struct drm_plane *other;
9593 	struct drm_plane_state *old_other_state, *new_other_state;
9594 	struct drm_crtc_state *new_crtc_state;
9595 	int i;
9596 
9597 	/*
9598 	 * TODO: Remove this hack once the checks below are sufficient
9599 	 * enough to determine when we need to reset all the planes on
9600 	 * the stream.
9601 	 */
9602 	if (state->allow_modeset)
9603 		return true;
9604 
9605 	/* Exit early if we know that we're adding or removing the plane. */
9606 	if (old_plane_state->crtc != new_plane_state->crtc)
9607 		return true;
9608 
9609 	/* old crtc == new_crtc == NULL, plane not in context. */
9610 	if (!new_plane_state->crtc)
9611 		return false;
9612 
9613 	new_crtc_state =
9614 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9615 
9616 	if (!new_crtc_state)
9617 		return true;
9618 
9619 	/* CRTC Degamma changes currently require us to recreate planes. */
9620 	if (new_crtc_state->color_mgmt_changed)
9621 		return true;
9622 
9623 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9624 		return true;
9625 
9626 	/*
9627 	 * If there are any new primary or overlay planes being added or
9628 	 * removed then the z-order can potentially change. To ensure
9629 	 * correct z-order and pipe acquisition the current DC architecture
9630 	 * requires us to remove and recreate all existing planes.
9631 	 *
9632 	 * TODO: Come up with a more elegant solution for this.
9633 	 */
9634 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9635 		struct amdgpu_framebuffer *old_afb, *new_afb;
9636 		if (other->type == DRM_PLANE_TYPE_CURSOR)
9637 			continue;
9638 
9639 		if (old_other_state->crtc != new_plane_state->crtc &&
9640 		    new_other_state->crtc != new_plane_state->crtc)
9641 			continue;
9642 
9643 		if (old_other_state->crtc != new_other_state->crtc)
9644 			return true;
9645 
9646 		/* Src/dst size and scaling updates. */
9647 		if (old_other_state->src_w != new_other_state->src_w ||
9648 		    old_other_state->src_h != new_other_state->src_h ||
9649 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9650 		    old_other_state->crtc_h != new_other_state->crtc_h)
9651 			return true;
9652 
9653 		/* Rotation / mirroring updates. */
9654 		if (old_other_state->rotation != new_other_state->rotation)
9655 			return true;
9656 
9657 		/* Blending updates. */
9658 		if (old_other_state->pixel_blend_mode !=
9659 		    new_other_state->pixel_blend_mode)
9660 			return true;
9661 
9662 		/* Alpha updates. */
9663 		if (old_other_state->alpha != new_other_state->alpha)
9664 			return true;
9665 
9666 		/* Colorspace changes. */
9667 		if (old_other_state->color_range != new_other_state->color_range ||
9668 		    old_other_state->color_encoding != new_other_state->color_encoding)
9669 			return true;
9670 
9671 		/* Framebuffer checks fall at the end. */
9672 		if (!old_other_state->fb || !new_other_state->fb)
9673 			continue;
9674 
9675 		/* Pixel format changes can require bandwidth updates. */
9676 		if (old_other_state->fb->format != new_other_state->fb->format)
9677 			return true;
9678 
9679 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9680 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9681 
9682 		/* Tiling and DCC changes also require bandwidth updates. */
9683 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9684 		    old_afb->base.modifier != new_afb->base.modifier)
9685 			return true;
9686 	}
9687 
9688 	return false;
9689 }
9690 
9691 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9692 			      struct drm_plane_state *new_plane_state,
9693 			      struct drm_framebuffer *fb)
9694 {
9695 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9696 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9697 	unsigned int pitch;
9698 	bool linear;
9699 
9700 	if (fb->width > new_acrtc->max_cursor_width ||
9701 	    fb->height > new_acrtc->max_cursor_height) {
9702 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9703 				 new_plane_state->fb->width,
9704 				 new_plane_state->fb->height);
9705 		return -EINVAL;
9706 	}
9707 	if (new_plane_state->src_w != fb->width << 16 ||
9708 	    new_plane_state->src_h != fb->height << 16) {
9709 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9710 		return -EINVAL;
9711 	}
9712 
9713 	/* Pitch in pixels */
9714 	pitch = fb->pitches[0] / fb->format->cpp[0];
9715 
9716 	if (fb->width != pitch) {
9717 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9718 				 fb->width, pitch);
9719 		return -EINVAL;
9720 	}
9721 
9722 	switch (pitch) {
9723 	case 64:
9724 	case 128:
9725 	case 256:
9726 		/* FB pitch is supported by cursor plane */
9727 		break;
9728 	default:
9729 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9730 		return -EINVAL;
9731 	}
9732 
9733 	/* Core DRM takes care of checking FB modifiers, so we only need to
9734 	 * check tiling flags when the FB doesn't have a modifier. */
9735 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9736 		if (adev->family < AMDGPU_FAMILY_AI) {
9737 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9738 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9739 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9740 		} else {
9741 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9742 		}
9743 		if (!linear) {
9744 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9745 			return -EINVAL;
9746 		}
9747 	}
9748 
9749 	return 0;
9750 }
9751 
9752 static int dm_update_plane_state(struct dc *dc,
9753 				 struct drm_atomic_state *state,
9754 				 struct drm_plane *plane,
9755 				 struct drm_plane_state *old_plane_state,
9756 				 struct drm_plane_state *new_plane_state,
9757 				 bool enable,
9758 				 bool *lock_and_validation_needed)
9759 {
9760 
9761 	struct dm_atomic_state *dm_state = NULL;
9762 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9763 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9764 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9765 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9766 	struct amdgpu_crtc *new_acrtc;
9767 	bool needs_reset;
9768 	int ret = 0;
9769 
9770 
9771 	new_plane_crtc = new_plane_state->crtc;
9772 	old_plane_crtc = old_plane_state->crtc;
9773 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9774 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9775 
9776 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9777 		if (!enable || !new_plane_crtc ||
9778 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9779 			return 0;
9780 
9781 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9782 
9783 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9784 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9785 			return -EINVAL;
9786 		}
9787 
9788 		if (new_plane_state->fb) {
9789 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9790 						 new_plane_state->fb);
9791 			if (ret)
9792 				return ret;
9793 		}
9794 
9795 		return 0;
9796 	}
9797 
9798 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9799 					 new_plane_state);
9800 
9801 	/* Remove any changed/removed planes */
9802 	if (!enable) {
9803 		if (!needs_reset)
9804 			return 0;
9805 
9806 		if (!old_plane_crtc)
9807 			return 0;
9808 
9809 		old_crtc_state = drm_atomic_get_old_crtc_state(
9810 				state, old_plane_crtc);
9811 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9812 
9813 		if (!dm_old_crtc_state->stream)
9814 			return 0;
9815 
9816 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9817 				plane->base.id, old_plane_crtc->base.id);
9818 
9819 		ret = dm_atomic_get_state(state, &dm_state);
9820 		if (ret)
9821 			return ret;
9822 
9823 		if (!dc_remove_plane_from_context(
9824 				dc,
9825 				dm_old_crtc_state->stream,
9826 				dm_old_plane_state->dc_state,
9827 				dm_state->context)) {
9828 
9829 			return -EINVAL;
9830 		}
9831 
9832 
9833 		dc_plane_state_release(dm_old_plane_state->dc_state);
9834 		dm_new_plane_state->dc_state = NULL;
9835 
9836 		*lock_and_validation_needed = true;
9837 
9838 	} else { /* Add new planes */
9839 		struct dc_plane_state *dc_new_plane_state;
9840 
9841 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9842 			return 0;
9843 
9844 		if (!new_plane_crtc)
9845 			return 0;
9846 
9847 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9848 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9849 
9850 		if (!dm_new_crtc_state->stream)
9851 			return 0;
9852 
9853 		if (!needs_reset)
9854 			return 0;
9855 
9856 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9857 		if (ret)
9858 			return ret;
9859 
9860 		WARN_ON(dm_new_plane_state->dc_state);
9861 
9862 		dc_new_plane_state = dc_create_plane_state(dc);
9863 		if (!dc_new_plane_state)
9864 			return -ENOMEM;
9865 
9866 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9867 				 plane->base.id, new_plane_crtc->base.id);
9868 
9869 		ret = fill_dc_plane_attributes(
9870 			drm_to_adev(new_plane_crtc->dev),
9871 			dc_new_plane_state,
9872 			new_plane_state,
9873 			new_crtc_state);
9874 		if (ret) {
9875 			dc_plane_state_release(dc_new_plane_state);
9876 			return ret;
9877 		}
9878 
9879 		ret = dm_atomic_get_state(state, &dm_state);
9880 		if (ret) {
9881 			dc_plane_state_release(dc_new_plane_state);
9882 			return ret;
9883 		}
9884 
9885 		/*
9886 		 * Any atomic check errors that occur after this will
9887 		 * not need a release. The plane state will be attached
9888 		 * to the stream, and therefore part of the atomic
9889 		 * state. It'll be released when the atomic state is
9890 		 * cleaned.
9891 		 */
9892 		if (!dc_add_plane_to_context(
9893 				dc,
9894 				dm_new_crtc_state->stream,
9895 				dc_new_plane_state,
9896 				dm_state->context)) {
9897 
9898 			dc_plane_state_release(dc_new_plane_state);
9899 			return -EINVAL;
9900 		}
9901 
9902 		dm_new_plane_state->dc_state = dc_new_plane_state;
9903 
9904 		/* Tell DC to do a full surface update every time there
9905 		 * is a plane change. Inefficient, but works for now.
9906 		 */
9907 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9908 
9909 		*lock_and_validation_needed = true;
9910 	}
9911 
9912 
9913 	return ret;
9914 }
9915 
9916 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9917 				struct drm_crtc *crtc,
9918 				struct drm_crtc_state *new_crtc_state)
9919 {
9920 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9921 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9922 
9923 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9924 	 * cursor per pipe but it's going to inherit the scaling and
9925 	 * positioning from the underlying pipe. Check the cursor plane's
9926 	 * blending properties match the primary plane's. */
9927 
9928 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9929 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9930 	if (!new_cursor_state || !new_primary_state ||
9931 	    !new_cursor_state->fb || !new_primary_state->fb) {
9932 		return 0;
9933 	}
9934 
9935 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9936 			 (new_cursor_state->src_w >> 16);
9937 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9938 			 (new_cursor_state->src_h >> 16);
9939 
9940 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9941 			 (new_primary_state->src_w >> 16);
9942 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9943 			 (new_primary_state->src_h >> 16);
9944 
9945 	if (cursor_scale_w != primary_scale_w ||
9946 	    cursor_scale_h != primary_scale_h) {
9947 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9948 		return -EINVAL;
9949 	}
9950 
9951 	return 0;
9952 }
9953 
9954 #if defined(CONFIG_DRM_AMD_DC_DCN)
9955 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9956 {
9957 	struct drm_connector *connector;
9958 	struct drm_connector_state *conn_state;
9959 	struct amdgpu_dm_connector *aconnector = NULL;
9960 	int i;
9961 	for_each_new_connector_in_state(state, connector, conn_state, i) {
9962 		if (conn_state->crtc != crtc)
9963 			continue;
9964 
9965 		aconnector = to_amdgpu_dm_connector(connector);
9966 		if (!aconnector->port || !aconnector->mst_port)
9967 			aconnector = NULL;
9968 		else
9969 			break;
9970 	}
9971 
9972 	if (!aconnector)
9973 		return 0;
9974 
9975 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9976 }
9977 #endif
9978 
9979 static int validate_overlay(struct drm_atomic_state *state)
9980 {
9981 	int i;
9982 	struct drm_plane *plane;
9983 	struct drm_plane_state *old_plane_state, *new_plane_state;
9984 	struct drm_plane_state *primary_state, *overlay_state = NULL;
9985 
9986 	/* Check if primary plane is contained inside overlay */
9987 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9988 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9989 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9990 				return 0;
9991 
9992 			overlay_state = new_plane_state;
9993 			continue;
9994 		}
9995 	}
9996 
9997 	/* check if we're making changes to the overlay plane */
9998 	if (!overlay_state)
9999 		return 0;
10000 
10001 	/* check if overlay plane is enabled */
10002 	if (!overlay_state->crtc)
10003 		return 0;
10004 
10005 	/* find the primary plane for the CRTC that the overlay is enabled on */
10006 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10007 	if (IS_ERR(primary_state))
10008 		return PTR_ERR(primary_state);
10009 
10010 	/* check if primary plane is enabled */
10011 	if (!primary_state->crtc)
10012 		return 0;
10013 
10014 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10015 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10016 	    primary_state->crtc_y < overlay_state->crtc_y ||
10017 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10018 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10019 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10020 		return -EINVAL;
10021 	}
10022 
10023 	return 0;
10024 }
10025 
10026 /**
10027  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10028  * @dev: The DRM device
10029  * @state: The atomic state to commit
10030  *
10031  * Validate that the given atomic state is programmable by DC into hardware.
10032  * This involves constructing a &struct dc_state reflecting the new hardware
10033  * state we wish to commit, then querying DC to see if it is programmable. It's
10034  * important not to modify the existing DC state. Otherwise, atomic_check
10035  * may unexpectedly commit hardware changes.
10036  *
10037  * When validating the DC state, it's important that the right locks are
10038  * acquired. For full updates case which removes/adds/updates streams on one
10039  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10040  * that any such full update commit will wait for completion of any outstanding
10041  * flip using DRMs synchronization events.
10042  *
10043  * Note that DM adds the affected connectors for all CRTCs in state, when that
10044  * might not seem necessary. This is because DC stream creation requires the
10045  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10046  * be possible but non-trivial - a possible TODO item.
10047  *
10048  * Return: -Error code if validation failed.
10049  */
10050 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10051 				  struct drm_atomic_state *state)
10052 {
10053 	struct amdgpu_device *adev = drm_to_adev(dev);
10054 	struct dm_atomic_state *dm_state = NULL;
10055 	struct dc *dc = adev->dm.dc;
10056 	struct drm_connector *connector;
10057 	struct drm_connector_state *old_con_state, *new_con_state;
10058 	struct drm_crtc *crtc;
10059 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10060 	struct drm_plane *plane;
10061 	struct drm_plane_state *old_plane_state, *new_plane_state;
10062 	enum dc_status status;
10063 	int ret, i;
10064 	bool lock_and_validation_needed = false;
10065 	struct dm_crtc_state *dm_old_crtc_state;
10066 
10067 	trace_amdgpu_dm_atomic_check_begin(state);
10068 
10069 	ret = drm_atomic_helper_check_modeset(dev, state);
10070 	if (ret)
10071 		goto fail;
10072 
10073 	/* Check connector changes */
10074 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10075 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10076 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10077 
10078 		/* Skip connectors that are disabled or part of modeset already. */
10079 		if (!old_con_state->crtc && !new_con_state->crtc)
10080 			continue;
10081 
10082 		if (!new_con_state->crtc)
10083 			continue;
10084 
10085 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10086 		if (IS_ERR(new_crtc_state)) {
10087 			ret = PTR_ERR(new_crtc_state);
10088 			goto fail;
10089 		}
10090 
10091 		if (dm_old_con_state->abm_level !=
10092 		    dm_new_con_state->abm_level)
10093 			new_crtc_state->connectors_changed = true;
10094 	}
10095 
10096 #if defined(CONFIG_DRM_AMD_DC_DCN)
10097 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10098 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10099 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10100 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10101 				if (ret)
10102 					goto fail;
10103 			}
10104 		}
10105 	}
10106 #endif
10107 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10108 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10109 
10110 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10111 		    !new_crtc_state->color_mgmt_changed &&
10112 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10113 			dm_old_crtc_state->dsc_force_changed == false)
10114 			continue;
10115 
10116 		if (!new_crtc_state->enable)
10117 			continue;
10118 
10119 		ret = drm_atomic_add_affected_connectors(state, crtc);
10120 		if (ret)
10121 			return ret;
10122 
10123 		ret = drm_atomic_add_affected_planes(state, crtc);
10124 		if (ret)
10125 			goto fail;
10126 
10127 		if (dm_old_crtc_state->dsc_force_changed)
10128 			new_crtc_state->mode_changed = true;
10129 	}
10130 
10131 	/*
10132 	 * Add all primary and overlay planes on the CRTC to the state
10133 	 * whenever a plane is enabled to maintain correct z-ordering
10134 	 * and to enable fast surface updates.
10135 	 */
10136 	drm_for_each_crtc(crtc, dev) {
10137 		bool modified = false;
10138 
10139 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10140 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10141 				continue;
10142 
10143 			if (new_plane_state->crtc == crtc ||
10144 			    old_plane_state->crtc == crtc) {
10145 				modified = true;
10146 				break;
10147 			}
10148 		}
10149 
10150 		if (!modified)
10151 			continue;
10152 
10153 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10154 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10155 				continue;
10156 
10157 			new_plane_state =
10158 				drm_atomic_get_plane_state(state, plane);
10159 
10160 			if (IS_ERR(new_plane_state)) {
10161 				ret = PTR_ERR(new_plane_state);
10162 				goto fail;
10163 			}
10164 		}
10165 	}
10166 
10167 	/* Remove exiting planes if they are modified */
10168 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10169 		ret = dm_update_plane_state(dc, state, plane,
10170 					    old_plane_state,
10171 					    new_plane_state,
10172 					    false,
10173 					    &lock_and_validation_needed);
10174 		if (ret)
10175 			goto fail;
10176 	}
10177 
10178 	/* Disable all crtcs which require disable */
10179 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10180 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10181 					   old_crtc_state,
10182 					   new_crtc_state,
10183 					   false,
10184 					   &lock_and_validation_needed);
10185 		if (ret)
10186 			goto fail;
10187 	}
10188 
10189 	/* Enable all crtcs which require enable */
10190 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10191 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10192 					   old_crtc_state,
10193 					   new_crtc_state,
10194 					   true,
10195 					   &lock_and_validation_needed);
10196 		if (ret)
10197 			goto fail;
10198 	}
10199 
10200 	ret = validate_overlay(state);
10201 	if (ret)
10202 		goto fail;
10203 
10204 	/* Add new/modified planes */
10205 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10206 		ret = dm_update_plane_state(dc, state, plane,
10207 					    old_plane_state,
10208 					    new_plane_state,
10209 					    true,
10210 					    &lock_and_validation_needed);
10211 		if (ret)
10212 			goto fail;
10213 	}
10214 
10215 	/* Run this here since we want to validate the streams we created */
10216 	ret = drm_atomic_helper_check_planes(dev, state);
10217 	if (ret)
10218 		goto fail;
10219 
10220 	/* Check cursor planes scaling */
10221 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10222 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10223 		if (ret)
10224 			goto fail;
10225 	}
10226 
10227 	if (state->legacy_cursor_update) {
10228 		/*
10229 		 * This is a fast cursor update coming from the plane update
10230 		 * helper, check if it can be done asynchronously for better
10231 		 * performance.
10232 		 */
10233 		state->async_update =
10234 			!drm_atomic_helper_async_check(dev, state);
10235 
10236 		/*
10237 		 * Skip the remaining global validation if this is an async
10238 		 * update. Cursor updates can be done without affecting
10239 		 * state or bandwidth calcs and this avoids the performance
10240 		 * penalty of locking the private state object and
10241 		 * allocating a new dc_state.
10242 		 */
10243 		if (state->async_update)
10244 			return 0;
10245 	}
10246 
10247 	/* Check scaling and underscan changes*/
10248 	/* TODO Removed scaling changes validation due to inability to commit
10249 	 * new stream into context w\o causing full reset. Need to
10250 	 * decide how to handle.
10251 	 */
10252 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10253 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10254 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10255 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10256 
10257 		/* Skip any modesets/resets */
10258 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10259 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10260 			continue;
10261 
10262 		/* Skip any thing not scale or underscan changes */
10263 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10264 			continue;
10265 
10266 		lock_and_validation_needed = true;
10267 	}
10268 
10269 	/**
10270 	 * Streams and planes are reset when there are changes that affect
10271 	 * bandwidth. Anything that affects bandwidth needs to go through
10272 	 * DC global validation to ensure that the configuration can be applied
10273 	 * to hardware.
10274 	 *
10275 	 * We have to currently stall out here in atomic_check for outstanding
10276 	 * commits to finish in this case because our IRQ handlers reference
10277 	 * DRM state directly - we can end up disabling interrupts too early
10278 	 * if we don't.
10279 	 *
10280 	 * TODO: Remove this stall and drop DM state private objects.
10281 	 */
10282 	if (lock_and_validation_needed) {
10283 		ret = dm_atomic_get_state(state, &dm_state);
10284 		if (ret)
10285 			goto fail;
10286 
10287 		ret = do_aquire_global_lock(dev, state);
10288 		if (ret)
10289 			goto fail;
10290 
10291 #if defined(CONFIG_DRM_AMD_DC_DCN)
10292 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10293 			goto fail;
10294 
10295 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10296 		if (ret)
10297 			goto fail;
10298 #endif
10299 
10300 		/*
10301 		 * Perform validation of MST topology in the state:
10302 		 * We need to perform MST atomic check before calling
10303 		 * dc_validate_global_state(), or there is a chance
10304 		 * to get stuck in an infinite loop and hang eventually.
10305 		 */
10306 		ret = drm_dp_mst_atomic_check(state);
10307 		if (ret)
10308 			goto fail;
10309 		status = dc_validate_global_state(dc, dm_state->context, false);
10310 		if (status != DC_OK) {
10311 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10312 				       dc_status_to_str(status), status);
10313 			ret = -EINVAL;
10314 			goto fail;
10315 		}
10316 	} else {
10317 		/*
10318 		 * The commit is a fast update. Fast updates shouldn't change
10319 		 * the DC context, affect global validation, and can have their
10320 		 * commit work done in parallel with other commits not touching
10321 		 * the same resource. If we have a new DC context as part of
10322 		 * the DM atomic state from validation we need to free it and
10323 		 * retain the existing one instead.
10324 		 *
10325 		 * Furthermore, since the DM atomic state only contains the DC
10326 		 * context and can safely be annulled, we can free the state
10327 		 * and clear the associated private object now to free
10328 		 * some memory and avoid a possible use-after-free later.
10329 		 */
10330 
10331 		for (i = 0; i < state->num_private_objs; i++) {
10332 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10333 
10334 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
10335 				int j = state->num_private_objs-1;
10336 
10337 				dm_atomic_destroy_state(obj,
10338 						state->private_objs[i].state);
10339 
10340 				/* If i is not at the end of the array then the
10341 				 * last element needs to be moved to where i was
10342 				 * before the array can safely be truncated.
10343 				 */
10344 				if (i != j)
10345 					state->private_objs[i] =
10346 						state->private_objs[j];
10347 
10348 				state->private_objs[j].ptr = NULL;
10349 				state->private_objs[j].state = NULL;
10350 				state->private_objs[j].old_state = NULL;
10351 				state->private_objs[j].new_state = NULL;
10352 
10353 				state->num_private_objs = j;
10354 				break;
10355 			}
10356 		}
10357 	}
10358 
10359 	/* Store the overall update type for use later in atomic check. */
10360 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10361 		struct dm_crtc_state *dm_new_crtc_state =
10362 			to_dm_crtc_state(new_crtc_state);
10363 
10364 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10365 							 UPDATE_TYPE_FULL :
10366 							 UPDATE_TYPE_FAST;
10367 	}
10368 
10369 	/* Must be success */
10370 	WARN_ON(ret);
10371 
10372 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10373 
10374 	return ret;
10375 
10376 fail:
10377 	if (ret == -EDEADLK)
10378 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10379 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10380 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10381 	else
10382 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10383 
10384 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10385 
10386 	return ret;
10387 }
10388 
10389 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10390 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10391 {
10392 	uint8_t dpcd_data;
10393 	bool capable = false;
10394 
10395 	if (amdgpu_dm_connector->dc_link &&
10396 		dm_helpers_dp_read_dpcd(
10397 				NULL,
10398 				amdgpu_dm_connector->dc_link,
10399 				DP_DOWN_STREAM_PORT_COUNT,
10400 				&dpcd_data,
10401 				sizeof(dpcd_data))) {
10402 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
10403 	}
10404 
10405 	return capable;
10406 }
10407 
10408 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10409 		uint8_t *edid_ext, int len,
10410 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10411 {
10412 	int i;
10413 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10414 	struct dc *dc = adev->dm.dc;
10415 
10416 	/* send extension block to DMCU for parsing */
10417 	for (i = 0; i < len; i += 8) {
10418 		bool res;
10419 		int offset;
10420 
10421 		/* send 8 bytes a time */
10422 		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10423 			return false;
10424 
10425 		if (i+8 == len) {
10426 			/* EDID block sent completed, expect result */
10427 			int version, min_rate, max_rate;
10428 
10429 			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10430 			if (res) {
10431 				/* amd vsdb found */
10432 				vsdb_info->freesync_supported = 1;
10433 				vsdb_info->amd_vsdb_version = version;
10434 				vsdb_info->min_refresh_rate_hz = min_rate;
10435 				vsdb_info->max_refresh_rate_hz = max_rate;
10436 				return true;
10437 			}
10438 			/* not amd vsdb */
10439 			return false;
10440 		}
10441 
10442 		/* check for ack*/
10443 		res = dc_edid_parser_recv_cea_ack(dc, &offset);
10444 		if (!res)
10445 			return false;
10446 	}
10447 
10448 	return false;
10449 }
10450 
10451 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10452 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10453 {
10454 	uint8_t *edid_ext = NULL;
10455 	int i;
10456 	bool valid_vsdb_found = false;
10457 
10458 	/*----- drm_find_cea_extension() -----*/
10459 	/* No EDID or EDID extensions */
10460 	if (edid == NULL || edid->extensions == 0)
10461 		return -ENODEV;
10462 
10463 	/* Find CEA extension */
10464 	for (i = 0; i < edid->extensions; i++) {
10465 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10466 		if (edid_ext[0] == CEA_EXT)
10467 			break;
10468 	}
10469 
10470 	if (i == edid->extensions)
10471 		return -ENODEV;
10472 
10473 	/*----- cea_db_offsets() -----*/
10474 	if (edid_ext[0] != CEA_EXT)
10475 		return -ENODEV;
10476 
10477 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10478 
10479 	return valid_vsdb_found ? i : -ENODEV;
10480 }
10481 
10482 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10483 					struct edid *edid)
10484 {
10485 	int i = 0;
10486 	struct detailed_timing *timing;
10487 	struct detailed_non_pixel *data;
10488 	struct detailed_data_monitor_range *range;
10489 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10490 			to_amdgpu_dm_connector(connector);
10491 	struct dm_connector_state *dm_con_state = NULL;
10492 
10493 	struct drm_device *dev = connector->dev;
10494 	struct amdgpu_device *adev = drm_to_adev(dev);
10495 	bool freesync_capable = false;
10496 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10497 
10498 	if (!connector->state) {
10499 		DRM_ERROR("%s - Connector has no state", __func__);
10500 		goto update;
10501 	}
10502 
10503 	if (!edid) {
10504 		dm_con_state = to_dm_connector_state(connector->state);
10505 
10506 		amdgpu_dm_connector->min_vfreq = 0;
10507 		amdgpu_dm_connector->max_vfreq = 0;
10508 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10509 
10510 		goto update;
10511 	}
10512 
10513 	dm_con_state = to_dm_connector_state(connector->state);
10514 
10515 	if (!amdgpu_dm_connector->dc_sink) {
10516 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10517 		goto update;
10518 	}
10519 	if (!adev->dm.freesync_module)
10520 		goto update;
10521 
10522 
10523 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10524 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10525 		bool edid_check_required = false;
10526 
10527 		if (edid) {
10528 			edid_check_required = is_dp_capable_without_timing_msa(
10529 						adev->dm.dc,
10530 						amdgpu_dm_connector);
10531 		}
10532 
10533 		if (edid_check_required == true && (edid->version > 1 ||
10534 		   (edid->version == 1 && edid->revision > 1))) {
10535 			for (i = 0; i < 4; i++) {
10536 
10537 				timing	= &edid->detailed_timings[i];
10538 				data	= &timing->data.other_data;
10539 				range	= &data->data.range;
10540 				/*
10541 				 * Check if monitor has continuous frequency mode
10542 				 */
10543 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10544 					continue;
10545 				/*
10546 				 * Check for flag range limits only. If flag == 1 then
10547 				 * no additional timing information provided.
10548 				 * Default GTF, GTF Secondary curve and CVT are not
10549 				 * supported
10550 				 */
10551 				if (range->flags != 1)
10552 					continue;
10553 
10554 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10555 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10556 				amdgpu_dm_connector->pixel_clock_mhz =
10557 					range->pixel_clock_mhz * 10;
10558 
10559 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10560 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10561 
10562 				break;
10563 			}
10564 
10565 			if (amdgpu_dm_connector->max_vfreq -
10566 			    amdgpu_dm_connector->min_vfreq > 10) {
10567 
10568 				freesync_capable = true;
10569 			}
10570 		}
10571 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10572 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10573 		if (i >= 0 && vsdb_info.freesync_supported) {
10574 			timing  = &edid->detailed_timings[i];
10575 			data    = &timing->data.other_data;
10576 
10577 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10578 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10579 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10580 				freesync_capable = true;
10581 
10582 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10583 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10584 		}
10585 	}
10586 
10587 update:
10588 	if (dm_con_state)
10589 		dm_con_state->freesync_capable = freesync_capable;
10590 
10591 	if (connector->vrr_capable_property)
10592 		drm_connector_set_vrr_capable_property(connector,
10593 						       freesync_capable);
10594 }
10595 
10596 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10597 {
10598 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10599 
10600 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10601 		return;
10602 	if (link->type == dc_connection_none)
10603 		return;
10604 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10605 					dpcd_data, sizeof(dpcd_data))) {
10606 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10607 
10608 		if (dpcd_data[0] == 0) {
10609 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10610 			link->psr_settings.psr_feature_enabled = false;
10611 		} else {
10612 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
10613 			link->psr_settings.psr_feature_enabled = true;
10614 		}
10615 
10616 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10617 	}
10618 }
10619 
10620 /*
10621  * amdgpu_dm_link_setup_psr() - configure psr link
10622  * @stream: stream state
10623  *
10624  * Return: true if success
10625  */
10626 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10627 {
10628 	struct dc_link *link = NULL;
10629 	struct psr_config psr_config = {0};
10630 	struct psr_context psr_context = {0};
10631 	bool ret = false;
10632 
10633 	if (stream == NULL)
10634 		return false;
10635 
10636 	link = stream->link;
10637 
10638 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10639 
10640 	if (psr_config.psr_version > 0) {
10641 		psr_config.psr_exit_link_training_required = 0x1;
10642 		psr_config.psr_frame_capture_indication_req = 0;
10643 		psr_config.psr_rfb_setup_time = 0x37;
10644 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10645 		psr_config.allow_smu_optimizations = 0x0;
10646 
10647 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10648 
10649 	}
10650 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
10651 
10652 	return ret;
10653 }
10654 
10655 /*
10656  * amdgpu_dm_psr_enable() - enable psr f/w
10657  * @stream: stream state
10658  *
10659  * Return: true if success
10660  */
10661 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10662 {
10663 	struct dc_link *link = stream->link;
10664 	unsigned int vsync_rate_hz = 0;
10665 	struct dc_static_screen_params params = {0};
10666 	/* Calculate number of static frames before generating interrupt to
10667 	 * enter PSR.
10668 	 */
10669 	// Init fail safe of 2 frames static
10670 	unsigned int num_frames_static = 2;
10671 
10672 	DRM_DEBUG_DRIVER("Enabling psr...\n");
10673 
10674 	vsync_rate_hz = div64_u64(div64_u64((
10675 			stream->timing.pix_clk_100hz * 100),
10676 			stream->timing.v_total),
10677 			stream->timing.h_total);
10678 
10679 	/* Round up
10680 	 * Calculate number of frames such that at least 30 ms of time has
10681 	 * passed.
10682 	 */
10683 	if (vsync_rate_hz != 0) {
10684 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10685 		num_frames_static = (30000 / frame_time_microsec) + 1;
10686 	}
10687 
10688 	params.triggers.cursor_update = true;
10689 	params.triggers.overlay_update = true;
10690 	params.triggers.surface_update = true;
10691 	params.num_frames = num_frames_static;
10692 
10693 	dc_stream_set_static_screen_params(link->ctx->dc,
10694 					   &stream, 1,
10695 					   &params);
10696 
10697 	return dc_link_set_psr_allow_active(link, true, false, false);
10698 }
10699 
10700 /*
10701  * amdgpu_dm_psr_disable() - disable psr f/w
10702  * @stream:  stream state
10703  *
10704  * Return: true if success
10705  */
10706 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10707 {
10708 
10709 	DRM_DEBUG_DRIVER("Disabling psr...\n");
10710 
10711 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
10712 }
10713 
10714 /*
10715  * amdgpu_dm_psr_disable() - disable psr f/w
10716  * if psr is enabled on any stream
10717  *
10718  * Return: true if success
10719  */
10720 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10721 {
10722 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10723 	return dc_set_psr_allow_active(dm->dc, false);
10724 }
10725 
10726 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10727 {
10728 	struct amdgpu_device *adev = drm_to_adev(dev);
10729 	struct dc *dc = adev->dm.dc;
10730 	int i;
10731 
10732 	mutex_lock(&adev->dm.dc_lock);
10733 	if (dc->current_state) {
10734 		for (i = 0; i < dc->current_state->stream_count; ++i)
10735 			dc->current_state->streams[i]
10736 				->triggered_crtc_reset.enabled =
10737 				adev->dm.force_timing_sync;
10738 
10739 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10740 		dc_trigger_sync(dc, dc->current_state);
10741 	}
10742 	mutex_unlock(&adev->dm.dc_lock);
10743 }
10744 
10745 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10746 		       uint32_t value, const char *func_name)
10747 {
10748 #ifdef DM_CHECK_ADDR_0
10749 	if (address == 0) {
10750 		DC_ERR("invalid register write. address = 0");
10751 		return;
10752 	}
10753 #endif
10754 	cgs_write_register(ctx->cgs_device, address, value);
10755 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10756 }
10757 
10758 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10759 			  const char *func_name)
10760 {
10761 	uint32_t value;
10762 #ifdef DM_CHECK_ADDR_0
10763 	if (address == 0) {
10764 		DC_ERR("invalid register read; address = 0\n");
10765 		return 0;
10766 	}
10767 #endif
10768 
10769 	if (ctx->dmub_srv &&
10770 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10771 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10772 		ASSERT(false);
10773 		return 0;
10774 	}
10775 
10776 	value = cgs_read_register(ctx->cgs_device, address);
10777 
10778 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10779 
10780 	return value;
10781 }
10782 
10783 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10784 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
10785 {
10786 	struct amdgpu_device *adev = ctx->driver_context;
10787 	int ret = 0;
10788 
10789 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10790 	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10791 	if (ret == 0) {
10792 		*operation_result = AUX_RET_ERROR_TIMEOUT;
10793 		return -1;
10794 	}
10795 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10796 
10797 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10798 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10799 
10800 		// For read case, Copy data to payload
10801 		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10802 		(*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10803 			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10804 			adev->dm.dmub_notify->aux_reply.length);
10805 	}
10806 
10807 	return adev->dm.dmub_notify->aux_reply.length;
10808 }
10809