1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 
55 #include "amd_shared.h"
56 #include "amdgpu_dm_irq.h"
57 #include "dm_helpers.h"
58 #include "amdgpu_dm_mst_types.h"
59 #if defined(CONFIG_DEBUG_FS)
60 #include "amdgpu_dm_debugfs.h"
61 #endif
62 #include "amdgpu_dm_psr.h"
63 
64 #include "ivsrcid/ivsrcid_vislands30.h"
65 
66 #include "i2caux_interface.h"
67 #include <linux/module.h>
68 #include <linux/moduleparam.h>
69 #include <linux/types.h>
70 #include <linux/pm_runtime.h>
71 #include <linux/pci.h>
72 #include <linux/firmware.h>
73 #include <linux/component.h>
74 
75 #include <drm/drm_atomic.h>
76 #include <drm/drm_atomic_uapi.h>
77 #include <drm/drm_atomic_helper.h>
78 #include <drm/drm_dp_mst_helper.h>
79 #include <drm/drm_fb_helper.h>
80 #include <drm/drm_fourcc.h>
81 #include <drm/drm_edid.h>
82 #include <drm/drm_vblank.h>
83 #include <drm/drm_audio_component.h>
84 
85 #if defined(CONFIG_DRM_AMD_DC_DCN)
86 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
87 
88 #include "dcn/dcn_1_0_offset.h"
89 #include "dcn/dcn_1_0_sh_mask.h"
90 #include "soc15_hw_ip.h"
91 #include "vega10_ip_offset.h"
92 
93 #include "soc15_common.h"
94 #endif
95 
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
99 
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116 
117 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
118 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
119 
120 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122 
123 /* Number of bytes in PSP header for firmware. */
124 #define PSP_HEADER_BYTES 0x100
125 
126 /* Number of bytes in PSP footer for firmware. */
127 #define PSP_FOOTER_BYTES 0x100
128 
129 /**
130  * DOC: overview
131  *
132  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
133  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
134  * requests into DC requests, and DC responses into DRM responses.
135  *
136  * The root control structure is &struct amdgpu_display_manager.
137  */
138 
139 /* basic init/fini API */
140 static int amdgpu_dm_init(struct amdgpu_device *adev);
141 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
143 
144 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
145 {
146 	switch (link->dpcd_caps.dongle_type) {
147 	case DISPLAY_DONGLE_NONE:
148 		return DRM_MODE_SUBCONNECTOR_Native;
149 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
150 		return DRM_MODE_SUBCONNECTOR_VGA;
151 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
152 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
153 		return DRM_MODE_SUBCONNECTOR_DVID;
154 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
155 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
156 		return DRM_MODE_SUBCONNECTOR_HDMIA;
157 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
158 	default:
159 		return DRM_MODE_SUBCONNECTOR_Unknown;
160 	}
161 }
162 
163 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
164 {
165 	struct dc_link *link = aconnector->dc_link;
166 	struct drm_connector *connector = &aconnector->base;
167 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
168 
169 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
170 		return;
171 
172 	if (aconnector->dc_sink)
173 		subconnector = get_subconnector_type(link);
174 
175 	drm_object_property_set_value(&connector->base,
176 			connector->dev->mode_config.dp_subconnector_property,
177 			subconnector);
178 }
179 
180 /*
181  * Initializes drm_device display related structures, based on the information
182  * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
183  * drm_encoder and drm_mode_config.
184  *
185  * Returns 0 on success
186  */
187 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
188 /* removes and deallocates the drm structures, created by the above function */
189 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
190 
191 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
192 				struct drm_plane *plane,
193 				unsigned long possible_crtcs,
194 				const struct dc_plane_cap *plane_cap);
195 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
196 			       struct drm_plane *plane,
197 			       uint32_t link_index);
198 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
199 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
200 				    uint32_t link_index,
201 				    struct amdgpu_encoder *amdgpu_encoder);
202 static int amdgpu_dm_encoder_init(struct drm_device *dev,
203 				  struct amdgpu_encoder *aencoder,
204 				  uint32_t link_index);
205 
206 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
207 
208 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
209 
210 static int amdgpu_dm_atomic_check(struct drm_device *dev,
211 				  struct drm_atomic_state *state);
212 
213 static void handle_cursor_update(struct drm_plane *plane,
214 				 struct drm_plane_state *old_plane_state);
215 
216 static const struct drm_format_info *
217 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
218 
219 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
220 
221 static bool
222 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
223 				 struct drm_crtc_state *new_crtc_state);
224 /*
225  * dm_vblank_get_counter
226  *
227  * @brief
228  * Get counter for number of vertical blanks
229  *
230  * @param
231  * struct amdgpu_device *adev - [in] desired amdgpu device
232  * int crtc - [in] which CRTC to get the counter from
233  *
234  * @return
235  * Counter for vertical blanks
236  */
237 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
238 {
239 	if (crtc >= adev->mode_info.num_crtc)
240 		return 0;
241 	else {
242 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
243 
244 		if (acrtc->dm_irq_params.stream == NULL) {
245 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
246 				  crtc);
247 			return 0;
248 		}
249 
250 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
251 	}
252 }
253 
254 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
255 				  u32 *vbl, u32 *position)
256 {
257 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
258 
259 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
260 		return -EINVAL;
261 	else {
262 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
263 
264 		if (acrtc->dm_irq_params.stream ==  NULL) {
265 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
266 				  crtc);
267 			return 0;
268 		}
269 
270 		/*
271 		 * TODO rework base driver to use values directly.
272 		 * for now parse it back into reg-format
273 		 */
274 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
275 					 &v_blank_start,
276 					 &v_blank_end,
277 					 &h_position,
278 					 &v_position);
279 
280 		*position = v_position | (h_position << 16);
281 		*vbl = v_blank_start | (v_blank_end << 16);
282 	}
283 
284 	return 0;
285 }
286 
287 static bool dm_is_idle(void *handle)
288 {
289 	/* XXX todo */
290 	return true;
291 }
292 
293 static int dm_wait_for_idle(void *handle)
294 {
295 	/* XXX todo */
296 	return 0;
297 }
298 
299 static bool dm_check_soft_reset(void *handle)
300 {
301 	return false;
302 }
303 
304 static int dm_soft_reset(void *handle)
305 {
306 	/* XXX todo */
307 	return 0;
308 }
309 
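/*
 * Look up the amdgpu_crtc whose OTG instance matches @otg_inst. Returns
 * NULL if no CRTC matches; an otg_inst of -1 is unexpected (warned) and
 * falls back to the first CRTC.
 */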
310 static struct amdgpu_crtc *
311 get_crtc_by_otg_inst(struct amdgpu_device *adev,
312 		     int otg_inst)
313 {
314 	struct drm_device *dev = adev_to_drm(adev);
315 	struct drm_crtc *crtc;
316 	struct amdgpu_crtc *amdgpu_crtc;
317 
318 	if (WARN_ON(otg_inst == -1))
319 		return adev->mode_info.crtcs[0];
320 
321 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
322 		amdgpu_crtc = to_amdgpu_crtc(crtc);
323 
324 		if (amdgpu_crtc->otg_inst == otg_inst)
325 			return amdgpu_crtc;
326 	}
327 
328 	return NULL;
329 }
330 
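/*
 * Check whether variable refresh rate is currently active for a CRTC,
 * using the interrupt-side copy of the freesync state (safe to call
 * from IRQ handlers).
 */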
331 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
332 {
333 	return acrtc->dm_irq_params.freesync_config.state ==
334 		       VRR_STATE_ACTIVE_VARIABLE ||
335 	       acrtc->dm_irq_params.freesync_config.state ==
336 		       VRR_STATE_ACTIVE_FIXED;
337 }
338 
339 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
340 {
341 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
342 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
343 }
344 
345 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
346 					      struct dm_crtc_state *new_state)
347 {
348 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
349 		return true;
350 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
351 		return true;
352 	else
353 		return false;
354 }
355 
356 /**
357  * dm_pflip_high_irq() - Handle pageflip interrupt
358  * @interrupt_params: interrupt parameters, used to find the CRTC that flipped
359  *
360  * Handles the pageflip interrupt by notifying all interested parties
361  * that the pageflip has been completed.
362  */
363 static void dm_pflip_high_irq(void *interrupt_params)
364 {
365 	struct amdgpu_crtc *amdgpu_crtc;
366 	struct common_irq_params *irq_params = interrupt_params;
367 	struct amdgpu_device *adev = irq_params->adev;
368 	unsigned long flags;
369 	struct drm_pending_vblank_event *e;
370 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
371 	bool vrr_active;
372 
373 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
374 
375 	/* IRQ could occur when in initial stage */
376 	/* TODO work and BO cleanup */
377 	if (amdgpu_crtc == NULL) {
378 		DC_LOG_PFLIP("CRTC is null, returning.\n");
379 		return;
380 	}
381 
382 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
383 
384 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
385 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
386 						 amdgpu_crtc->pflip_status,
387 						 AMDGPU_FLIP_SUBMITTED,
388 						 amdgpu_crtc->crtc_id,
389 						 amdgpu_crtc);
390 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
391 		return;
392 	}
393 
394 	/* page flip completed. */
395 	e = amdgpu_crtc->event;
396 	amdgpu_crtc->event = NULL;
397 
398 	WARN_ON(!e);
399 
400 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
401 
402 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
403 	if (!vrr_active ||
404 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
405 				      &v_blank_end, &hpos, &vpos) ||
406 	    (vpos < v_blank_start)) {
407 		/* Update to correct count and vblank timestamp if racing with
408 		 * vblank irq. This also updates to the correct vblank timestamp
409 		 * even in VRR mode, as scanout is past the front-porch atm.
410 		 */
411 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
412 
413 		/* Wake up userspace by sending the pageflip event with proper
414 		 * count and timestamp of vblank of flip completion.
415 		 */
416 		if (e) {
417 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
418 
419 			/* Event sent, so done with vblank for this flip */
420 			drm_crtc_vblank_put(&amdgpu_crtc->base);
421 		}
422 	} else if (e) {
423 		/* VRR active and inside front-porch: vblank count and
424 		 * timestamp for pageflip event will only be up to date after
425 		 * drm_crtc_handle_vblank() has been executed from late vblank
426 		 * irq handler after start of back-porch (vline 0). We queue the
427 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
428 		 * updated timestamp and count, once it runs after us.
429 		 *
430 		 * We need to open-code this instead of using the helper
431 		 * drm_crtc_arm_vblank_event(), as that helper would
432 		 * call drm_crtc_accurate_vblank_count(), which we must
433 		 * not call in VRR mode while we are in front-porch!
434 		 */
435 
436 		/* sequence will be replaced by real count during send-out. */
437 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
438 		e->pipe = amdgpu_crtc->crtc_id;
439 
440 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
441 		e = NULL;
442 	}
443 
444 	/* Keep track of vblank of this flip for flip throttling. We use the
445 	 * cooked hw counter, as that one incremented at start of this vblank
446 	 * of pageflip completion, so last_flip_vblank is the forbidden count
447 	 * for queueing new pageflips if vsync + VRR is enabled.
448 	 */
449 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
450 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
451 
452 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
453 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
454 
455 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
456 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
457 		     vrr_active, (int) !e);
458 }
459 
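/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: interrupt parameters
 *
 * Fires after the end of the front porch. Tracks the measured refresh
 * rate and, when VRR is active, performs core vblank handling here
 * (vblank timestamps are only valid after the front porch) as well as
 * BTR processing for pre-Vega (pre-DCE12) ASICs.
 */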
460 static void dm_vupdate_high_irq(void *interrupt_params)
461 {
462 	struct common_irq_params *irq_params = interrupt_params;
463 	struct amdgpu_device *adev = irq_params->adev;
464 	struct amdgpu_crtc *acrtc;
465 	struct drm_device *drm_dev;
466 	struct drm_vblank_crtc *vblank;
467 	ktime_t frame_duration_ns, previous_timestamp;
468 	unsigned long flags;
469 	int vrr_active;
470 
471 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
472 
473 	if (acrtc) {
474 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
475 		drm_dev = acrtc->base.dev;
476 		vblank = &drm_dev->vblank[acrtc->base.index];
477 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
478 		frame_duration_ns = vblank->time - previous_timestamp;
479 
480 		if (frame_duration_ns > 0) {
481 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
482 						frame_duration_ns,
483 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
484 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
485 		}
486 
487 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
488 			      acrtc->crtc_id,
489 			      vrr_active);
490 
491 		/* Core vblank handling is done here after end of front-porch in
492 		 * vrr mode, as vblank timestamping will give valid results
493 		 * while now done after front-porch. This will also deliver
494 		 * page-flip completion events that have been queued to us
495 		 * if a pageflip happened inside front-porch.
496 		 */
497 		if (vrr_active) {
498 			drm_crtc_handle_vblank(&acrtc->base);
499 
500 			/* BTR processing for pre-DCE12 ASICs */
501 			if (acrtc->dm_irq_params.stream &&
502 			    adev->family < AMDGPU_FAMILY_AI) {
503 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
504 				mod_freesync_handle_v_update(
505 				    adev->dm.freesync_module,
506 				    acrtc->dm_irq_params.stream,
507 				    &acrtc->dm_irq_params.vrr_params);
508 
509 				dc_stream_adjust_vmin_vmax(
510 				    adev->dm.dc,
511 				    acrtc->dm_irq_params.stream,
512 				    &acrtc->dm_irq_params.vrr_params.adjust);
513 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
514 			}
515 		}
516 	}
517 }
518 
519 /**
520  * dm_crtc_high_irq() - Handles CRTC interrupt
521  * @interrupt_params: used for determining the CRTC instance
522  *
523  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
524  * event handler.
525  */
526 static void dm_crtc_high_irq(void *interrupt_params)
527 {
528 	struct common_irq_params *irq_params = interrupt_params;
529 	struct amdgpu_device *adev = irq_params->adev;
530 	struct amdgpu_crtc *acrtc;
531 	unsigned long flags;
532 	int vrr_active;
533 
534 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
535 	if (!acrtc)
536 		return;
537 
538 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
539 
540 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
541 		      vrr_active, acrtc->dm_irq_params.active_planes);
542 
543 	/*
544 	 * Core vblank handling at the start of front-porch is only possible
545 	 * in non-vrr mode, as that is the only case where vblank timestamping
546 	 * gives valid results while inside the front-porch. Otherwise defer it
547 	 * to dm_vupdate_high_irq after the end of front-porch.
548 	 */
549 	if (!vrr_active)
550 		drm_crtc_handle_vblank(&acrtc->base);
551 
552 	/*
553 	 * The following must happen at the start of vblank, for crc
554 	 * computation and below-the-range btr support in vrr mode.
555 	 */
556 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
557 
558 	/* BTR updates need to happen before VUPDATE on Vega and above. */
559 	if (adev->family < AMDGPU_FAMILY_AI)
560 		return;
561 
562 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
563 
564 	if (acrtc->dm_irq_params.stream &&
565 	    acrtc->dm_irq_params.vrr_params.supported &&
566 	    acrtc->dm_irq_params.freesync_config.state ==
567 		    VRR_STATE_ACTIVE_VARIABLE) {
568 		mod_freesync_handle_v_update(adev->dm.freesync_module,
569 					     acrtc->dm_irq_params.stream,
570 					     &acrtc->dm_irq_params.vrr_params);
571 
572 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
573 					   &acrtc->dm_irq_params.vrr_params.adjust);
574 	}
575 
576 	/*
577 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
578 	 * In that case, pageflip completion interrupts won't fire and pageflip
579 	 * completion events won't get delivered. Prevent this by sending
580 	 * pending pageflip events from here if a flip is still pending.
581 	 *
582 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
583 	 * avoid race conditions between flip programming and completion,
584 	 * which could cause too early flip completion events.
585 	 */
586 	if (adev->family >= AMDGPU_FAMILY_RV &&
587 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
588 	    acrtc->dm_irq_params.active_planes == 0) {
589 		if (acrtc->event) {
590 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
591 			acrtc->event = NULL;
592 			drm_crtc_vblank_put(&acrtc->base);
593 		}
594 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
595 	}
596 
597 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
598 }
599 
600 #if defined(CONFIG_DRM_AMD_DC_DCN)
601 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
602 /**
603  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
604  * DCN generation ASICs
605  * @interrupt_params: interrupt parameters
606  *
607  * Used to set crc window/read out crc value at vertical line 0 position
608  */
609 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
610 {
611 	struct common_irq_params *irq_params = interrupt_params;
612 	struct amdgpu_device *adev = irq_params->adev;
613 	struct amdgpu_crtc *acrtc;
614 
615 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
616 
617 	if (!acrtc)
618 		return;
619 
620 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
621 }
622 #endif
623 
624 /**
625  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
626  * @adev: amdgpu_device pointer
627  * @notify: dmub notification structure
628  *
629  * DMUB AUX or SET_CONFIG command completion processing callback.
630  * Copies the dmub notification to DM so it can be read by the AUX command
631  * issuing thread, and signals the event to wake up that thread.
632  */
633 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
634 {
635 	if (adev->dm.dmub_notify)
636 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
637 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
638 		complete(&adev->dm.dmub_aux_transfer_done);
639 }
640 
641 /**
642  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
643  * @adev: amdgpu_device pointer
644  * @notify: dmub notification structure
645  *
646  * DMUB HPD interrupt processing callback. Finds the connector for the
647  * given link index and calls the HPD helper to do the processing.
648  */
649 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
650 {
651 	struct amdgpu_dm_connector *aconnector;
652 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
653 	struct drm_connector *connector;
654 	struct drm_connector_list_iter iter;
655 	struct dc_link *link;
656 	uint8_t link_index = 0;
657 	struct drm_device *dev = adev->dm.ddev;
658 
659 	if (adev == NULL)
660 		return;
661 
662 	if (notify == NULL) {
663 		DRM_ERROR("DMUB HPD callback notification was NULL");
664 		return;
665 	}
666 
667 	if (notify->link_index >= adev->dm.dc->link_count) {
668 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
669 		return;
670 	}
671 
672 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
673 
674 	link_index = notify->link_index;
675 
676 	link = adev->dm.dc->links[link_index];
677 
678 	drm_connector_list_iter_begin(dev, &iter);
679 	drm_for_each_connector_iter(connector, &iter) {
680 		aconnector = to_amdgpu_dm_connector(connector);
681 		if (link && aconnector->dc_link == link) {
682 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
683 			hpd_aconnector = aconnector;
684 			break;
685 		}
686 	}
687 	drm_connector_list_iter_end(&iter);
688 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
689 
690 	if (hpd_aconnector)
691 		handle_hpd_irq_helper(hpd_aconnector);
692 }
693 
694 /**
695  * register_dmub_notify_callback - Sets callback for DMUB notify
696  * @adev: amdgpu_device pointer
697  * @type: Type of dmub notification
698  * @callback: Dmub interrupt callback function
699  * @dmub_int_thread_offload: offload indicator
700  *
701  * API to register a dmub callback handler for a dmub notification.
702  * Also sets an indicator of whether callback processing is to be offloaded
703  * to the dmub interrupt handling thread.
704  * Return: true if successfully registered, false on invalid input
705  */
706 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
707 dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
708 {
709 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
710 		adev->dm.dmub_callback[type] = callback;
711 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
712 	} else
713 		return false;
714 
715 	return true;
716 }
717 
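/*
 * Deferred work handler for DMUB HPD notifications that were offloaded
 * from the outbox IRQ: dispatches the notification to the registered
 * callback and frees the work item.
 */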
718 static void dm_handle_hpd_work(struct work_struct *work)
719 {
720 	struct dmub_hpd_work *dmub_hpd_wrk;
721 
722 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
723 
724 	if (!dmub_hpd_wrk->dmub_notify) {
725 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
726 		return;
727 	}
728 
729 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
730 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
731 		dmub_hpd_wrk->dmub_notify);
732 	}
733 
734 	kfree(dmub_hpd_wrk->dmub_notify);
735 	kfree(dmub_hpd_wrk);
736 
737 }
738 
739 #define DMUB_TRACE_MAX_READ 64
740 /**
741  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
742  * @interrupt_params: used for determining the Outbox instance
743  *
744  * Handles the Outbox interrupt by processing DMUB notifications and
745  * draining the DMUB trace buffer.
746  */
747 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
748 {
749 	struct dmub_notification notify;
750 	struct common_irq_params *irq_params = interrupt_params;
751 	struct amdgpu_device *adev = irq_params->adev;
752 	struct amdgpu_display_manager *dm = &adev->dm;
753 	struct dmcub_trace_buf_entry entry = { 0 };
754 	uint32_t count = 0;
755 	struct dmub_hpd_work *dmub_hpd_wrk;
756 	struct dc_link *plink = NULL;
757 
758 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
759 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
760 
761 		do {
762 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
763 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
764 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
765 				continue;
766 			}
767 			if (dm->dmub_thread_offload[notify.type] == true) {
768 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
769 				if (!dmub_hpd_wrk) {
770 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
771 					return;
772 				}
773 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
774 				if (!dmub_hpd_wrk->dmub_notify) {
775 					kfree(dmub_hpd_wrk);
776 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
777 					return;
778 				}
779 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
780 				if (dmub_hpd_wrk->dmub_notify)
781 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
782 				dmub_hpd_wrk->adev = adev;
783 				if (notify.type == DMUB_NOTIFICATION_HPD) {
784 					plink = adev->dm.dc->links[notify.link_index];
785 					if (plink) {
786 						plink->hpd_status =
787 							notify.hpd_status ==
788 							DP_HPD_PLUG ? true : false;
789 					}
790 				}
791 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
792 			} else {
793 				dm->dmub_callback[notify.type](adev, &notify);
794 			}
795 		} while (notify.pending_notification);
796 	}
797 
798 
799 	do {
800 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
801 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
802 							entry.param0, entry.param1);
803 
804 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
805 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
806 		} else
807 			break;
808 
809 		count++;
810 
811 	} while (count <= DMUB_TRACE_MAX_READ);
812 
813 	if (count > DMUB_TRACE_MAX_READ)
814 		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
815 }
816 #endif
817 
818 static int dm_set_clockgating_state(void *handle,
819 		  enum amd_clockgating_state state)
820 {
821 	return 0;
822 }
823 
824 static int dm_set_powergating_state(void *handle,
825 		  enum amd_powergating_state state)
826 {
827 	return 0;
828 }
829 
830 /* Prototypes of private functions */
831 static int dm_early_init(void *handle);
832 
833 /* Allocate memory for FBC compressed data  */
834 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
835 {
836 	struct drm_device *dev = connector->dev;
837 	struct amdgpu_device *adev = drm_to_adev(dev);
838 	struct dm_compressor_info *compressor = &adev->dm.compressor;
839 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
840 	struct drm_display_mode *mode;
841 	unsigned long max_size = 0;
842 
843 	if (adev->dm.dc->fbc_compressor == NULL)
844 		return;
845 
846 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
847 		return;
848 
849 	if (compressor->bo_ptr)
850 		return;
851 
852 
853 	list_for_each_entry(mode, &connector->modes, head) {
854 		if (max_size < mode->htotal * mode->vtotal)
855 			max_size = mode->htotal * mode->vtotal;
856 	}
857 
858 	if (max_size) {
859 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
860 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
861 			    &compressor->gpu_addr, &compressor->cpu_addr);
862 
863 		if (r)
864 			DRM_ERROR("DM: Failed to initialize FBC\n");
865 		else {
866 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
867 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
868 		}
869 
870 	}
871 
872 }
873 
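/*
 * HDA audio component callback: find the connector driving audio pin
 * @port and copy its ELD (EDID-Like Data) into @buf.
 */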
874 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
875 					  int pipe, bool *enabled,
876 					  unsigned char *buf, int max_bytes)
877 {
878 	struct drm_device *dev = dev_get_drvdata(kdev);
879 	struct amdgpu_device *adev = drm_to_adev(dev);
880 	struct drm_connector *connector;
881 	struct drm_connector_list_iter conn_iter;
882 	struct amdgpu_dm_connector *aconnector;
883 	int ret = 0;
884 
885 	*enabled = false;
886 
887 	mutex_lock(&adev->dm.audio_lock);
888 
889 	drm_connector_list_iter_begin(dev, &conn_iter);
890 	drm_for_each_connector_iter(connector, &conn_iter) {
891 		aconnector = to_amdgpu_dm_connector(connector);
892 		if (aconnector->audio_inst != port)
893 			continue;
894 
895 		*enabled = true;
896 		ret = drm_eld_size(connector->eld);
897 		memcpy(buf, connector->eld, min(max_bytes, ret));
898 
899 		break;
900 	}
901 	drm_connector_list_iter_end(&conn_iter);
902 
903 	mutex_unlock(&adev->dm.audio_lock);
904 
905 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
906 
907 	return ret;
908 }
909 
910 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
911 	.get_eld = amdgpu_dm_audio_component_get_eld,
912 };
913 
914 static int amdgpu_dm_audio_component_bind(struct device *kdev,
915 				       struct device *hda_kdev, void *data)
916 {
917 	struct drm_device *dev = dev_get_drvdata(kdev);
918 	struct amdgpu_device *adev = drm_to_adev(dev);
919 	struct drm_audio_component *acomp = data;
920 
921 	acomp->ops = &amdgpu_dm_audio_component_ops;
922 	acomp->dev = kdev;
923 	adev->dm.audio_component = acomp;
924 
925 	return 0;
926 }
927 
928 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
929 					  struct device *hda_kdev, void *data)
930 {
931 	struct drm_device *dev = dev_get_drvdata(kdev);
932 	struct amdgpu_device *adev = drm_to_adev(dev);
933 	struct drm_audio_component *acomp = data;
934 
935 	acomp->ops = NULL;
936 	acomp->dev = NULL;
937 	adev->dm.audio_component = NULL;
938 }
939 
940 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
941 	.bind	= amdgpu_dm_audio_component_bind,
942 	.unbind	= amdgpu_dm_audio_component_unbind,
943 };
944 
945 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
946 {
947 	int i, ret;
948 
949 	if (!amdgpu_audio)
950 		return 0;
951 
952 	adev->mode_info.audio.enabled = true;
953 
954 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
955 
956 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
957 		adev->mode_info.audio.pin[i].channels = -1;
958 		adev->mode_info.audio.pin[i].rate = -1;
959 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
960 		adev->mode_info.audio.pin[i].status_bits = 0;
961 		adev->mode_info.audio.pin[i].category_code = 0;
962 		adev->mode_info.audio.pin[i].connected = false;
963 		adev->mode_info.audio.pin[i].id =
964 			adev->dm.dc->res_pool->audios[i]->inst;
965 		adev->mode_info.audio.pin[i].offset = 0;
966 	}
967 
968 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
969 	if (ret < 0)
970 		return ret;
971 
972 	adev->dm.audio_registered = true;
973 
974 	return 0;
975 }
976 
977 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
978 {
979 	if (!amdgpu_audio)
980 		return;
981 
982 	if (!adev->mode_info.audio.enabled)
983 		return;
984 
985 	if (adev->dm.audio_registered) {
986 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
987 		adev->dm.audio_registered = false;
988 	}
989 
990 	/* TODO: Disable audio? */
991 
992 	adev->mode_info.audio.enabled = false;
993 }
994 
995 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
996 {
997 	struct drm_audio_component *acomp = adev->dm.audio_component;
998 
999 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1000 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1001 
1002 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1003 						 pin, -1);
1004 	}
1005 }
1006 
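/*
 * Initialize the DMUB hardware: copy the firmware, BSS data and VBIOS
 * into the reserved framebuffer windows, program the hardware
 * parameters, start the DMUB service and wait for the firmware
 * auto-load to complete.
 */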
1007 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1008 {
1009 	const struct dmcub_firmware_header_v1_0 *hdr;
1010 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1011 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1012 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1013 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1014 	struct abm *abm = adev->dm.dc->res_pool->abm;
1015 	struct dmub_srv_hw_params hw_params;
1016 	enum dmub_status status;
1017 	const unsigned char *fw_inst_const, *fw_bss_data;
1018 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1019 	bool has_hw_support;
1020 	struct dc *dc = adev->dm.dc;
1021 
1022 	if (!dmub_srv)
1023 		/* DMUB isn't supported on the ASIC. */
1024 		return 0;
1025 
1026 	if (!fb_info) {
1027 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1028 		return -EINVAL;
1029 	}
1030 
1031 	if (!dmub_fw) {
1032 		/* Firmware required for DMUB support. */
1033 		DRM_ERROR("No firmware provided for DMUB.\n");
1034 		return -EINVAL;
1035 	}
1036 
1037 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1038 	if (status != DMUB_STATUS_OK) {
1039 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1040 		return -EINVAL;
1041 	}
1042 
1043 	if (!has_hw_support) {
1044 		DRM_INFO("DMUB unsupported on ASIC\n");
1045 		return 0;
1046 	}
1047 
1048 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1049 
1050 	fw_inst_const = dmub_fw->data +
1051 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1052 			PSP_HEADER_BYTES;
1053 
1054 	fw_bss_data = dmub_fw->data +
1055 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1056 		      le32_to_cpu(hdr->inst_const_bytes);
1057 
1058 	/* Copy firmware and bios info into FB memory. */
1059 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1060 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1061 
1062 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1063 
1064 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1065 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1066 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1067 	 * will be done by dm_dmub_hw_init
1068 	 */
1069 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1070 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1071 				fw_inst_const_size);
1072 	}
1073 
1074 	if (fw_bss_data_size)
1075 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1076 		       fw_bss_data, fw_bss_data_size);
1077 
1078 	/* Copy firmware bios info into FB memory. */
1079 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1080 	       adev->bios_size);
1081 
1082 	/* Reset regions that need to be reset. */
1083 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1084 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1085 
1086 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1087 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1088 
1089 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1090 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1091 
1092 	/* Initialize hardware. */
1093 	memset(&hw_params, 0, sizeof(hw_params));
1094 	hw_params.fb_base = adev->gmc.fb_start;
1095 	hw_params.fb_offset = adev->gmc.aper_base;
1096 
1097 	/* backdoor load firmware and trigger dmub running */
1098 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1099 		hw_params.load_inst_const = true;
1100 
1101 	if (dmcu)
1102 		hw_params.psp_version = dmcu->psp_version;
1103 
1104 	for (i = 0; i < fb_info->num_fb; ++i)
1105 		hw_params.fb[i] = &fb_info->fb[i];
1106 
1107 	switch (adev->asic_type) {
1108 	case CHIP_YELLOW_CARP:
1109 		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
1110 			hw_params.dpia_supported = true;
1111 #if defined(CONFIG_DRM_AMD_DC_DCN)
1112 			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
1113 #endif
1114 		}
1115 		break;
1116 	default:
1117 		break;
1118 	}
1119 
1120 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1121 	if (status != DMUB_STATUS_OK) {
1122 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1123 		return -EINVAL;
1124 	}
1125 
1126 	/* Wait for firmware load to finish. */
1127 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1128 	if (status != DMUB_STATUS_OK)
1129 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1130 
1131 	/* Init DMCU and ABM if available. */
1132 	if (dmcu && abm) {
1133 		dmcu->funcs->dmcu_init(dmcu);
1134 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1135 	}
1136 
1137 	if (!adev->dm.dc->ctx->dmub_srv)
1138 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1139 	if (!adev->dm.dc->ctx->dmub_srv) {
1140 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1141 		return -ENOMEM;
1142 	}
1143 
1144 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1145 		 adev->dm.dmcub_fw_version);
1146 
1147 	return 0;
1148 }
1149 
1150 #if defined(CONFIG_DRM_AMD_DC_DCN)
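/*
 * Translate the GMC view of the system aperture, AGP window and GART
 * page tables into the physical address space configuration consumed
 * by DC on DCN ASICs.
 */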
1151 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1152 {
1153 	uint64_t pt_base;
1154 	uint32_t logical_addr_low;
1155 	uint32_t logical_addr_high;
1156 	uint32_t agp_base, agp_bot, agp_top;
1157 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1158 
1159 	memset(pa_config, 0, sizeof(*pa_config));
1160 
1161 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1162 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1163 
1164 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1165 		/*
1166 		 * Raven2 has a HW issue where it cannot use VRAM that lies above
1167 		 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, raise the
1168 		 * system aperture high address by 1 to avoid the VM fault and
1169 		 * hardware hang.
1170 		 */
1171 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1172 	else
1173 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1174 
1175 	agp_base = 0;
1176 	agp_bot = adev->gmc.agp_start >> 24;
1177 	agp_top = adev->gmc.agp_end >> 24;
1178 
1179 
1180 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1181 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1182 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1183 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1184 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1185 	page_table_base.low_part = lower_32_bits(pt_base);
1186 
1187 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1188 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1189 
1190 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1191 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1192 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1193 
1194 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1195 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1196 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1197 
1198 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1199 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1200 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1201 
1202 	pa_config->is_hvm_enabled = 0;
1203 
1204 }
1205 #endif
1206 #if defined(CONFIG_DRM_AMD_DC_DCN)
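/*
 * Deferred vblank work: track how many CRTCs have vblank interrupts
 * enabled, allow idle (MALL) optimizations only when none do, and
 * enable or disable PSR based on the OS vblank requirements.
 */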
1207 static void vblank_control_worker(struct work_struct *work)
1208 {
1209 	struct vblank_control_work *vblank_work =
1210 		container_of(work, struct vblank_control_work, work);
1211 	struct amdgpu_display_manager *dm = vblank_work->dm;
1212 
1213 	mutex_lock(&dm->dc_lock);
1214 
1215 	if (vblank_work->enable)
1216 		dm->active_vblank_irq_count++;
1217 	else if (dm->active_vblank_irq_count)
1218 		dm->active_vblank_irq_count--;
1219 
1220 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1221 
1222 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1223 
1224 	/* Control PSR based on vblank requirements from OS */
1225 	if (vblank_work->stream && vblank_work->stream->link) {
1226 		if (vblank_work->enable) {
1227 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1228 				amdgpu_dm_psr_disable(vblank_work->stream);
1229 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1230 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1231 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1232 			amdgpu_dm_psr_enable(vblank_work->stream);
1233 		}
1234 	}
1235 
1236 	mutex_unlock(&dm->dc_lock);
1237 
1238 	dc_stream_release(vblank_work->stream);
1239 
1240 	kfree(vblank_work);
1241 }
1242 
1243 #endif
1244 
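/*
 * Deferred HPD RX IRQ work: re-detect the sink and, when not in GPU
 * reset, handle automated test requests or recover from link loss.
 */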
1245 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1246 {
1247 	struct hpd_rx_irq_offload_work *offload_work;
1248 	struct amdgpu_dm_connector *aconnector;
1249 	struct dc_link *dc_link;
1250 	struct amdgpu_device *adev;
1251 	enum dc_connection_type new_connection_type = dc_connection_none;
1252 	unsigned long flags;
1253 
1254 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1255 	aconnector = offload_work->offload_wq->aconnector;
1256 
1257 	if (!aconnector) {
1258 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1259 		goto skip;
1260 	}
1261 
1262 	adev = drm_to_adev(aconnector->base.dev);
1263 	dc_link = aconnector->dc_link;
1264 
1265 	mutex_lock(&aconnector->hpd_lock);
1266 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1267 		DRM_ERROR("KMS: Failed to detect connector\n");
1268 	mutex_unlock(&aconnector->hpd_lock);
1269 
1270 	if (new_connection_type == dc_connection_none)
1271 		goto skip;
1272 
1273 	if (amdgpu_in_reset(adev))
1274 		goto skip;
1275 
1276 	mutex_lock(&adev->dm.dc_lock);
1277 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1278 		dc_link_dp_handle_automated_test(dc_link);
1279 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1280 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1281 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1282 		dc_link_dp_handle_link_loss(dc_link);
1283 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1284 		offload_work->offload_wq->is_handling_link_loss = false;
1285 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1286 	}
1287 	mutex_unlock(&adev->dm.dc_lock);
1288 
1289 skip:
1290 	kfree(offload_work);
1291 
1292 }
1293 
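/*
 * Allocate one single-threaded workqueue per link for deferred HPD RX
 * interrupt handling.
 */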
1294 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1295 {
1296 	int max_caps = dc->caps.max_links;
1297 	int i = 0;
1298 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1299 
1300 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1301 
1302 	if (!hpd_rx_offload_wq)
1303 		return NULL;
1304 
1305 
1306 	for (i = 0; i < max_caps; i++) {
1307 		hpd_rx_offload_wq[i].wq =
1308 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1309 
1310 		if (hpd_rx_offload_wq[i].wq == NULL) {
1311 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1312 			return NULL;
1313 		}
1314 
1315 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1316 	}
1317 
1318 	return hpd_rx_offload_wq;
1319 }
1320 
1321 struct amdgpu_stutter_quirk {
1322 	u16 chip_vendor;
1323 	u16 chip_device;
1324 	u16 subsys_vendor;
1325 	u16 subsys_device;
1326 	u8 revision;
1327 };
1328 
1329 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1330 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1331 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1332 	{ 0, 0, 0, 0, 0 },
1333 };
1334 
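/*
 * Return true if the PCI device matches an entry in the stutter quirk
 * list above and memory stutter should therefore be disabled.
 */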
1335 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1336 {
1337 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1338 
1339 	while (p && p->chip_device != 0) {
1340 		if (pdev->vendor == p->chip_vendor &&
1341 		    pdev->device == p->chip_device &&
1342 		    pdev->subsystem_vendor == p->subsys_vendor &&
1343 		    pdev->subsystem_device == p->subsys_device &&
1344 		    pdev->revision == p->revision) {
1345 			return true;
1346 		}
1347 		++p;
1348 	}
1349 	return false;
1350 }
1351 
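/*
 * Top-level DM initialization: creates the DC instance from the ASIC and
 * feature information, brings up DMUB, freesync and (where supported)
 * HDCP, registers the DMUB notification callbacks and initializes the
 * DRM display structures.
 */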
1352 static int amdgpu_dm_init(struct amdgpu_device *adev)
1353 {
1354 	struct dc_init_data init_data;
1355 #ifdef CONFIG_DRM_AMD_DC_HDCP
1356 	struct dc_callback_init init_params;
1357 #endif
1358 	int r;
1359 
1360 	adev->dm.ddev = adev_to_drm(adev);
1361 	adev->dm.adev = adev;
1362 
1363 	/* Zero all the fields */
1364 	memset(&init_data, 0, sizeof(init_data));
1365 #ifdef CONFIG_DRM_AMD_DC_HDCP
1366 	memset(&init_params, 0, sizeof(init_params));
1367 #endif
1368 
1369 	mutex_init(&adev->dm.dc_lock);
1370 	mutex_init(&adev->dm.audio_lock);
1371 #if defined(CONFIG_DRM_AMD_DC_DCN)
1372 	spin_lock_init(&adev->dm.vblank_lock);
1373 #endif
1374 
1375 	if (amdgpu_dm_irq_init(adev)) {
1376 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1377 		goto error;
1378 	}
1379 
1380 	init_data.asic_id.chip_family = adev->family;
1381 
1382 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1383 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1384 	init_data.asic_id.chip_id = adev->pdev->device;
1385 
1386 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1387 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1388 	init_data.asic_id.atombios_base_address =
1389 		adev->mode_info.atom_context->bios;
1390 
1391 	init_data.driver = adev;
1392 
1393 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1394 
1395 	if (!adev->dm.cgs_device) {
1396 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1397 		goto error;
1398 	}
1399 
1400 	init_data.cgs_device = adev->dm.cgs_device;
1401 
1402 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1403 
1404 	switch (adev->asic_type) {
1405 	case CHIP_CARRIZO:
1406 	case CHIP_STONEY:
1407 		init_data.flags.gpu_vm_support = true;
1408 		break;
1409 	default:
1410 		switch (adev->ip_versions[DCE_HWIP][0]) {
1411 		case IP_VERSION(2, 1, 0):
1412 			init_data.flags.gpu_vm_support = true;
1413 			init_data.flags.disable_dmcu = true;
1414 			break;
1415 		case IP_VERSION(1, 0, 0):
1416 		case IP_VERSION(1, 0, 1):
1417 		case IP_VERSION(3, 0, 1):
1418 		case IP_VERSION(3, 1, 2):
1419 		case IP_VERSION(3, 1, 3):
1420 			init_data.flags.gpu_vm_support = true;
1421 			break;
1422 		case IP_VERSION(2, 0, 3):
1423 			init_data.flags.disable_dmcu = true;
1424 			break;
1425 		default:
1426 			break;
1427 		}
1428 		break;
1429 	}
1430 
1431 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1432 		init_data.flags.fbc_support = true;
1433 
1434 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1435 		init_data.flags.multi_mon_pp_mclk_switch = true;
1436 
1437 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1438 		init_data.flags.disable_fractional_pwm = true;
1439 
1440 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1441 		init_data.flags.edp_no_power_sequencing = true;
1442 
1443 	init_data.flags.power_down_display_on_boot = true;
1444 
1445 	INIT_LIST_HEAD(&adev->dm.da_list);
1446 	/* Display Core create. */
1447 	adev->dm.dc = dc_create(&init_data);
1448 
1449 	if (adev->dm.dc) {
1450 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1451 	} else {
1452 		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1453 		goto error;
1454 	}
1455 
1456 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1457 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1458 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1459 	}
1460 
1461 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1462 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1463 	if (dm_should_disable_stutter(adev->pdev))
1464 		adev->dm.dc->debug.disable_stutter = true;
1465 
1466 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1467 		adev->dm.dc->debug.disable_stutter = true;
1468 
1469 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1470 		adev->dm.dc->debug.disable_dsc = true;
1471 
1472 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1473 		adev->dm.dc->debug.disable_clock_gate = true;
1474 
1475 	r = dm_dmub_hw_init(adev);
1476 	if (r) {
1477 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1478 		goto error;
1479 	}
1480 
1481 	dc_hardware_init(adev->dm.dc);
1482 
1483 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1484 	if (!adev->dm.hpd_rx_offload_wq) {
1485 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1486 		goto error;
1487 	}
1488 
1489 #if defined(CONFIG_DRM_AMD_DC_DCN)
1490 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1491 		struct dc_phy_addr_space_config pa_config;
1492 
1493 		mmhub_read_system_context(adev, &pa_config);
1494 
1495 		// Call the DC init_memory func
1496 		dc_setup_system_context(adev->dm.dc, &pa_config);
1497 	}
1498 #endif
1499 
1500 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1501 	if (!adev->dm.freesync_module) {
1502 		DRM_ERROR(
1503 		"amdgpu: failed to initialize freesync_module.\n");
1504 	} else
1505 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1506 				adev->dm.freesync_module);
1507 
1508 	amdgpu_dm_init_color_mod();
1509 
1510 #if defined(CONFIG_DRM_AMD_DC_DCN)
1511 	if (adev->dm.dc->caps.max_links > 0) {
1512 		adev->dm.vblank_control_workqueue =
1513 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1514 		if (!adev->dm.vblank_control_workqueue)
1515 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1516 	}
1517 #endif
1518 
1519 #ifdef CONFIG_DRM_AMD_DC_HDCP
1520 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1521 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1522 
1523 		if (!adev->dm.hdcp_workqueue)
1524 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1525 		else
1526 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1527 
1528 		dc_init_callbacks(adev->dm.dc, &init_params);
1529 	}
1530 #endif
1531 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1532 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1533 #endif
1534 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1535 		init_completion(&adev->dm.dmub_aux_transfer_done);
1536 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1537 		if (!adev->dm.dmub_notify) {
1538 			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify");
1539 			goto error;
1540 		}
1541 
1542 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1543 		if (!adev->dm.delayed_hpd_wq) {
1544 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1545 			goto error;
1546 		}
1547 
1548 		amdgpu_dm_outbox_init(adev);
1549 #if defined(CONFIG_DRM_AMD_DC_DCN)
1550 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1551 			dmub_aux_setconfig_callback, false)) {
1552 			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1553 			goto error;
1554 		}
1555 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1556 			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1557 			goto error;
1558 		}
1559 #endif
1560 	}
1561 
1562 	if (amdgpu_dm_initialize_drm_device(adev)) {
1563 		DRM_ERROR(
1564 		"amdgpu: failed to initialize sw for display support.\n");
1565 		goto error;
1566 	}
1567 
1568 	/* create fake encoders for MST */
1569 	dm_dp_create_fake_mst_encoders(adev);
1570 
1571 	/* TODO: Add_display_info? */
1572 
1573 	/* TODO use dynamic cursor width */
1574 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1575 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1576 
1577 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1578 		DRM_ERROR(
1579 		"amdgpu: failed to initialize vblank support.\n");
1580 		goto error;
1581 	}
1582 
1583 
1584 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1585 
1586 	return 0;
1587 error:
1588 	amdgpu_dm_fini(adev);
1589 
1590 	return -EINVAL;
1591 }
1592 
1593 static int amdgpu_dm_early_fini(void *handle)
1594 {
1595 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1596 
1597 	amdgpu_dm_audio_fini(adev);
1598 
1599 	return 0;
1600 }
1601 
1602 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1603 {
1604 	int i;
1605 
1606 #if defined(CONFIG_DRM_AMD_DC_DCN)
1607 	if (adev->dm.vblank_control_workqueue) {
1608 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1609 		adev->dm.vblank_control_workqueue = NULL;
1610 	}
1611 #endif
1612 
1613 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1614 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1615 	}
1616 
1617 	amdgpu_dm_destroy_drm_device(&adev->dm);
1618 
1619 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1620 	if (adev->dm.crc_rd_wrk) {
1621 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1622 		kfree(adev->dm.crc_rd_wrk);
1623 		adev->dm.crc_rd_wrk = NULL;
1624 	}
1625 #endif
1626 #ifdef CONFIG_DRM_AMD_DC_HDCP
1627 	if (adev->dm.hdcp_workqueue) {
1628 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1629 		adev->dm.hdcp_workqueue = NULL;
1630 	}
1631 
1632 	if (adev->dm.dc)
1633 		dc_deinit_callbacks(adev->dm.dc);
1634 #endif
1635 
1636 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1637 
1638 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1639 		kfree(adev->dm.dmub_notify);
1640 		adev->dm.dmub_notify = NULL;
1641 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1642 		adev->dm.delayed_hpd_wq = NULL;
1643 	}
1644 
1645 	if (adev->dm.dmub_bo)
1646 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1647 				      &adev->dm.dmub_bo_gpu_addr,
1648 				      &adev->dm.dmub_bo_cpu_addr);
1649 
1650 	if (adev->dm.hpd_rx_offload_wq) {
1651 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1652 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1653 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1654 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1655 			}
1656 		}
1657 
1658 		kfree(adev->dm.hpd_rx_offload_wq);
1659 		adev->dm.hpd_rx_offload_wq = NULL;
1660 	}
1661 
1662 	/* DC Destroy TODO: Replace destroy DAL */
1663 	if (adev->dm.dc)
1664 		dc_destroy(&adev->dm.dc);
1665 	/*
1666 	 * TODO: pageflip, vlank interrupt
1667 	 *
1668 	 * amdgpu_dm_irq_fini(adev);
1669 	 */
1670 
1671 	if (adev->dm.cgs_device) {
1672 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1673 		adev->dm.cgs_device = NULL;
1674 	}
1675 	if (adev->dm.freesync_module) {
1676 		mod_freesync_destroy(adev->dm.freesync_module);
1677 		adev->dm.freesync_module = NULL;
1678 	}
1679 
1680 	mutex_destroy(&adev->dm.audio_lock);
1681 	mutex_destroy(&adev->dm.dc_lock);
1684 }
1685 
1686 static int load_dmcu_fw(struct amdgpu_device *adev)
1687 {
1688 	const char *fw_name_dmcu = NULL;
1689 	int r;
1690 	const struct dmcu_firmware_header_v1_0 *hdr;
1691 
1692 	switch (adev->asic_type) {
1693 #if defined(CONFIG_DRM_AMD_DC_SI)
1694 	case CHIP_TAHITI:
1695 	case CHIP_PITCAIRN:
1696 	case CHIP_VERDE:
1697 	case CHIP_OLAND:
1698 #endif
1699 	case CHIP_BONAIRE:
1700 	case CHIP_HAWAII:
1701 	case CHIP_KAVERI:
1702 	case CHIP_KABINI:
1703 	case CHIP_MULLINS:
1704 	case CHIP_TONGA:
1705 	case CHIP_FIJI:
1706 	case CHIP_CARRIZO:
1707 	case CHIP_STONEY:
1708 	case CHIP_POLARIS11:
1709 	case CHIP_POLARIS10:
1710 	case CHIP_POLARIS12:
1711 	case CHIP_VEGAM:
1712 	case CHIP_VEGA10:
1713 	case CHIP_VEGA12:
1714 	case CHIP_VEGA20:
1715 		return 0;
1716 	case CHIP_NAVI12:
1717 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1718 		break;
1719 	case CHIP_RAVEN:
1720 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1721 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1722 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1723 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1724 		else
1725 			return 0;
1726 		break;
1727 	default:
1728 		switch (adev->ip_versions[DCE_HWIP][0]) {
1729 		case IP_VERSION(2, 0, 2):
1730 		case IP_VERSION(2, 0, 3):
1731 		case IP_VERSION(2, 0, 0):
1732 		case IP_VERSION(2, 1, 0):
1733 		case IP_VERSION(3, 0, 0):
1734 		case IP_VERSION(3, 0, 2):
1735 		case IP_VERSION(3, 0, 3):
1736 		case IP_VERSION(3, 0, 1):
1737 		case IP_VERSION(3, 1, 2):
1738 		case IP_VERSION(3, 1, 3):
1739 			return 0;
1740 		default:
1741 			break;
1742 		}
1743 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1744 		return -EINVAL;
1745 	}
1746 
1747 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1748 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1749 		return 0;
1750 	}
1751 
1752 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1753 	if (r == -ENOENT) {
1754 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1755 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1756 		adev->dm.fw_dmcu = NULL;
1757 		return 0;
1758 	}
1759 	if (r) {
1760 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1761 			fw_name_dmcu);
1762 		return r;
1763 	}
1764 
1765 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1766 	if (r) {
1767 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1768 			fw_name_dmcu);
1769 		release_firmware(adev->dm.fw_dmcu);
1770 		adev->dm.fw_dmcu = NULL;
1771 		return r;
1772 	}
1773 
1774 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1775 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1776 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1777 	adev->firmware.fw_size +=
1778 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1779 
1780 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1781 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1782 	adev->firmware.fw_size +=
1783 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1784 
1785 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1786 
1787 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1788 
1789 	return 0;
1790 }
1791 
1792 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1793 {
1794 	struct amdgpu_device *adev = ctx;
1795 
1796 	return dm_read_reg(adev->dm.dc->ctx, address);
1797 }
1798 
1799 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1800 				     uint32_t value)
1801 {
1802 	struct amdgpu_device *adev = ctx;
1803 
1804 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1805 }
1806 
1807 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1808 {
1809 	struct dmub_srv_create_params create_params;
1810 	struct dmub_srv_region_params region_params;
1811 	struct dmub_srv_region_info region_info;
1812 	struct dmub_srv_fb_params fb_params;
1813 	struct dmub_srv_fb_info *fb_info;
1814 	struct dmub_srv *dmub_srv;
1815 	const struct dmcub_firmware_header_v1_0 *hdr;
1816 	const char *fw_name_dmub;
1817 	enum dmub_asic dmub_asic;
1818 	enum dmub_status status;
1819 	int r;
1820 
1821 	switch (adev->ip_versions[DCE_HWIP][0]) {
1822 	case IP_VERSION(2, 1, 0):
1823 		dmub_asic = DMUB_ASIC_DCN21;
1824 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1825 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1826 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1827 		break;
1828 	case IP_VERSION(3, 0, 0):
1829 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1830 			dmub_asic = DMUB_ASIC_DCN30;
1831 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1832 		} else {
1833 			dmub_asic = DMUB_ASIC_DCN30;
1834 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1835 		}
1836 		break;
1837 	case IP_VERSION(3, 0, 1):
1838 		dmub_asic = DMUB_ASIC_DCN301;
1839 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1840 		break;
1841 	case IP_VERSION(3, 0, 2):
1842 		dmub_asic = DMUB_ASIC_DCN302;
1843 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1844 		break;
1845 	case IP_VERSION(3, 0, 3):
1846 		dmub_asic = DMUB_ASIC_DCN303;
1847 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1848 		break;
1849 	case IP_VERSION(3, 1, 2):
1850 	case IP_VERSION(3, 1, 3):
1851 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1852 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1853 		break;
1854 
1855 	default:
1856 		/* ASIC doesn't support DMUB. */
1857 		return 0;
1858 	}
1859 
1860 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1861 	if (r) {
1862 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1863 		return 0;
1864 	}
1865 
1866 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1867 	if (r) {
1868 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1869 		return 0;
1870 	}
1871 
1872 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1873 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1874 
1875 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1876 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1877 			AMDGPU_UCODE_ID_DMCUB;
1878 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1879 			adev->dm.dmub_fw;
1880 		adev->firmware.fw_size +=
1881 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1882 
1883 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1884 			 adev->dm.dmcub_fw_version);
1885 	}
1886 
1888 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1889 	dmub_srv = adev->dm.dmub_srv;
1890 
1891 	if (!dmub_srv) {
1892 		DRM_ERROR("Failed to allocate DMUB service!\n");
1893 		return -ENOMEM;
1894 	}
1895 
1896 	memset(&create_params, 0, sizeof(create_params));
1897 	create_params.user_ctx = adev;
1898 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1899 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1900 	create_params.asic = dmub_asic;
1901 
1902 	/* Create the DMUB service. */
1903 	status = dmub_srv_create(dmub_srv, &create_params);
1904 	if (status != DMUB_STATUS_OK) {
1905 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1906 		return -EINVAL;
1907 	}
1908 
1909 	/* Calculate the size of all the regions for the DMUB service. */
1910 	memset(&region_params, 0, sizeof(region_params));
1911 
1912 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1913 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1914 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1915 	region_params.vbios_size = adev->bios_size;
1916 	region_params.fw_bss_data = region_params.bss_data_size ?
1917 		adev->dm.dmub_fw->data +
1918 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1919 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1920 	region_params.fw_inst_const =
1921 		adev->dm.dmub_fw->data +
1922 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1923 		PSP_HEADER_BYTES;
1924 
1925 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1926 					   &region_info);
1927 
1928 	if (status != DMUB_STATUS_OK) {
1929 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1930 		return -EINVAL;
1931 	}
1932 
1933 	/*
1934 	 * Allocate a framebuffer based on the total size of all the regions.
1935 	 * TODO: Move this into GART.
1936 	 */
1937 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1938 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1939 				    &adev->dm.dmub_bo_gpu_addr,
1940 				    &adev->dm.dmub_bo_cpu_addr);
1941 	if (r)
1942 		return r;
1943 
1944 	/* Rebase the regions on the framebuffer address. */
1945 	memset(&fb_params, 0, sizeof(fb_params));
1946 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1947 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1948 	fb_params.region_info = &region_info;
1949 
1950 	adev->dm.dmub_fb_info =
1951 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1952 	fb_info = adev->dm.dmub_fb_info;
1953 
1954 	if (!fb_info) {
1955 		DRM_ERROR(
1956 			"Failed to allocate framebuffer info for DMUB service!\n");
1957 		return -ENOMEM;
1958 	}
1959 
1960 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1961 	if (status != DMUB_STATUS_OK) {
1962 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1963 		return -EINVAL;
1964 	}
1965 
1966 	return 0;
1967 }
1968 
1969 static int dm_sw_init(void *handle)
1970 {
1971 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1972 	int r;
1973 
1974 	r = dm_dmub_sw_init(adev);
1975 	if (r)
1976 		return r;
1977 
1978 	return load_dmcu_fw(adev);
1979 }
1980 
1981 static int dm_sw_fini(void *handle)
1982 {
1983 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1984 
1985 	kfree(adev->dm.dmub_fb_info);
1986 	adev->dm.dmub_fb_info = NULL;
1987 
1988 	if (adev->dm.dmub_srv) {
1989 		dmub_srv_destroy(adev->dm.dmub_srv);
1990 		adev->dm.dmub_srv = NULL;
1991 	}
1992 
1993 	release_firmware(adev->dm.dmub_fw);
1994 	adev->dm.dmub_fw = NULL;
1995 
1996 	release_firmware(adev->dm.fw_dmcu);
1997 	adev->dm.fw_dmcu = NULL;
1998 
1999 	return 0;
2000 }
2001 
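/*
 * Start MST topology management (sideband messaging) on every connector whose
 * DC link was detected as an MST branch device and that has an initialized
 * MST manager.
 */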
2002 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2003 {
2004 	struct amdgpu_dm_connector *aconnector;
2005 	struct drm_connector *connector;
2006 	struct drm_connector_list_iter iter;
2007 	int ret = 0;
2008 
2009 	drm_connector_list_iter_begin(dev, &iter);
2010 	drm_for_each_connector_iter(connector, &iter) {
2011 		aconnector = to_amdgpu_dm_connector(connector);
2012 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2013 		    aconnector->mst_mgr.aux) {
2014 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2015 					 aconnector,
2016 					 aconnector->base.base.id);
2017 
2018 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2019 			if (ret < 0) {
2020 				DRM_ERROR("DM_MST: Failed to start MST\n");
2021 				aconnector->dc_link->type =
2022 					dc_connection_single;
2023 				break;
2024 			}
2025 		}
2026 	}
2027 	drm_connector_list_iter_end(&iter);
2028 
2029 	return ret;
2030 }
2031 
2032 static int dm_late_init(void *handle)
2033 {
2034 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2035 
2036 	struct dmcu_iram_parameters params;
2037 	unsigned int linear_lut[16];
2038 	int i;
2039 	struct dmcu *dmcu = NULL;
2040 
2041 	dmcu = adev->dm.dc->res_pool->dmcu;
2042 
2043 	for (i = 0; i < 16; i++)
2044 		linear_lut[i] = 0xFFFF * i / 15;
2045 
2046 	params.set = 0;
2047 	params.backlight_ramping_override = false;
2048 	params.backlight_ramping_start = 0xCCCC;
2049 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2050 	params.backlight_lut_array_size = 16;
2051 	params.backlight_lut_array = linear_lut;
2052 
2053 	/* Min backlight level after ABM reduction; don't allow below 1%:
2054 	 * 0xFFFF * 0.01 = 0x28F
2055 	 */
2056 	params.min_abm_backlight = 0x28F;
2057 	/* In the case where ABM is implemented on dmcub,
2058 	 * the dmcu object will be NULL.
2059 	 * ABM 2.4 and up are implemented on dmcub.
2060 	 */
2061 	if (dmcu) {
2062 		if (!dmcu_load_iram(dmcu, params))
2063 			return -EINVAL;
2064 	} else if (adev->dm.dc->ctx->dmub_srv) {
2065 		struct dc_link *edp_links[MAX_NUM_EDP];
2066 		int edp_num;
2067 
2068 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2069 		for (i = 0; i < edp_num; i++) {
2070 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2071 				return -EINVAL;
2072 		}
2073 	}
2074 
2075 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2076 }
2077 
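/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * If a manager fails to resume, its MST state is torn down and a hotplug
 * event is sent so that userspace re-probes the topology.
 */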
2078 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2079 {
2080 	struct amdgpu_dm_connector *aconnector;
2081 	struct drm_connector *connector;
2082 	struct drm_connector_list_iter iter;
2083 	struct drm_dp_mst_topology_mgr *mgr;
2084 	int ret;
2085 	bool need_hotplug = false;
2086 
2087 	drm_connector_list_iter_begin(dev, &iter);
2088 	drm_for_each_connector_iter(connector, &iter) {
2089 		aconnector = to_amdgpu_dm_connector(connector);
2090 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2091 		    aconnector->mst_port)
2092 			continue;
2093 
2094 		mgr = &aconnector->mst_mgr;
2095 
2096 		if (suspend) {
2097 			drm_dp_mst_topology_mgr_suspend(mgr);
2098 		} else {
2099 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2100 			if (ret < 0) {
2101 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2102 				need_hotplug = true;
2103 			}
2104 		}
2105 	}
2106 	drm_connector_list_iter_end(&iter);
2107 
2108 	if (need_hotplug)
2109 		drm_kms_helper_hotplug_event(dev);
2110 }
2111 
2112 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2113 {
2114 	struct smu_context *smu = &adev->smu;
2115 	int ret = 0;
2116 
2117 	if (!is_support_sw_smu(adev))
2118 		return 0;
2119 
2120 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2121 	 * depends on the Windows driver DC implementation.
2122 	 * For Navi1x, the clock settings of the DCN watermarks are fixed; the
2123 	 * settings should be passed to SMU during boot up and on resume from S3.
2124 	 * Boot up: DC calculates the DCN watermark clock settings within
2125 	 * dc_create / dcn20_resource_construct,
2126 	 * then calls the pplib functions below to pass the settings to SMU:
2127 	 * smu_set_watermarks_for_clock_ranges
2128 	 * smu_set_watermarks_table
2129 	 * navi10_set_watermarks_table
2130 	 * smu_write_watermarks_table
2131 	 *
2132 	 * For Renoir, the clock settings of the DCN watermarks are also fixed
2133 	 * values. DC implements a different flow for the Windows driver:
2134 	 * dc_hardware_init / dc_set_power_state
2135 	 * dcn10_init_hw
2136 	 * notify_wm_ranges
2137 	 * set_wm_ranges
2138 	 * -- Linux
2139 	 * smu_set_watermarks_for_clock_ranges
2140 	 * renoir_set_watermarks_table
2141 	 * smu_write_watermarks_table
2142 	 *
2143 	 * For Linux,
2144 	 * dc_hardware_init -> amdgpu_dm_init
2145 	 * dc_set_power_state --> dm_resume
2146 	 *
2147 	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
2148 	 */
2150 	switch (adev->ip_versions[DCE_HWIP][0]) {
2151 	case IP_VERSION(2, 0, 2):
2152 	case IP_VERSION(2, 0, 0):
2153 		break;
2154 	default:
2155 		return 0;
2156 	}
2157 
2158 	ret = smu_write_watermarks_table(smu);
2159 	if (ret) {
2160 		DRM_ERROR("Failed to update WMTABLE!\n");
2161 		return ret;
2162 	}
2163 
2164 	return 0;
2165 }
2166 
2167 /**
2168  * dm_hw_init() - Initialize DC device
2169  * @handle: The base driver device containing the amdgpu_dm device.
2170  *
2171  * Initialize the &struct amdgpu_display_manager device. This involves calling
2172  * the initializers of each DM component, then populating the struct with them.
2173  *
2174  * Although the function implies hardware initialization, both hardware and
2175  * software are initialized here. Splitting them out to their relevant init
2176  * hooks is a future TODO item.
2177  *
2178  * Some notable things that are initialized here:
2179  *
2180  * - Display Core, both software and hardware
2181  * - DC modules that we need (freesync and color management)
2182  * - DRM software states
2183  * - Interrupt sources and handlers
2184  * - Vblank support
2185  * - Debug FS entries, if enabled
2186  */
2187 static int dm_hw_init(void *handle)
2188 {
2189 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2190 	/* Create DAL display manager */
2191 	amdgpu_dm_init(adev);
2192 	amdgpu_dm_hpd_init(adev);
2193 
2194 	return 0;
2195 }
2196 
2197 /**
2198  * dm_hw_fini() - Teardown DC device
2199  * @handle: The base driver device containing the amdgpu_dm device.
2200  *
2201  * Teardown components within &struct amdgpu_display_manager that require
2202  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2203  * were loaded. Also flush IRQ workqueues and disable them.
2204  */
2205 static int dm_hw_fini(void *handle)
2206 {
2207 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2208 
2209 	amdgpu_dm_hpd_fini(adev);
2210 
2211 	amdgpu_dm_irq_fini(adev);
2212 	amdgpu_dm_fini(adev);
2213 	return 0;
2214 }
2215 
2216 
2217 static int dm_enable_vblank(struct drm_crtc *crtc);
2218 static void dm_disable_vblank(struct drm_crtc *crtc);
2219 
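/*
 * Enable or disable the pageflip and vblank interrupts for every CRTC that
 * still drives planes in @state. Used to quiesce and later restore display
 * interrupts around a GPU reset.
 */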
2220 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2221 				 struct dc_state *state, bool enable)
2222 {
2223 	enum dc_irq_source irq_source;
2224 	struct amdgpu_crtc *acrtc;
2225 	int rc = -EBUSY;
2226 	int i = 0;
2227 
2228 	for (i = 0; i < state->stream_count; i++) {
2229 		acrtc = get_crtc_by_otg_inst(
2230 				adev, state->stream_status[i].primary_otg_inst);
2231 
2232 		if (acrtc && state->stream_status[i].plane_count != 0) {
2233 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2234 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2235 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2236 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2237 			if (rc)
2238 				DRM_WARN("Failed to %s pflip interrupts\n",
2239 					 enable ? "enable" : "disable");
2240 
2241 			if (enable) {
2242 				rc = dm_enable_vblank(&acrtc->base);
2243 				if (rc)
2244 					DRM_WARN("Failed to enable vblank interrupts\n");
2245 			} else {
2246 				dm_disable_vblank(&acrtc->base);
2247 			}
2248 
2249 		}
2250 	}
2252 }
2253 
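/*
 * Commit a copy of the current DC state with every stream (and its planes)
 * removed, effectively blanking all pipes. Used from dm_suspend() while a
 * GPU reset is in progress.
 */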
2254 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2255 {
2256 	struct dc_state *context = NULL;
2257 	enum dc_status res = DC_ERROR_UNEXPECTED;
2258 	int i;
2259 	struct dc_stream_state *del_streams[MAX_PIPES];
2260 	int del_streams_count = 0;
2261 
2262 	memset(del_streams, 0, sizeof(del_streams));
2263 
2264 	context = dc_create_state(dc);
2265 	if (context == NULL)
2266 		goto context_alloc_fail;
2267 
2268 	dc_resource_state_copy_construct_current(dc, context);
2269 
2270 	/* First remove from context all streams */
2271 	for (i = 0; i < context->stream_count; i++) {
2272 		struct dc_stream_state *stream = context->streams[i];
2273 
2274 		del_streams[del_streams_count++] = stream;
2275 	}
2276 
2277 	/* Remove all planes for removed streams and then remove the streams */
2278 	for (i = 0; i < del_streams_count; i++) {
2279 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2280 			res = DC_FAIL_DETACH_SURFACES;
2281 			goto fail;
2282 		}
2283 
2284 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2285 		if (res != DC_OK)
2286 			goto fail;
2287 	}
2288 
2290 	res = dc_validate_global_state(dc, context, false);
2291 
2292 	if (res != DC_OK) {
2293 		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
2294 		goto fail;
2295 	}
2296 
2297 	res = dc_commit_state(dc, context);
2298 
2299 fail:
2300 	dc_release_state(context);
2301 
2302 context_alloc_fail:
2303 	return res;
2304 }
2305 
2306 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2307 {
2308 	int i;
2309 
2310 	if (dm->hpd_rx_offload_wq) {
2311 		for (i = 0; i < dm->dc->caps.max_links; i++)
2312 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2313 	}
2314 }
2315 
2316 static int dm_suspend(void *handle)
2317 {
2318 	struct amdgpu_device *adev = handle;
2319 	struct amdgpu_display_manager *dm = &adev->dm;
2320 	int ret = 0;
2321 
2322 	if (amdgpu_in_reset(adev)) {
2323 		mutex_lock(&dm->dc_lock);
2324 
2325 #if defined(CONFIG_DRM_AMD_DC_DCN)
2326 		dc_allow_idle_optimizations(adev->dm.dc, false);
2327 #endif
2328 
2329 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2330 
2331 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2332 
2333 		amdgpu_dm_commit_zero_streams(dm->dc);
2334 
2335 		amdgpu_dm_irq_suspend(adev);
2336 
2337 		hpd_rx_irq_work_suspend(dm);
2338 
2339 		return ret;
2340 	}
2341 
2342 	WARN_ON(adev->dm.cached_state);
2343 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2344 
2345 	s3_handle_mst(adev_to_drm(adev), true);
2346 
2347 	amdgpu_dm_irq_suspend(adev);
2348 
2349 	hpd_rx_irq_work_suspend(dm);
2350 
2351 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2352 
2353 	return 0;
2354 }
2355 
2356 static struct amdgpu_dm_connector *
2357 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2358 					     struct drm_crtc *crtc)
2359 {
2360 	uint32_t i;
2361 	struct drm_connector_state *new_con_state;
2362 	struct drm_connector *connector;
2363 	struct drm_crtc *crtc_from_state;
2364 
2365 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2366 		crtc_from_state = new_con_state->crtc;
2367 
2368 		if (crtc_from_state == crtc)
2369 			return to_amdgpu_dm_connector(connector);
2370 	}
2371 
2372 	return NULL;
2373 }
2374 
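/*
 * Emulate sink detection for a link whose connector is forced on but where no
 * sink was physically detected: create a dc_sink matching the connector
 * signal type and read its EDID through dm_helpers_read_local_edid().
 */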
2375 static void emulated_link_detect(struct dc_link *link)
2376 {
2377 	struct dc_sink_init_data sink_init_data = { 0 };
2378 	struct display_sink_capability sink_caps = { 0 };
2379 	enum dc_edid_status edid_status;
2380 	struct dc_context *dc_ctx = link->ctx;
2381 	struct dc_sink *sink = NULL;
2382 	struct dc_sink *prev_sink = NULL;
2383 
2384 	link->type = dc_connection_none;
2385 	prev_sink = link->local_sink;
2386 
2387 	if (prev_sink)
2388 		dc_sink_release(prev_sink);
2389 
2390 	switch (link->connector_signal) {
2391 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2392 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2393 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2394 		break;
2395 	}
2396 
2397 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2398 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2399 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2400 		break;
2401 	}
2402 
2403 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2404 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2405 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2406 		break;
2407 	}
2408 
2409 	case SIGNAL_TYPE_LVDS: {
2410 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2411 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2412 		break;
2413 	}
2414 
2415 	case SIGNAL_TYPE_EDP: {
2416 		sink_caps.transaction_type =
2417 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2418 		sink_caps.signal = SIGNAL_TYPE_EDP;
2419 		break;
2420 	}
2421 
2422 	case SIGNAL_TYPE_DISPLAY_PORT: {
2423 		sink_caps.transaction_type =
2424 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2425 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2426 		break;
2427 	}
2428 
2429 	default:
2430 		DC_ERROR("Invalid connector type! signal:%d\n",
2431 			link->connector_signal);
2432 		return;
2433 	}
2434 
2435 	sink_init_data.link = link;
2436 	sink_init_data.sink_signal = sink_caps.signal;
2437 
2438 	sink = dc_sink_create(&sink_init_data);
2439 	if (!sink) {
2440 		DC_ERROR("Failed to create sink!\n");
2441 		return;
2442 	}
2443 
2444 	/* dc_sink_create returns a new reference */
2445 	link->local_sink = sink;
2446 
2447 	edid_status = dm_helpers_read_local_edid(
2448 			link->ctx,
2449 			link,
2450 			sink);
2451 
2452 	if (edid_status != EDID_OK)
2453 		DC_ERROR("Failed to read EDID\n");
2455 }
2456 
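/*
 * After a GPU reset, re-commit every cached stream with all of its planes,
 * forcing a full surface update so the hardware is completely reprogrammed.
 */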
2457 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2458 				     struct amdgpu_display_manager *dm)
2459 {
2460 	struct {
2461 		struct dc_surface_update surface_updates[MAX_SURFACES];
2462 		struct dc_plane_info plane_infos[MAX_SURFACES];
2463 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2464 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2465 		struct dc_stream_update stream_update;
2466 	} *bundle;
2467 	int k, m;
2468 
2469 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2470 
2471 	if (!bundle) {
2472 		dm_error("Failed to allocate update bundle\n");
2473 		goto cleanup;
2474 	}
2475 
2476 	for (k = 0; k < dc_state->stream_count; k++) {
2477 		bundle->stream_update.stream = dc_state->streams[k];
2478 
2479 		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2480 			bundle->surface_updates[m].surface =
2481 				dc_state->stream_status[k].plane_states[m];
2482 			bundle->surface_updates[m].surface->force_full_update =
2483 				true;
2484 		}
2485 		dc_commit_updates_for_stream(
2486 			dm->dc, bundle->surface_updates,
2487 			dc_state->stream_status[k].plane_count,
2488 			dc_state->streams[k], &bundle->stream_update, dc_state);
2489 	}
2490 
2491 cleanup:
2492 	kfree(bundle);
2493 
2494 	return;
2495 }
2496 
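/*
 * Push a dpms_off update for the stream currently driven by @link, turning
 * the output off without a full atomic commit. Called from the HPD handler
 * when detection no longer finds a sink on the link.
 */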
2497 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2498 {
2499 	struct dc_stream_state *stream_state;
2500 	struct amdgpu_dm_connector *aconnector = link->priv;
2501 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2502 	struct dc_stream_update stream_update;
2503 	bool dpms_off = true;
2504 
2505 	memset(&stream_update, 0, sizeof(stream_update));
2506 	stream_update.dpms_off = &dpms_off;
2507 
2508 	mutex_lock(&adev->dm.dc_lock);
2509 	stream_state = dc_stream_find_from_link(link);
2510 
2511 	if (stream_state == NULL) {
2512 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2513 		mutex_unlock(&adev->dm.dc_lock);
2514 		return;
2515 	}
2516 
2517 	stream_update.stream = stream_state;
2518 	acrtc_state->force_dpms_off = true;
2519 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2520 				     stream_state, &stream_update,
2521 				     stream_state->ctx->dc->current_state);
2522 	mutex_unlock(&adev->dm.dc_lock);
2523 }
2524 
2525 static int dm_resume(void *handle)
2526 {
2527 	struct amdgpu_device *adev = handle;
2528 	struct drm_device *ddev = adev_to_drm(adev);
2529 	struct amdgpu_display_manager *dm = &adev->dm;
2530 	struct amdgpu_dm_connector *aconnector;
2531 	struct drm_connector *connector;
2532 	struct drm_connector_list_iter iter;
2533 	struct drm_crtc *crtc;
2534 	struct drm_crtc_state *new_crtc_state;
2535 	struct dm_crtc_state *dm_new_crtc_state;
2536 	struct drm_plane *plane;
2537 	struct drm_plane_state *new_plane_state;
2538 	struct dm_plane_state *dm_new_plane_state;
2539 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2540 	enum dc_connection_type new_connection_type = dc_connection_none;
2541 	struct dc_state *dc_state;
2542 	int i, r, j;
2543 
2544 	if (amdgpu_in_reset(adev)) {
2545 		dc_state = dm->cached_dc_state;
2546 
2547 		r = dm_dmub_hw_init(adev);
2548 		if (r)
2549 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2550 
2551 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2552 		dc_resume(dm->dc);
2553 
2554 		amdgpu_dm_irq_resume_early(adev);
2555 
2556 		for (i = 0; i < dc_state->stream_count; i++) {
2557 			dc_state->streams[i]->mode_changed = true;
2558 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2559 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2560 					= 0xffffffff;
2561 			}
2562 		}
2563 #if defined(CONFIG_DRM_AMD_DC_DCN)
2564 		/*
2565 		 * Resource allocation happens for link encoders for newer ASIC in
2566 		 * dc_validate_global_state, so we need to revalidate it.
2567 		 *
2568 		 * This shouldn't fail (it passed once before), so warn if it does.
2569 		 */
2570 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2571 #endif
2572 
2573 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2574 
2575 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2576 
2577 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2578 
2579 		dc_release_state(dm->cached_dc_state);
2580 		dm->cached_dc_state = NULL;
2581 
2582 		amdgpu_dm_irq_resume_late(adev);
2583 
2584 		mutex_unlock(&dm->dc_lock);
2585 
2586 		return 0;
2587 	}
2588 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2589 	dc_release_state(dm_state->context);
2590 	dm_state->context = dc_create_state(dm->dc);
2591 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2592 	dc_resource_state_construct(dm->dc, dm_state->context);
2593 
2594 	/* Before powering on DC we need to re-initialize DMUB. */
2595 	r = dm_dmub_hw_init(adev);
2596 	if (r)
2597 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2598 
2599 	/* power on hardware */
2600 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2601 
2602 	/* program HPD filter */
2603 	dc_resume(dm->dc);
2604 
2605 	/*
2606 	 * early enable HPD Rx IRQ, should be done before set mode as short
2607 	 * pulse interrupts are used for MST
2608 	 */
2609 	amdgpu_dm_irq_resume_early(adev);
2610 
2611 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2612 	s3_handle_mst(ddev, false);
2613 
2614 	/* Do detection */
2615 	drm_connector_list_iter_begin(ddev, &iter);
2616 	drm_for_each_connector_iter(connector, &iter) {
2617 		aconnector = to_amdgpu_dm_connector(connector);
2618 
2619 		/*
2620 		 * this is the case when traversing through already created
2621 		 * MST connectors, should be skipped
2622 		 */
2623 		if (aconnector->mst_port)
2624 			continue;
2625 
2626 		mutex_lock(&aconnector->hpd_lock);
2627 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2628 			DRM_ERROR("KMS: Failed to detect connector\n");
2629 
2630 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2631 			emulated_link_detect(aconnector->dc_link);
2632 		else
2633 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2634 
2635 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2636 			aconnector->fake_enable = false;
2637 
2638 		if (aconnector->dc_sink)
2639 			dc_sink_release(aconnector->dc_sink);
2640 		aconnector->dc_sink = NULL;
2641 		amdgpu_dm_update_connector_after_detect(aconnector);
2642 		mutex_unlock(&aconnector->hpd_lock);
2643 	}
2644 	drm_connector_list_iter_end(&iter);
2645 
2646 	/* Force mode set in atomic commit */
2647 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2648 		new_crtc_state->active_changed = true;
2649 
2650 	/*
2651 	 * atomic_check is expected to create the dc states. We need to release
2652 	 * them here, since they were duplicated as part of the suspend
2653 	 * procedure.
2654 	 */
2655 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2656 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2657 		if (dm_new_crtc_state->stream) {
2658 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2659 			dc_stream_release(dm_new_crtc_state->stream);
2660 			dm_new_crtc_state->stream = NULL;
2661 		}
2662 	}
2663 
2664 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2665 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2666 		if (dm_new_plane_state->dc_state) {
2667 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2668 			dc_plane_state_release(dm_new_plane_state->dc_state);
2669 			dm_new_plane_state->dc_state = NULL;
2670 		}
2671 	}
2672 
2673 	drm_atomic_helper_resume(ddev, dm->cached_state);
2674 
2675 	dm->cached_state = NULL;
2676 
2677 	amdgpu_dm_irq_resume_late(adev);
2678 
2679 	amdgpu_dm_smu_write_watermarks_table(adev);
2680 
2681 	return 0;
2682 }
2683 
2684 /**
2685  * DOC: DM Lifecycle
2686  *
2687  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2688  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2689  * the base driver's device list to be initialized and torn down accordingly.
2690  *
2691  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2692  */
2693 
2694 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2695 	.name = "dm",
2696 	.early_init = dm_early_init,
2697 	.late_init = dm_late_init,
2698 	.sw_init = dm_sw_init,
2699 	.sw_fini = dm_sw_fini,
2700 	.early_fini = amdgpu_dm_early_fini,
2701 	.hw_init = dm_hw_init,
2702 	.hw_fini = dm_hw_fini,
2703 	.suspend = dm_suspend,
2704 	.resume = dm_resume,
2705 	.is_idle = dm_is_idle,
2706 	.wait_for_idle = dm_wait_for_idle,
2707 	.check_soft_reset = dm_check_soft_reset,
2708 	.soft_reset = dm_soft_reset,
2709 	.set_clockgating_state = dm_set_clockgating_state,
2710 	.set_powergating_state = dm_set_powergating_state,
2711 };
2712 
2713 const struct amdgpu_ip_block_version dm_ip_block =
2714 {
2715 	.type = AMD_IP_BLOCK_TYPE_DCE,
2716 	.major = 1,
2717 	.minor = 0,
2718 	.rev = 0,
2719 	.funcs = &amdgpu_dm_funcs,
2720 };
2721 
2722 
2723 /**
2724  * DOC: atomic
2725  *
2726  * *WIP*
2727  */
2728 
2729 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2730 	.fb_create = amdgpu_display_user_framebuffer_create,
2731 	.get_format_info = amd_get_format_info,
2732 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2733 	.atomic_check = amdgpu_dm_atomic_check,
2734 	.atomic_commit = drm_atomic_helper_commit,
2735 };
2736 
2737 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2738 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2739 };
2740 
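/*
 * Cache the eDP sink's extended backlight capabilities: the DPCD ext caps,
 * whether AUX backlight control is usable, and the min/max input signal
 * derived from the connector's HDR metadata (max_cll/min_cll). Only applies
 * to eDP links that are registered as backlight links.
 */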
2741 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2742 {
2743 	u32 max_cll, min_cll, max, min, q, r;
2744 	struct amdgpu_dm_backlight_caps *caps;
2745 	struct amdgpu_display_manager *dm;
2746 	struct drm_connector *conn_base;
2747 	struct amdgpu_device *adev;
2748 	struct dc_link *link = NULL;
2749 	static const u8 pre_computed_values[] = {
2750 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2751 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2752 	int i;
2753 
2754 	if (!aconnector || !aconnector->dc_link)
2755 		return;
2756 
2757 	link = aconnector->dc_link;
2758 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2759 		return;
2760 
2761 	conn_base = &aconnector->base;
2762 	adev = drm_to_adev(conn_base->dev);
2763 	dm = &adev->dm;
2764 	for (i = 0; i < dm->num_of_edps; i++) {
2765 		if (link == dm->backlight_link[i])
2766 			break;
2767 	}
2768 	if (i >= dm->num_of_edps)
2769 		return;
2770 	caps = &dm->backlight_caps[i];
2771 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2772 	caps->aux_support = false;
2773 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2774 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2775 
2776 	if (caps->ext_caps->bits.oled == 1 /*||
2777 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2778 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2779 		caps->aux_support = true;
2780 
2781 	if (amdgpu_backlight == 0)
2782 		caps->aux_support = false;
2783 	else if (amdgpu_backlight == 1)
2784 		caps->aux_support = true;
2785 
2786 	/* From the specification (CTA-861-G), the maximum luminance is
2787 	 * calculated as:
2788 	 *	Luminance = 50*2**(CV/32)
2789 	 * where CV is a one-byte value.
2790 	 * Evaluating this expression directly would require floating-point
2791 	 * precision; to avoid that, we take advantage of the fact that CV is
2792 	 * divided by a constant. From Euclid's division algorithm, CV can be
2793 	 * written as CV = 32*q + r. Substituting this into the luminance
2794 	 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
2795 	 * pre-compute the values of 50*2**(r/32). The values were generated
2796 	 * with the following Ruby line:
2797 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2798 	 * and the results of that expression are stored in
2799 	 * pre_computed_values.
2800 	 */
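	/*
	 * Worked example with illustrative numbers (not taken from any real
	 * sink): max_cll = 70 gives q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which
	 * matches round(50 * 2**(70/32)) ~= 228 cd/m2.
	 */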
2801 	q = max_cll >> 5;
2802 	r = max_cll % 32;
2803 	max = (1 << q) * pre_computed_values[r];
2804 
2805 	// min luminance: maxLum * (CV/255)^2 / 100
2806 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2807 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2808 
2809 	caps->aux_max_input_signal = max;
2810 	caps->aux_min_input_signal = min;
2811 }
2812 
2813 void amdgpu_dm_update_connector_after_detect(
2814 		struct amdgpu_dm_connector *aconnector)
2815 {
2816 	struct drm_connector *connector = &aconnector->base;
2817 	struct drm_device *dev = connector->dev;
2818 	struct dc_sink *sink;
2819 
2820 	/* MST handled by drm_mst framework */
2821 	if (aconnector->mst_mgr.mst_state)
2822 		return;
2823 
2824 	sink = aconnector->dc_link->local_sink;
2825 	if (sink)
2826 		dc_sink_retain(sink);
2827 
2828 	/*
2829 	 * An EDID-managed connector gets its first update only in the mode_valid
2830 	 * hook; the connector sink is then set to either the fake or the physical
2831 	 * sink, depending on the link status. Skip if already done during boot.
2832 	 */
2833 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2834 			&& aconnector->dc_em_sink) {
2835 
2836 		/*
2837 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake
2838 		 * a stream, because connector->sink is set to NULL on resume.
2839 		 */
2840 		mutex_lock(&dev->mode_config.mutex);
2841 
2842 		if (sink) {
2843 			if (aconnector->dc_sink) {
2844 				amdgpu_dm_update_freesync_caps(connector, NULL);
2845 				/*
2846 				 * The retain and release below bump up the sink's refcount
2847 				 * because the link no longer points to it after disconnect;
2848 				 * otherwise the next CRTC-to-connector reshuffle by the UMD
2849 				 * would trigger an unwanted dc_sink release.
2850 				 */
2851 				dc_sink_release(aconnector->dc_sink);
2852 			}
2853 			aconnector->dc_sink = sink;
2854 			dc_sink_retain(aconnector->dc_sink);
2855 			amdgpu_dm_update_freesync_caps(connector,
2856 					aconnector->edid);
2857 		} else {
2858 			amdgpu_dm_update_freesync_caps(connector, NULL);
2859 			if (!aconnector->dc_sink) {
2860 				aconnector->dc_sink = aconnector->dc_em_sink;
2861 				dc_sink_retain(aconnector->dc_sink);
2862 			}
2863 		}
2864 
2865 		mutex_unlock(&dev->mode_config.mutex);
2866 
2867 		if (sink)
2868 			dc_sink_release(sink);
2869 		return;
2870 	}
2871 
2872 	/*
2873 	 * TODO: temporary guard until a proper fix is found.
2874 	 * If this sink is an MST sink, we should not do anything here.
2875 	 */
2876 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2877 		dc_sink_release(sink);
2878 		return;
2879 	}
2880 
2881 	if (aconnector->dc_sink == sink) {
2882 		/*
2883 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2884 		 * Do nothing!!
2885 		 */
2886 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2887 				aconnector->connector_id);
2888 		if (sink)
2889 			dc_sink_release(sink);
2890 		return;
2891 	}
2892 
2893 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2894 		aconnector->connector_id, aconnector->dc_sink, sink);
2895 
2896 	mutex_lock(&dev->mode_config.mutex);
2897 
2898 	/*
2899 	 * 1. Update status of the drm connector
2900 	 * 2. Send an event and let userspace tell us what to do
2901 	 */
2902 	if (sink) {
2903 		/*
2904 		 * TODO: check if we still need the S3 mode update workaround.
2905 		 * If yes, put it here.
2906 		 */
2907 		if (aconnector->dc_sink) {
2908 			amdgpu_dm_update_freesync_caps(connector, NULL);
2909 			dc_sink_release(aconnector->dc_sink);
2910 		}
2911 
2912 		aconnector->dc_sink = sink;
2913 		dc_sink_retain(aconnector->dc_sink);
2914 		if (sink->dc_edid.length == 0) {
2915 			aconnector->edid = NULL;
2916 			if (aconnector->dc_link->aux_mode) {
2917 				drm_dp_cec_unset_edid(
2918 					&aconnector->dm_dp_aux.aux);
2919 			}
2920 		} else {
2921 			aconnector->edid =
2922 				(struct edid *)sink->dc_edid.raw_edid;
2923 
2924 			drm_connector_update_edid_property(connector,
2925 							   aconnector->edid);
2926 			if (aconnector->dc_link->aux_mode)
2927 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2928 						    aconnector->edid);
2929 		}
2930 
2931 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2932 		update_connector_ext_caps(aconnector);
2933 	} else {
2934 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2935 		amdgpu_dm_update_freesync_caps(connector, NULL);
2936 		drm_connector_update_edid_property(connector, NULL);
2937 		aconnector->num_modes = 0;
2938 		dc_sink_release(aconnector->dc_sink);
2939 		aconnector->dc_sink = NULL;
2940 		aconnector->edid = NULL;
2941 #ifdef CONFIG_DRM_AMD_DC_HDCP
2942 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2943 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2944 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2945 #endif
2946 	}
2947 
2948 	mutex_unlock(&dev->mode_config.mutex);
2949 
2950 	update_subconnector_property(aconnector);
2951 
2952 	if (sink)
2953 		dc_sink_release(sink);
2954 }
2955 
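/*
 * Handle an HPD (long pulse) interrupt for @aconnector: re-run sink
 * detection, update the DRM connector state and, unless the connector is
 * forced, send a hotplug uevent to userspace.
 */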
2956 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2957 {
2958 	struct drm_connector *connector = &aconnector->base;
2959 	struct drm_device *dev = connector->dev;
2960 	enum dc_connection_type new_connection_type = dc_connection_none;
2961 	struct amdgpu_device *adev = drm_to_adev(dev);
2962 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2963 	struct dm_crtc_state *dm_crtc_state = NULL;
2964 
2965 	if (adev->dm.disable_hpd_irq)
2966 		return;
2967 
2968 	if (dm_con_state->base.state && dm_con_state->base.crtc)
2969 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2970 					dm_con_state->base.state,
2971 					dm_con_state->base.crtc));
2972 	/*
2973 	 * In case of failure, or for MST, there is no need to update the connector
2974 	 * status or notify the OS, since (for MST) this is done in its own context.
2975 	 */
2976 	mutex_lock(&aconnector->hpd_lock);
2977 
2978 #ifdef CONFIG_DRM_AMD_DC_HDCP
2979 	if (adev->dm.hdcp_workqueue) {
2980 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2981 		dm_con_state->update_hdcp = true;
2982 	}
2983 #endif
2984 	if (aconnector->fake_enable)
2985 		aconnector->fake_enable = false;
2986 
2987 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2988 		DRM_ERROR("KMS: Failed to detect connector\n");
2989 
2990 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2991 		emulated_link_detect(aconnector->dc_link);
2992 
2993 		drm_modeset_lock_all(dev);
2994 		dm_restore_drm_connector_state(dev, connector);
2995 		drm_modeset_unlock_all(dev);
2996 
2997 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2998 			drm_kms_helper_hotplug_event(dev);
2999 
3000 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3001 		if (new_connection_type == dc_connection_none &&
3002 		    aconnector->dc_link->type == dc_connection_none &&
3003 		    dm_crtc_state)
3004 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3005 
3006 		amdgpu_dm_update_connector_after_detect(aconnector);
3007 
3008 		drm_modeset_lock_all(dev);
3009 		dm_restore_drm_connector_state(dev, connector);
3010 		drm_modeset_unlock_all(dev);
3011 
3012 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3013 			drm_kms_helper_hotplug_event(dev);
3014 	}
3015 	mutex_unlock(&aconnector->hpd_lock);
3016 
3017 }
3018 
3019 static void handle_hpd_irq(void *param)
3020 {
3021 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3022 
3023 	handle_hpd_irq_helper(aconnector);
3024 
3025 }
3026 
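/*
 * Poll the sink's ESI (or, for DPCD rev < 1.2, the legacy sink-count/IRQ
 * vector) registers and let the DRM MST manager service any pending sideband
 * messages, ACKing each handled interrupt back to the sink. The loop is
 * bounded by max_process_count to avoid spinning on a misbehaving sink.
 */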
3027 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3028 {
3029 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3030 	uint8_t dret;
3031 	bool new_irq_handled = false;
3032 	int dpcd_addr;
3033 	int dpcd_bytes_to_read;
3034 
3035 	const int max_process_count = 30;
3036 	int process_count = 0;
3037 
3038 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3039 
3040 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3041 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3042 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3043 		dpcd_addr = DP_SINK_COUNT;
3044 	} else {
3045 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3046 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3047 		dpcd_addr = DP_SINK_COUNT_ESI;
3048 	}
3049 
3050 	dret = drm_dp_dpcd_read(
3051 		&aconnector->dm_dp_aux.aux,
3052 		dpcd_addr,
3053 		esi,
3054 		dpcd_bytes_to_read);
3055 
3056 	while (dret == dpcd_bytes_to_read &&
3057 		process_count < max_process_count) {
3058 		uint8_t retry;
3059 		dret = 0;
3060 
3061 		process_count++;
3062 
3063 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3064 		/* handle HPD short pulse irq */
3065 		if (aconnector->mst_mgr.mst_state)
3066 			drm_dp_mst_hpd_irq(
3067 				&aconnector->mst_mgr,
3068 				esi,
3069 				&new_irq_handled);
3070 
3071 		if (new_irq_handled) {
3072 			/* ACK at DPCD to notify the downstream device */
3073 			const int ack_dpcd_bytes_to_write =
3074 				dpcd_bytes_to_read - 1;
3075 
3076 			for (retry = 0; retry < 3; retry++) {
3077 				uint8_t wret;
3078 
3079 				wret = drm_dp_dpcd_write(
3080 					&aconnector->dm_dp_aux.aux,
3081 					dpcd_addr + 1,
3082 					&esi[1],
3083 					ack_dpcd_bytes_to_write);
3084 				if (wret == ack_dpcd_bytes_to_write)
3085 					break;
3086 			}
3087 
3088 			/* check if there is new irq to be handled */
3089 			dret = drm_dp_dpcd_read(
3090 				&aconnector->dm_dp_aux.aux,
3091 				dpcd_addr,
3092 				esi,
3093 				dpcd_bytes_to_read);
3094 
3095 			new_irq_handled = false;
3096 		} else {
3097 			break;
3098 		}
3099 	}
3100 
3101 	if (process_count == max_process_count)
3102 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3103 }
3104 
3105 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3106 							union hpd_irq_data hpd_irq_data)
3107 {
3108 	struct hpd_rx_irq_offload_work *offload_work =
3109 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3110 
3111 	if (!offload_work) {
3112 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3113 		return;
3114 	}
3115 
3116 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3117 	offload_work->data = hpd_irq_data;
3118 	offload_work->offload_wq = offload_wq;
3119 
3120 	queue_work(offload_wq->wq, &offload_work->work);
3121 	DRM_DEBUG_KMS("queued work to handle hpd_rx offload\n");
3122 }
3123 
3124 static void handle_hpd_rx_irq(void *param)
3125 {
3126 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3127 	struct drm_connector *connector = &aconnector->base;
3128 	struct drm_device *dev = connector->dev;
3129 	struct dc_link *dc_link = aconnector->dc_link;
3130 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3131 	bool result = false;
3132 	enum dc_connection_type new_connection_type = dc_connection_none;
3133 	struct amdgpu_device *adev = drm_to_adev(dev);
3134 	union hpd_irq_data hpd_irq_data;
3135 	bool link_loss = false;
3136 	bool has_left_work = false;
3137 	int idx = aconnector->base.index;
3138 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3139 
3140 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3141 
3142 	if (adev->dm.disable_hpd_irq)
3143 		return;
3144 
3145 	/*
3146 	 * TODO: Temporarily hold a mutex so the HPD interrupt does not conflict
3147 	 * with GPIO/I2C access; once an i2c helper is implemented, this mutex
3148 	 * can be retired.
3149 	 */
3150 	mutex_lock(&aconnector->hpd_lock);
3151 
3152 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3153 						&link_loss, true, &has_left_work);
3154 
3155 	if (!has_left_work)
3156 		goto out;
3157 
3158 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3159 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3160 		goto out;
3161 	}
3162 
3163 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3164 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3165 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3166 			dm_handle_mst_sideband_msg(aconnector);
3167 			goto out;
3168 		}
3169 
3170 		if (link_loss) {
3171 			bool skip = false;
3172 
3173 			spin_lock(&offload_wq->offload_lock);
3174 			skip = offload_wq->is_handling_link_loss;
3175 
3176 			if (!skip)
3177 				offload_wq->is_handling_link_loss = true;
3178 
3179 			spin_unlock(&offload_wq->offload_lock);
3180 
3181 			if (!skip)
3182 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3183 
3184 			goto out;
3185 		}
3186 	}
3187 
3188 out:
3189 	if (result && !is_mst_root_connector) {
3190 		/* Downstream Port status changed. */
3191 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3192 			DRM_ERROR("KMS: Failed to detect connector\n");
3193 
3194 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3195 			emulated_link_detect(dc_link);
3196 
3197 			if (aconnector->fake_enable)
3198 				aconnector->fake_enable = false;
3199 
3200 			amdgpu_dm_update_connector_after_detect(aconnector);
3201 
3203 			drm_modeset_lock_all(dev);
3204 			dm_restore_drm_connector_state(dev, connector);
3205 			drm_modeset_unlock_all(dev);
3206 
3207 			drm_kms_helper_hotplug_event(dev);
3208 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3209 
3210 			if (aconnector->fake_enable)
3211 				aconnector->fake_enable = false;
3212 
3213 			amdgpu_dm_update_connector_after_detect(aconnector);
3214 
3216 			drm_modeset_lock_all(dev);
3217 			dm_restore_drm_connector_state(dev, connector);
3218 			drm_modeset_unlock_all(dev);
3219 
3220 			drm_kms_helper_hotplug_event(dev);
3221 		}
3222 	}
3223 #ifdef CONFIG_DRM_AMD_DC_HDCP
3224 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3225 		if (adev->dm.hdcp_workqueue)
3226 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3227 	}
3228 #endif
3229 
3230 	if (dc_link->type != dc_connection_mst_branch)
3231 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3232 
3233 	mutex_unlock(&aconnector->hpd_lock);
3234 }
3235 
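/*
 * Walk the connector list and register the DC HPD and HPD_RX (DP short
 * pulse) interrupt sources with their DM handlers.
 */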
3236 static void register_hpd_handlers(struct amdgpu_device *adev)
3237 {
3238 	struct drm_device *dev = adev_to_drm(adev);
3239 	struct drm_connector *connector;
3240 	struct amdgpu_dm_connector *aconnector;
3241 	const struct dc_link *dc_link;
3242 	struct dc_interrupt_params int_params = {0};
3243 
3244 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3245 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3246 
3247 	list_for_each_entry(connector,
3248 			&dev->mode_config.connector_list, head) {
3249 
3250 		aconnector = to_amdgpu_dm_connector(connector);
3251 		dc_link = aconnector->dc_link;
3252 
3253 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3254 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3255 			int_params.irq_source = dc_link->irq_source_hpd;
3256 
3257 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3258 					handle_hpd_irq,
3259 					(void *) aconnector);
3260 		}
3261 
3262 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3263 
3264 			/* Also register for DP short pulse (hpd_rx). */
3265 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3266 			int_params.irq_source = dc_link->irq_source_hpd_rx;
3267 
3268 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3269 					handle_hpd_rx_irq,
3270 					(void *) aconnector);
3271 
3272 			if (adev->dm.hpd_rx_offload_wq)
3273 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3274 					aconnector;
3275 		}
3276 	}
3277 }
3278 
3279 #if defined(CONFIG_DRM_AMD_DC_SI)
3280 /* Register IRQ sources and initialize IRQ callbacks */
3281 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3282 {
3283 	struct dc *dc = adev->dm.dc;
3284 	struct common_irq_params *c_irq_params;
3285 	struct dc_interrupt_params int_params = {0};
3286 	int r;
3287 	int i;
3288 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3289 
3290 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3291 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3292 
3293 	/*
3294 	 * Actions of amdgpu_irq_add_id():
3295 	 * 1. Register a set() function with base driver.
3296 	 *    Base driver will call set() function to enable/disable an
3297 	 *    interrupt in DC hardware.
3298 	 * 2. Register amdgpu_dm_irq_handler().
3299 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3300 	 *    coming from DC hardware.
3301 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3302 	 *    for acknowledging and handling. */
3303 
3304 	/* Use VBLANK interrupt */
3305 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3306 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3307 		if (r) {
3308 			DRM_ERROR("Failed to add crtc irq id!\n");
3309 			return r;
3310 		}
3311 
3312 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3313 		int_params.irq_source =
3314 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3315 
3316 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3317 
3318 		c_irq_params->adev = adev;
3319 		c_irq_params->irq_src = int_params.irq_source;
3320 
3321 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3322 				dm_crtc_high_irq, c_irq_params);
3323 	}
3324 
3325 	/* Use GRPH_PFLIP interrupt */
3326 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3327 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3328 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3329 		if (r) {
3330 			DRM_ERROR("Failed to add page flip irq id!\n");
3331 			return r;
3332 		}
3333 
3334 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3335 		int_params.irq_source =
3336 			dc_interrupt_to_irq_source(dc, i, 0);
3337 
3338 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3339 
3340 		c_irq_params->adev = adev;
3341 		c_irq_params->irq_src = int_params.irq_source;
3342 
3343 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3344 				dm_pflip_high_irq, c_irq_params);
3345 
3346 	}
3347 
3348 	/* HPD */
3349 	r = amdgpu_irq_add_id(adev, client_id,
3350 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3351 	if (r) {
3352 		DRM_ERROR("Failed to add hpd irq id!\n");
3353 		return r;
3354 	}
3355 
3356 	register_hpd_handlers(adev);
3357 
3358 	return 0;
3359 }
3360 #endif
3361 
3362 /* Register IRQ sources and initialize IRQ callbacks */
3363 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3364 {
3365 	struct dc *dc = adev->dm.dc;
3366 	struct common_irq_params *c_irq_params;
3367 	struct dc_interrupt_params int_params = {0};
3368 	int r;
3369 	int i;
3370 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3371 
3372 	if (adev->family >= AMDGPU_FAMILY_AI)
3373 		client_id = SOC15_IH_CLIENTID_DCE;
3374 
3375 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3376 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3377 
3378 	/*
3379 	 * Actions of amdgpu_irq_add_id():
3380 	 * 1. Register a set() function with base driver.
3381 	 *    Base driver will call set() function to enable/disable an
3382 	 *    interrupt in DC hardware.
3383 	 * 2. Register amdgpu_dm_irq_handler().
3384 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3385 	 *    coming from DC hardware.
3386 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3387 	 *    for acknowledging and handling. */
3388 
3389 	/* Use VBLANK interrupt */
3390 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3391 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3392 		if (r) {
3393 			DRM_ERROR("Failed to add crtc irq id!\n");
3394 			return r;
3395 		}
3396 
3397 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3398 		int_params.irq_source =
3399 			dc_interrupt_to_irq_source(dc, i, 0);
3400 
3401 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3402 
3403 		c_irq_params->adev = adev;
3404 		c_irq_params->irq_src = int_params.irq_source;
3405 
3406 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3407 				dm_crtc_high_irq, c_irq_params);
3408 	}
3409 
3410 	/* Use VUPDATE interrupt */
3411 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT;
			i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3412 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3413 		if (r) {
3414 			DRM_ERROR("Failed to add vupdate irq id!\n");
3415 			return r;
3416 		}
3417 
3418 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3419 		int_params.irq_source =
3420 			dc_interrupt_to_irq_source(dc, i, 0);
3421 
3422 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3423 
3424 		c_irq_params->adev = adev;
3425 		c_irq_params->irq_src = int_params.irq_source;
3426 
3427 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3428 				dm_vupdate_high_irq, c_irq_params);
3429 	}
3430 
3431 	/* Use GRPH_PFLIP interrupt */
3432 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3433 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3434 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3435 		if (r) {
3436 			DRM_ERROR("Failed to add page flip irq id!\n");
3437 			return r;
3438 		}
3439 
3440 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3441 		int_params.irq_source =
3442 			dc_interrupt_to_irq_source(dc, i, 0);
3443 
3444 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3445 
3446 		c_irq_params->adev = adev;
3447 		c_irq_params->irq_src = int_params.irq_source;
3448 
3449 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3450 				dm_pflip_high_irq, c_irq_params);
3452 	}
3453 
3454 	/* HPD */
3455 	r = amdgpu_irq_add_id(adev, client_id,
3456 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3457 	if (r) {
3458 		DRM_ERROR("Failed to add hpd irq id!\n");
3459 		return r;
3460 	}
3461 
3462 	register_hpd_handlers(adev);
3463 
3464 	return 0;
3465 }
3466 
3467 #if defined(CONFIG_DRM_AMD_DC_DCN)
3468 /* Register IRQ sources and initialize IRQ callbacks */
3469 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3470 {
3471 	struct dc *dc = adev->dm.dc;
3472 	struct common_irq_params *c_irq_params;
3473 	struct dc_interrupt_params int_params = {0};
3474 	int r;
3475 	int i;
3476 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3477 	static const unsigned int vrtl_int_srcid[] = {
3478 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3479 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3480 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3481 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3482 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3483 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3484 	};
3485 #endif
3486 
3487 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3488 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3489 
3490 	/*
3491 	 * Actions of amdgpu_irq_add_id():
3492 	 * 1. Register a set() function with base driver.
3493 	 *    Base driver will call set() function to enable/disable an
3494 	 *    interrupt in DC hardware.
3495 	 * 2. Register amdgpu_dm_irq_handler().
3496 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3497 	 *    coming from DC hardware.
3498 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3499 	 *    for acknowledging and handling.
3500 	 */
3501 
3502 	/* Use VSTARTUP interrupt */
3503 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3504 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3505 			i++) {
3506 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3507 
3508 		if (r) {
3509 			DRM_ERROR("Failed to add crtc irq id!\n");
3510 			return r;
3511 		}
3512 
3513 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3514 		int_params.irq_source =
3515 			dc_interrupt_to_irq_source(dc, i, 0);
3516 
3517 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3518 
3519 		c_irq_params->adev = adev;
3520 		c_irq_params->irq_src = int_params.irq_source;
3521 
3522 		amdgpu_dm_irq_register_interrupt(
3523 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3524 	}
3525 
3526 	/* Use otg vertical line interrupt */
3527 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3528 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3529 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3530 				vrtl_int_srcid[i], &adev->vline0_irq);
3531 
3532 		if (r) {
3533 			DRM_ERROR("Failed to add vline0 irq id!\n");
3534 			return r;
3535 		}
3536 
3537 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3538 		int_params.irq_source =
3539 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3540 
3541 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3542 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3543 			break;
3544 		}
3545 
3546 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3547 					- DC_IRQ_SOURCE_DC1_VLINE0];
3548 
3549 		c_irq_params->adev = adev;
3550 		c_irq_params->irq_src = int_params.irq_source;
3551 
3552 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3553 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3554 	}
3555 #endif
3556 
3557 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3558 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3559 	 * to trigger at end of each vblank, regardless of state of the lock,
3560 	 * matching DCE behaviour.
3561 	 */
3562 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3563 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3564 	     i++) {
3565 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3566 
3567 		if (r) {
3568 			DRM_ERROR("Failed to add vupdate irq id!\n");
3569 			return r;
3570 		}
3571 
3572 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3573 		int_params.irq_source =
3574 			dc_interrupt_to_irq_source(dc, i, 0);
3575 
3576 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3577 
3578 		c_irq_params->adev = adev;
3579 		c_irq_params->irq_src = int_params.irq_source;
3580 
3581 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3582 				dm_vupdate_high_irq, c_irq_params);
3583 	}
3584 
3585 	/* Use GRPH_PFLIP interrupt */
3586 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3587 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3588 			i++) {
3589 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3590 		if (r) {
3591 			DRM_ERROR("Failed to add page flip irq id!\n");
3592 			return r;
3593 		}
3594 
3595 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3596 		int_params.irq_source =
3597 			dc_interrupt_to_irq_source(dc, i, 0);
3598 
3599 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3600 
3601 		c_irq_params->adev = adev;
3602 		c_irq_params->irq_src = int_params.irq_source;
3603 
3604 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3605 				dm_pflip_high_irq, c_irq_params);
3607 	}
3608 
3609 	/* HPD */
3610 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3611 			&adev->hpd_irq);
3612 	if (r) {
3613 		DRM_ERROR("Failed to add hpd irq id!\n");
3614 		return r;
3615 	}
3616 
3617 	register_hpd_handlers(adev);
3618 
3619 	return 0;
3620 }

3621 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3622 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3623 {
3624 	struct dc *dc = adev->dm.dc;
3625 	struct common_irq_params *c_irq_params;
3626 	struct dc_interrupt_params int_params = {0};
3627 	int r, i;
3628 
3629 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3630 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3631 
3632 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3633 			&adev->dmub_outbox_irq);
3634 	if (r) {
3635 		DRM_ERROR("Failed to add outbox irq id!\n");
3636 		return r;
3637 	}
3638 
3639 	if (dc->ctx->dmub_srv) {
3640 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3641 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3642 		int_params.irq_source =
3643 			dc_interrupt_to_irq_source(dc, i, 0);
3644 
3645 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3646 
3647 		c_irq_params->adev = adev;
3648 		c_irq_params->irq_src = int_params.irq_source;
3649 
3650 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3651 				dm_dmub_outbox1_low_irq, c_irq_params);
3652 	}
3653 
3654 	return 0;
3655 }
3656 #endif
3657 
3658 /*
3659  * Acquires the lock for the atomic state object and returns
3660  * the new atomic state.
3661  *
3662  * This should only be called during atomic check.
3663  */
3664 static int dm_atomic_get_state(struct drm_atomic_state *state,
3665 			       struct dm_atomic_state **dm_state)
3666 {
3667 	struct drm_device *dev = state->dev;
3668 	struct amdgpu_device *adev = drm_to_adev(dev);
3669 	struct amdgpu_display_manager *dm = &adev->dm;
3670 	struct drm_private_state *priv_state;
3671 
3672 	if (*dm_state)
3673 		return 0;
3674 
3675 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3676 	if (IS_ERR(priv_state))
3677 		return PTR_ERR(priv_state);
3678 
3679 	*dm_state = to_dm_atomic_state(priv_state);
3680 
3681 	return 0;
3682 }
3683 
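/*
 * Return the DM private state tracked in the given atomic state, or NULL if
 * the DM private object is not part of this commit.
 */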
3684 static struct dm_atomic_state *
3685 dm_atomic_get_new_state(struct drm_atomic_state *state)
3686 {
3687 	struct drm_device *dev = state->dev;
3688 	struct amdgpu_device *adev = drm_to_adev(dev);
3689 	struct amdgpu_display_manager *dm = &adev->dm;
3690 	struct drm_private_obj *obj;
3691 	struct drm_private_state *new_obj_state;
3692 	int i;
3693 
3694 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3695 		if (obj->funcs == dm->atomic_obj.funcs)
3696 			return to_dm_atomic_state(new_obj_state);
3697 	}
3698 
3699 	return NULL;
3700 }
3701 
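/*
 * Duplicate the DM private state for a new atomic commit, including a copy of
 * the current dc_state so the duplicate can be modified and validated
 * independently of the committed state.
 */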
3702 static struct drm_private_state *
3703 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3704 {
3705 	struct dm_atomic_state *old_state, *new_state;
3706 
3707 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3708 	if (!new_state)
3709 		return NULL;
3710 
3711 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3712 
3713 	old_state = to_dm_atomic_state(obj->state);
3714 
3715 	if (old_state && old_state->context)
3716 		new_state->context = dc_copy_state(old_state->context);
3717 
3718 	if (!new_state->context) {
3719 		kfree(new_state);
3720 		return NULL;
3721 	}
3722 
3723 	return &new_state->base;
3724 }
3725 
3726 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3727 				    struct drm_private_state *state)
3728 {
3729 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3730 
3731 	if (dm_state && dm_state->context)
3732 		dc_release_state(dm_state->context);
3733 
3734 	kfree(dm_state);
3735 }
3736 
3737 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3738 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3739 	.atomic_destroy_state = dm_atomic_destroy_state,
3740 };
3741 
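/*
 * Set up the DRM mode_config limits and the DM atomic private object: a
 * baseline dc_state is built from the current DC resource state and
 * registered so later atomic commits can duplicate and modify it.
 */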
3742 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3743 {
3744 	struct dm_atomic_state *state;
3745 	int r;
3746 
3747 	adev->mode_info.mode_config_initialized = true;
3748 
3749 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3750 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3751 
3752 	adev_to_drm(adev)->mode_config.max_width = 16384;
3753 	adev_to_drm(adev)->mode_config.max_height = 16384;
3754 
3755 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3756 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3757 	/* indicates support for immediate flip */
3758 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3759 
3760 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3761 
3762 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3763 	if (!state)
3764 		return -ENOMEM;
3765 
3766 	state->context = dc_create_state(adev->dm.dc);
3767 	if (!state->context) {
3768 		kfree(state);
3769 		return -ENOMEM;
3770 	}
3771 
3772 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3773 
3774 	drm_atomic_private_obj_init(adev_to_drm(adev),
3775 				    &adev->dm.atomic_obj,
3776 				    &state->base,
3777 				    &dm_atomic_state_funcs);
3778 
3779 	r = amdgpu_display_modeset_create_props(adev);
3780 	if (r) {
3781 		dc_release_state(state->context);
3782 		kfree(state);
3783 		return r;
3784 	}
3785 
3786 	r = amdgpu_dm_audio_init(adev);
3787 	if (r) {
3788 		dc_release_state(state->context);
3789 		kfree(state);
3790 		return r;
3791 	}
3792 
3793 	return 0;
3794 }
3795 
3796 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3797 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3798 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3799 
3800 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3801 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3802 
3803 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3804 					    int bl_idx)
3805 {
3806 #if defined(CONFIG_ACPI)
3807 	struct amdgpu_dm_backlight_caps caps;
3808 
3809 	memset(&caps, 0, sizeof(caps));
3810 
3811 	if (dm->backlight_caps[bl_idx].caps_valid)
3812 		return;
3813 
3814 	amdgpu_acpi_get_backlight_caps(&caps);
3815 	if (caps.caps_valid) {
3816 		dm->backlight_caps[bl_idx].caps_valid = true;
3817 		if (caps.aux_support)
3818 			return;
3819 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3820 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3821 	} else {
3822 		dm->backlight_caps[bl_idx].min_input_signal =
3823 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3824 		dm->backlight_caps[bl_idx].max_input_signal =
3825 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3826 	}
3827 #else
3828 	if (dm->backlight_caps[bl_idx].aux_support)
3829 		return;
3830 
3831 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3832 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3833 #endif
3834 }
3835 
3836 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3837 				unsigned *min, unsigned *max)
3838 {
3839 	if (!caps)
3840 		return 0;
3841 
3842 	if (caps->aux_support) {
3843 		// Firmware limits are in nits, DC API wants millinits.
3844 		*max = 1000 * caps->aux_max_input_signal;
3845 		*min = 1000 * caps->aux_min_input_signal;
3846 	} else {
3847 		// Firmware limits are 8-bit, PWM control is 16-bit.
3848 		*max = 0x101 * caps->max_input_signal;
3849 		*min = 0x101 * caps->min_input_signal;
3850 	}
3851 	return 1;
3852 }
3853 
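/*
 * For example, with the default PWM caps above (min 12, max 255) the
 * effective range is 12 * 0x101 = 3084 to 255 * 0x101 = 65535, and the
 * helpers below map user levels 0..255 linearly onto that range.
 */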
3854 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3855 					uint32_t brightness)
3856 {
3857 	unsigned min, max;
3858 
3859 	if (!get_brightness_range(caps, &min, &max))
3860 		return brightness;
3861 
3862 	// Rescale 0..255 to min..max
3863 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3864 				       AMDGPU_MAX_BL_LEVEL);
3865 }
3866 
3867 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3868 				      uint32_t brightness)
3869 {
3870 	unsigned min, max;
3871 
3872 	if (!get_brightness_range(caps, &min, &max))
3873 		return brightness;
3874 
3875 	if (brightness < min)
3876 		return 0;
3877 	// Rescale min..max to 0..255
3878 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3879 				 max - min);
3880 }
3881 
3882 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3883 					 int bl_idx,
3884 					 u32 user_brightness)
3885 {
3886 	struct amdgpu_dm_backlight_caps caps;
3887 	struct dc_link *link;
3888 	u32 brightness;
3889 	bool rc;
3890 
3891 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3892 	caps = dm->backlight_caps[bl_idx];
3893 
3894 	dm->brightness[bl_idx] = user_brightness;
3895 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3896 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3897 
3898 	/* Change brightness based on AUX property */
3899 	if (caps.aux_support) {
3900 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3901 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3902 		if (!rc)
3903 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3904 	} else {
3905 		rc = dc_link_set_backlight_level(link, brightness, 0);
3906 		if (!rc)
3907 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3908 	}
3909 
3910 	return rc ? 0 : 1;
3911 }
3912 
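/*
 * backlight_device callback: map the backlight device back to its eDP index
 * and program that panel's brightness (index 0 is used if the lookup runs
 * out of range).
 */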
3913 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3914 {
3915 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3916 	int i;
3917 
3918 	for (i = 0; i < dm->num_of_edps; i++) {
3919 		if (bd == dm->backlight_dev[i])
3920 			break;
3921 	}
3922 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3923 		i = 0;
3924 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3925 
3926 	return 0;
3927 }
3928 
3929 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3930 					 int bl_idx)
3931 {
3932 	struct amdgpu_dm_backlight_caps caps;
3933 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3934 
3935 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3936 	caps = dm->backlight_caps[bl_idx];
3937 
3938 	if (caps.aux_support) {
3939 		u32 avg, peak;
3940 		bool rc;
3941 
3942 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3943 		if (!rc)
3944 			return dm->brightness[bl_idx];
3945 		return convert_brightness_to_user(&caps, avg);
3946 	} else {
3947 		int ret = dc_link_get_backlight_level(link);
3948 
3949 		if (ret == DC_ERROR_UNEXPECTED)
3950 			return dm->brightness[bl_idx];
3951 		return convert_brightness_to_user(&caps, ret);
3952 	}
3953 }
3954 
3955 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3956 {
3957 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3958 	int i;
3959 
3960 	for (i = 0; i < dm->num_of_edps; i++) {
3961 		if (bd == dm->backlight_dev[i])
3962 			break;
3963 	}
3964 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3965 		i = 0;
3966 	return amdgpu_dm_backlight_get_level(dm, i);
3967 }
3968 
3969 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3970 	.options = BL_CORE_SUSPENDRESUME,
3971 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3972 	.update_status	= amdgpu_dm_backlight_update_status,
3973 };
3974 
3975 static void
3976 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3977 {
3978 	char bl_name[16];
3979 	struct backlight_properties props = { 0 };
3980 
3981 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3982 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
3983 
3984 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3985 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3986 	props.type = BACKLIGHT_RAW;
3987 
3988 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3989 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
3990 
3991 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
3992 								       adev_to_drm(dm->adev)->dev,
3993 								       dm,
3994 								       &amdgpu_dm_backlight_ops,
3995 								       &props);
3996 
3997 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
3998 		DRM_ERROR("DM: Backlight registration failed!\n");
3999 	else
4000 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4001 }
4002 #endif
4003 
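/*
 * Allocate and initialize one DRM plane for the given DC plane capabilities
 * and, when a mode_info pointer is supplied, record it for later CRTC init.
 */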
4004 static int initialize_plane(struct amdgpu_display_manager *dm,
4005 			    struct amdgpu_mode_info *mode_info, int plane_id,
4006 			    enum drm_plane_type plane_type,
4007 			    const struct dc_plane_cap *plane_cap)
4008 {
4009 	struct drm_plane *plane;
4010 	unsigned long possible_crtcs;
4011 	int ret = 0;
4012 
4013 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4014 	if (!plane) {
4015 		DRM_ERROR("KMS: Failed to allocate plane\n");
4016 		return -ENOMEM;
4017 	}
4018 	plane->type = plane_type;
4019 
4020 	/*
4021 	 * HACK: IGT tests expect that the primary plane for a CRTC
4022 	 * can only have one possible CRTC. Only expose support for
4023 	 * all CRTCs on planes that are not going to be used as a primary
4024 	 * plane for a CRTC - i.e. overlay or underlay planes.
4025 	 */
4026 	possible_crtcs = 1 << plane_id;
4027 	if (plane_id >= dm->dc->caps.max_streams)
4028 		possible_crtcs = 0xff;
4029 
4030 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4031 
4032 	if (ret) {
4033 		DRM_ERROR("KMS: Failed to initialize plane\n");
4034 		kfree(plane);
4035 		return ret;
4036 	}
4037 
4038 	if (mode_info)
4039 		mode_info->planes[plane_id] = plane;
4040 
4041 	return ret;
4042 }
4043 
4044 
4045 static void register_backlight_device(struct amdgpu_display_manager *dm,
4046 				      struct dc_link *link)
4047 {
4048 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4049 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4050 
4051 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4052 	    link->type != dc_connection_none) {
4053 		/*
4054 		 * Even if registration fails, we should continue with
4055 		 * DM initialization, because not having a backlight control
4056 		 * is better than a black screen.
4057 		 */
4058 		if (!dm->backlight_dev[dm->num_of_edps])
4059 			amdgpu_dm_register_backlight_device(dm);
4060 
4061 		if (dm->backlight_dev[dm->num_of_edps]) {
4062 			dm->backlight_link[dm->num_of_edps] = link;
4063 			dm->num_of_edps++;
4064 		}
4065 	}
4066 #endif
4067 }
4068 
4069 
4070 /*
4071  * In this architecture, the association
4072  * connector -> encoder -> crtc
4073  * is not really required. The crtc and connector will hold the
4074  * display_index as an abstraction to use with the DAL component.
4075  *
4076  * Returns 0 on success
4077  */
4078 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4079 {
4080 	struct amdgpu_display_manager *dm = &adev->dm;
4081 	int32_t i;
4082 	struct amdgpu_dm_connector *aconnector = NULL;
4083 	struct amdgpu_encoder *aencoder = NULL;
4084 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4085 	uint32_t link_cnt;
4086 	int32_t primary_planes;
4087 	enum dc_connection_type new_connection_type = dc_connection_none;
4088 	const struct dc_plane_cap *plane;
4089 	bool psr_feature_enabled = false;
4090 
4091 	dm->display_indexes_num = dm->dc->caps.max_streams;
4092 	/* Update the actual number of CRTCs in use */
4093 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4094 
4095 	link_cnt = dm->dc->caps.max_links;
4096 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4097 		DRM_ERROR("DM: Failed to initialize mode config\n");
4098 		return -EINVAL;
4099 	}
4100 
4101 	/* There is one primary plane per CRTC */
4102 	primary_planes = dm->dc->caps.max_streams;
4103 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4104 
4105 	/*
4106 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4107 	 * Order is reversed to match iteration order in atomic check.
4108 	 */
4109 	for (i = (primary_planes - 1); i >= 0; i--) {
4110 		plane = &dm->dc->caps.planes[i];
4111 
4112 		if (initialize_plane(dm, mode_info, i,
4113 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4114 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4115 			goto fail;
4116 		}
4117 	}
4118 
4119 	/*
4120 	 * Initialize overlay planes, index starting after primary planes.
4121 	 * These planes have a higher DRM index than the primary planes since
4122 	 * they should be considered as having a higher z-order.
4123 	 * Order is reversed to match iteration order in atomic check.
4124 	 *
4125 	 * Only support DCN for now, and only expose one so we don't encourage
4126 	 * userspace to use up all the pipes.
4127 	 */
4128 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4129 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4130 
4131 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4132 			continue;
4133 
4134 		if (!plane->blends_with_above || !plane->blends_with_below)
4135 			continue;
4136 
4137 		if (!plane->pixel_format_support.argb8888)
4138 			continue;
4139 
4140 		if (initialize_plane(dm, NULL, primary_planes + i,
4141 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4142 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4143 			goto fail;
4144 		}
4145 
4146 		/* Only create one overlay plane. */
4147 		break;
4148 	}
4149 
4150 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4151 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4152 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4153 			goto fail;
4154 		}
4155 
4156 #if defined(CONFIG_DRM_AMD_DC_DCN)
4157 	/* Use Outbox interrupt */
4158 	switch (adev->ip_versions[DCE_HWIP][0]) {
4159 	case IP_VERSION(3, 0, 0):
4160 	case IP_VERSION(3, 1, 2):
4161 	case IP_VERSION(3, 1, 3):
4162 	case IP_VERSION(2, 1, 0):
4163 		if (register_outbox_irq_handlers(dm->adev)) {
4164 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4165 			goto fail;
4166 		}
4167 		break;
4168 	default:
4169 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4170 			      adev->ip_versions[DCE_HWIP][0]);
4171 	}
4172 
4173 	/* Determine whether to enable PSR support by default. */
4174 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4175 		switch (adev->ip_versions[DCE_HWIP][0]) {
4176 		case IP_VERSION(3, 1, 2):
4177 		case IP_VERSION(3, 1, 3):
4178 			psr_feature_enabled = true;
4179 			break;
4180 		default:
4181 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4182 			break;
4183 		}
4184 	}
4185 #endif
4186 
4187 	/* loops over all connectors on the board */
4188 	for (i = 0; i < link_cnt; i++) {
4189 		struct dc_link *link = NULL;
4190 
4191 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4192 			DRM_ERROR(
4193 				"KMS: Cannot support more than %d display indexes\n",
4194 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4195 			continue;
4196 		}
4197 
4198 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4199 		if (!aconnector)
4200 			goto fail;
4201 
4202 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4203 		if (!aencoder)
4204 			goto fail;
4205 
4206 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4207 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4208 			goto fail;
4209 		}
4210 
4211 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4212 			DRM_ERROR("KMS: Failed to initialize connector\n");
4213 			goto fail;
4214 		}
4215 
4216 		link = dc_get_link_at_index(dm->dc, i);
4217 
4218 		if (!dc_link_detect_sink(link, &new_connection_type))
4219 			DRM_ERROR("KMS: Failed to detect connector\n");
4220 
4221 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4222 			emulated_link_detect(link);
4223 			amdgpu_dm_update_connector_after_detect(aconnector);
4224 
4225 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4226 			amdgpu_dm_update_connector_after_detect(aconnector);
4227 			register_backlight_device(dm, link);
4228 
4229 			if (psr_feature_enabled)
4230 				amdgpu_dm_set_psr_caps(link);
4231 		}
4232 
4233 
4234 	}
4235 
4236 	/* Software is initialized. Now we can register interrupt handlers. */
4237 	switch (adev->asic_type) {
4238 #if defined(CONFIG_DRM_AMD_DC_SI)
4239 	case CHIP_TAHITI:
4240 	case CHIP_PITCAIRN:
4241 	case CHIP_VERDE:
4242 	case CHIP_OLAND:
4243 		if (dce60_register_irq_handlers(dm->adev)) {
4244 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4245 			goto fail;
4246 		}
4247 		break;
4248 #endif
4249 	case CHIP_BONAIRE:
4250 	case CHIP_HAWAII:
4251 	case CHIP_KAVERI:
4252 	case CHIP_KABINI:
4253 	case CHIP_MULLINS:
4254 	case CHIP_TONGA:
4255 	case CHIP_FIJI:
4256 	case CHIP_CARRIZO:
4257 	case CHIP_STONEY:
4258 	case CHIP_POLARIS11:
4259 	case CHIP_POLARIS10:
4260 	case CHIP_POLARIS12:
4261 	case CHIP_VEGAM:
4262 	case CHIP_VEGA10:
4263 	case CHIP_VEGA12:
4264 	case CHIP_VEGA20:
4265 		if (dce110_register_irq_handlers(dm->adev)) {
4266 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4267 			goto fail;
4268 		}
4269 		break;
4270 	default:
4271 #if defined(CONFIG_DRM_AMD_DC_DCN)
4272 		switch (adev->ip_versions[DCE_HWIP][0]) {
4273 		case IP_VERSION(1, 0, 0):
4274 		case IP_VERSION(1, 0, 1):
4275 		case IP_VERSION(2, 0, 2):
4276 		case IP_VERSION(2, 0, 3):
4277 		case IP_VERSION(2, 0, 0):
4278 		case IP_VERSION(2, 1, 0):
4279 		case IP_VERSION(3, 0, 0):
4280 		case IP_VERSION(3, 0, 2):
4281 		case IP_VERSION(3, 0, 3):
4282 		case IP_VERSION(3, 0, 1):
4283 		case IP_VERSION(3, 1, 2):
4284 		case IP_VERSION(3, 1, 3):
4285 			if (dcn10_register_irq_handlers(dm->adev)) {
4286 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4287 				goto fail;
4288 			}
4289 			break;
4290 		default:
4291 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4292 					adev->ip_versions[DCE_HWIP][0]);
4293 			goto fail;
4294 		}
4295 #endif
4296 		break;
4297 	}
4298 
4299 	return 0;
4300 fail:
4301 	kfree(aencoder);
4302 	kfree(aconnector);
4303 
4304 	return -EINVAL;
4305 }
4306 
4307 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4308 {
4309 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4311 }
4312 
4313 /******************************************************************************
4314  * amdgpu_display_funcs functions
4315  *****************************************************************************/
4316 
4317 /*
4318  * dm_bandwidth_update - program display watermarks
4319  *
4320  * @adev: amdgpu_device pointer
4321  *
4322  * Calculate and program the display watermarks and line buffer allocation.
4323  */
4324 static void dm_bandwidth_update(struct amdgpu_device *adev)
4325 {
4326 	/* TODO: implement later */
4327 }
4328 
4329 static const struct amdgpu_display_funcs dm_display_funcs = {
4330 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4331 	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4332 	.backlight_set_level = NULL, /* never called for DC */
4333 	.backlight_get_level = NULL, /* never called for DC */
4334 	.hpd_sense = NULL, /* called unconditionally */
4335 	.hpd_set_polarity = NULL, /* called unconditionally */
4336 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4337 	.page_flip_get_scanoutpos =
4338 		dm_crtc_get_scanoutpos, /* called unconditionally */
4339 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4340 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4341 };
4342 
4343 #if defined(CONFIG_DEBUG_KERNEL_DC)
4344 
4345 static ssize_t s3_debug_store(struct device *device,
4346 			      struct device_attribute *attr,
4347 			      const char *buf,
4348 			      size_t count)
4349 {
4350 	int ret;
4351 	int s3_state;
4352 	struct drm_device *drm_dev = dev_get_drvdata(device);
4353 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4354 
4355 	ret = kstrtoint(buf, 0, &s3_state);
4356 
4357 	if (ret == 0) {
4358 		if (s3_state) {
4359 			dm_resume(adev);
4360 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4361 		} else {
4362 			dm_suspend(adev);
		}
4363 	}
4364 
4365 	return ret == 0 ? count : 0;
4366 }
4367 
4368 DEVICE_ATTR_WO(s3_debug);
4369 
4370 #endif
4371 
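/*
 * Early IP init: set the per-ASIC CRTC/HPD/DIG counts used throughout DM and
 * hook up the display and IRQ function tables before the rest of DM loads.
 */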
4372 static int dm_early_init(void *handle)
4373 {
4374 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4375 
4376 	switch (adev->asic_type) {
4377 #if defined(CONFIG_DRM_AMD_DC_SI)
4378 	case CHIP_TAHITI:
4379 	case CHIP_PITCAIRN:
4380 	case CHIP_VERDE:
4381 		adev->mode_info.num_crtc = 6;
4382 		adev->mode_info.num_hpd = 6;
4383 		adev->mode_info.num_dig = 6;
4384 		break;
4385 	case CHIP_OLAND:
4386 		adev->mode_info.num_crtc = 2;
4387 		adev->mode_info.num_hpd = 2;
4388 		adev->mode_info.num_dig = 2;
4389 		break;
4390 #endif
4391 	case CHIP_BONAIRE:
4392 	case CHIP_HAWAII:
4393 		adev->mode_info.num_crtc = 6;
4394 		adev->mode_info.num_hpd = 6;
4395 		adev->mode_info.num_dig = 6;
4396 		break;
4397 	case CHIP_KAVERI:
4398 		adev->mode_info.num_crtc = 4;
4399 		adev->mode_info.num_hpd = 6;
4400 		adev->mode_info.num_dig = 7;
4401 		break;
4402 	case CHIP_KABINI:
4403 	case CHIP_MULLINS:
4404 		adev->mode_info.num_crtc = 2;
4405 		adev->mode_info.num_hpd = 6;
4406 		adev->mode_info.num_dig = 6;
4407 		break;
4408 	case CHIP_FIJI:
4409 	case CHIP_TONGA:
4410 		adev->mode_info.num_crtc = 6;
4411 		adev->mode_info.num_hpd = 6;
4412 		adev->mode_info.num_dig = 7;
4413 		break;
4414 	case CHIP_CARRIZO:
4415 		adev->mode_info.num_crtc = 3;
4416 		adev->mode_info.num_hpd = 6;
4417 		adev->mode_info.num_dig = 9;
4418 		break;
4419 	case CHIP_STONEY:
4420 		adev->mode_info.num_crtc = 2;
4421 		adev->mode_info.num_hpd = 6;
4422 		adev->mode_info.num_dig = 9;
4423 		break;
4424 	case CHIP_POLARIS11:
4425 	case CHIP_POLARIS12:
4426 		adev->mode_info.num_crtc = 5;
4427 		adev->mode_info.num_hpd = 5;
4428 		adev->mode_info.num_dig = 5;
4429 		break;
4430 	case CHIP_POLARIS10:
4431 	case CHIP_VEGAM:
4432 		adev->mode_info.num_crtc = 6;
4433 		adev->mode_info.num_hpd = 6;
4434 		adev->mode_info.num_dig = 6;
4435 		break;
4436 	case CHIP_VEGA10:
4437 	case CHIP_VEGA12:
4438 	case CHIP_VEGA20:
4439 		adev->mode_info.num_crtc = 6;
4440 		adev->mode_info.num_hpd = 6;
4441 		adev->mode_info.num_dig = 6;
4442 		break;
4443 	default:
4444 #if defined(CONFIG_DRM_AMD_DC_DCN)
4445 		switch (adev->ip_versions[DCE_HWIP][0]) {
4446 		case IP_VERSION(2, 0, 2):
4447 		case IP_VERSION(3, 0, 0):
4448 			adev->mode_info.num_crtc = 6;
4449 			adev->mode_info.num_hpd = 6;
4450 			adev->mode_info.num_dig = 6;
4451 			break;
4452 		case IP_VERSION(2, 0, 0):
4453 		case IP_VERSION(3, 0, 2):
4454 			adev->mode_info.num_crtc = 5;
4455 			adev->mode_info.num_hpd = 5;
4456 			adev->mode_info.num_dig = 5;
4457 			break;
4458 		case IP_VERSION(2, 0, 3):
4459 		case IP_VERSION(3, 0, 3):
4460 			adev->mode_info.num_crtc = 2;
4461 			adev->mode_info.num_hpd = 2;
4462 			adev->mode_info.num_dig = 2;
4463 			break;
4464 		case IP_VERSION(1, 0, 0):
4465 		case IP_VERSION(1, 0, 1):
4466 		case IP_VERSION(3, 0, 1):
4467 		case IP_VERSION(2, 1, 0):
4468 		case IP_VERSION(3, 1, 2):
4469 		case IP_VERSION(3, 1, 3):
4470 			adev->mode_info.num_crtc = 4;
4471 			adev->mode_info.num_hpd = 4;
4472 			adev->mode_info.num_dig = 4;
4473 			break;
4474 		default:
4475 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4476 					adev->ip_versions[DCE_HWIP][0]);
4477 			return -EINVAL;
4478 		}
4479 #endif
4480 		break;
4481 	}
4482 
4483 	amdgpu_dm_set_irq_funcs(adev);
4484 
4485 	if (adev->mode_info.funcs == NULL)
4486 		adev->mode_info.funcs = &dm_display_funcs;
4487 
4488 	/*
4489 	 * Note: Do NOT change adev->audio_endpt_rreg and
4490 	 * adev->audio_endpt_wreg because they are initialised in
4491 	 * amdgpu_device_init()
4492 	 */
4493 #if defined(CONFIG_DEBUG_KERNEL_DC)
4494 	device_create_file(
4495 		adev_to_drm(adev)->dev,
4496 		&dev_attr_s3_debug);
4497 #endif
4498 
4499 	return 0;
4500 }
4501 
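/*
 * Small helpers: modeset_required() is true when an active CRTC needs a full
 * modeset; modereset_required() is true when a CRTC undergoing a modeset is
 * being disabled.
 */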
4502 static bool modeset_required(struct drm_crtc_state *crtc_state,
4503 			     struct dc_stream_state *new_stream,
4504 			     struct dc_stream_state *old_stream)
4505 {
4506 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4507 }
4508 
4509 static bool modereset_required(struct drm_crtc_state *crtc_state)
4510 {
4511 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4512 }
4513 
4514 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4515 {
4516 	drm_encoder_cleanup(encoder);
4517 	kfree(encoder);
4518 }
4519 
4520 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4521 	.destroy = amdgpu_dm_encoder_destroy,
4522 };
4523 
4524 
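/*
 * Return the minimum downscale and maximum upscale factors DC supports for
 * the given framebuffer format, in thousandths (1000 == 1.0x).
 */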
4525 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4526 					 struct drm_framebuffer *fb,
4527 					 int *min_downscale, int *max_upscale)
4528 {
4529 	struct amdgpu_device *adev = drm_to_adev(dev);
4530 	struct dc *dc = adev->dm.dc;
4531 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4532 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4533 
4534 	switch (fb->format->format) {
4535 	case DRM_FORMAT_P010:
4536 	case DRM_FORMAT_NV12:
4537 	case DRM_FORMAT_NV21:
4538 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4539 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4540 		break;
4541 
4542 	case DRM_FORMAT_XRGB16161616F:
4543 	case DRM_FORMAT_ARGB16161616F:
4544 	case DRM_FORMAT_XBGR16161616F:
4545 	case DRM_FORMAT_ABGR16161616F:
4546 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4547 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4548 		break;
4549 
4550 	default:
4551 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4552 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4553 		break;
4554 	}
4555 
4556 	/*
4557 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use
4558 	 * a scaling factor of 1.0 == 1000 units.
4559 	 */
4560 	if (*max_upscale == 1)
4561 		*max_upscale = 1000;
4562 
4563 	if (*min_downscale == 1)
4564 		*min_downscale = 1000;
4565 }
4566 
4567 
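/*
 * Convert a DRM plane state (16.16 fixed point source rect, integer CRTC
 * destination rect) into DC's dc_scaling_info and validate the resulting
 * scaling factors against the per-format plane caps.
 */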
4568 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4569 				struct dc_scaling_info *scaling_info)
4570 {
4571 	int scale_w, scale_h, min_downscale, max_upscale;
4572 
4573 	memset(scaling_info, 0, sizeof(*scaling_info));
4574 
4575 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4576 	scaling_info->src_rect.x = state->src_x >> 16;
4577 	scaling_info->src_rect.y = state->src_y >> 16;
4578 
4579 	/*
4580 	 * For reasons we don't (yet) fully understand a non-zero
4581 	 * src_y coordinate into an NV12 buffer can cause a
4582 	 * system hang. To avoid hangs (and maybe be overly cautious)
4583 	 * let's reject both non-zero src_x and src_y.
4584 	 *
4585 	 * We currently know of only one use-case to reproduce a
4586 	 * scenario with non-zero src_x and src_y for NV12, which
4587 	 * is to gesture the YouTube Android app into full screen
4588 	 * on ChromeOS.
4589 	 */
4590 	if (state->fb &&
4591 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4592 	    (scaling_info->src_rect.x != 0 ||
4593 	     scaling_info->src_rect.y != 0))
4594 		return -EINVAL;
4595 
4596 	scaling_info->src_rect.width = state->src_w >> 16;
4597 	if (scaling_info->src_rect.width == 0)
4598 		return -EINVAL;
4599 
4600 	scaling_info->src_rect.height = state->src_h >> 16;
4601 	if (scaling_info->src_rect.height == 0)
4602 		return -EINVAL;
4603 
4604 	scaling_info->dst_rect.x = state->crtc_x;
4605 	scaling_info->dst_rect.y = state->crtc_y;
4606 
4607 	if (state->crtc_w == 0)
4608 		return -EINVAL;
4609 
4610 	scaling_info->dst_rect.width = state->crtc_w;
4611 
4612 	if (state->crtc_h == 0)
4613 		return -EINVAL;
4614 
4615 	scaling_info->dst_rect.height = state->crtc_h;
4616 
4617 	/* DRM doesn't specify clipping on destination output. */
4618 	scaling_info->clip_rect = scaling_info->dst_rect;
4619 
4620 	/* Validate scaling per-format with DC plane caps */
4621 	if (state->plane && state->plane->dev && state->fb) {
4622 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4623 					     &min_downscale, &max_upscale);
4624 	} else {
4625 		min_downscale = 250;
4626 		max_upscale = 16000;
4627 	}
4628 
4629 	scale_w = scaling_info->dst_rect.width * 1000 /
4630 		  scaling_info->src_rect.width;
4631 
4632 	if (scale_w < min_downscale || scale_w > max_upscale)
4633 		return -EINVAL;
4634 
4635 	scale_h = scaling_info->dst_rect.height * 1000 /
4636 		  scaling_info->src_rect.height;
4637 
4638 	if (scale_h < min_downscale || scale_h > max_upscale)
4639 		return -EINVAL;
4640 
4641 	/*
4642 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4643 	 * assume reasonable defaults based on the format.
4644 	 */
4645 
4646 	return 0;
4647 }
4648 
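/*
 * Translate GFX8 tiling flags (array mode, bank geometry, tile split, pipe
 * config) into DC's union dc_tiling_info.
 */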
4649 static void
4650 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4651 				 uint64_t tiling_flags)
4652 {
4653 	/* Fill GFX8 params */
4654 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4655 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4656 
4657 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4658 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4659 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4660 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4661 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4662 
4663 		/* XXX fix me for VI */
4664 		tiling_info->gfx8.num_banks = num_banks;
4665 		tiling_info->gfx8.array_mode =
4666 				DC_ARRAY_2D_TILED_THIN1;
4667 		tiling_info->gfx8.tile_split = tile_split;
4668 		tiling_info->gfx8.bank_width = bankw;
4669 		tiling_info->gfx8.bank_height = bankh;
4670 		tiling_info->gfx8.tile_aspect = mtaspect;
4671 		tiling_info->gfx8.tile_mode =
4672 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4673 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4674 			== DC_ARRAY_1D_TILED_THIN1) {
4675 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4676 	}
4677 
4678 	tiling_info->gfx8.pipe_config =
4679 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4680 }
4681 
4682 static void
4683 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4684 				  union dc_tiling_info *tiling_info)
4685 {
4686 	tiling_info->gfx9.num_pipes =
4687 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4688 	tiling_info->gfx9.num_banks =
4689 		adev->gfx.config.gb_addr_config_fields.num_banks;
4690 	tiling_info->gfx9.pipe_interleave =
4691 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4692 	tiling_info->gfx9.num_shader_engines =
4693 		adev->gfx.config.gb_addr_config_fields.num_se;
4694 	tiling_info->gfx9.max_compressed_frags =
4695 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4696 	tiling_info->gfx9.num_rb_per_se =
4697 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4698 	tiling_info->gfx9.shaderEnable = 1;
4699 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4700 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4701 }
4702 
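/*
 * Validate the requested DCC parameters against what DC reports as supported
 * for this format, surface size and swizzle mode. Returns -EINVAL if the
 * surface cannot be scanned out with DCC enabled as described.
 */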
4703 static int
4704 validate_dcc(struct amdgpu_device *adev,
4705 	     const enum surface_pixel_format format,
4706 	     const enum dc_rotation_angle rotation,
4707 	     const union dc_tiling_info *tiling_info,
4708 	     const struct dc_plane_dcc_param *dcc,
4709 	     const struct dc_plane_address *address,
4710 	     const struct plane_size *plane_size)
4711 {
4712 	struct dc *dc = adev->dm.dc;
4713 	struct dc_dcc_surface_param input;
4714 	struct dc_surface_dcc_cap output;
4715 
4716 	memset(&input, 0, sizeof(input));
4717 	memset(&output, 0, sizeof(output));
4718 
4719 	if (!dcc->enable)
4720 		return 0;
4721 
4722 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4723 	    !dc->cap_funcs.get_dcc_compression_cap)
4724 		return -EINVAL;
4725 
4726 	input.format = format;
4727 	input.surface_size.width = plane_size->surface_size.width;
4728 	input.surface_size.height = plane_size->surface_size.height;
4729 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4730 
4731 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4732 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4733 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4734 		input.scan = SCAN_DIRECTION_VERTICAL;
4735 
4736 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4737 		return -EINVAL;
4738 
4739 	if (!output.capable)
4740 		return -EINVAL;
4741 
4742 	if (dcc->independent_64b_blks == 0 &&
4743 	    output.grph.rgb.independent_64b_blks != 0)
4744 		return -EINVAL;
4745 
4746 	return 0;
4747 }
4748 
4749 static bool
4750 modifier_has_dcc(uint64_t modifier)
4751 {
4752 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4753 }
4754 
4755 static unsigned
4756 modifier_gfx9_swizzle_mode(uint64_t modifier)
4757 {
4758 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4759 		return 0;
4760 
4761 	return AMD_FMT_MOD_GET(TILE, modifier);
4762 }
4763 
4764 static const struct drm_format_info *
4765 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4766 {
4767 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4768 }
4769 
4770 static void
4771 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4772 				    union dc_tiling_info *tiling_info,
4773 				    uint64_t modifier)
4774 {
4775 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4776 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4777 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4778 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4779 
4780 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4781 
4782 	if (!IS_AMD_FMT_MOD(modifier))
4783 		return;
4784 
4785 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4786 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4787 
4788 	if (adev->family >= AMDGPU_FAMILY_NV) {
4789 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4790 	} else {
4791 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4792 
4793 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4794 	}
4795 }
4796 
4797 enum dm_micro_swizzle {
4798 	MICRO_SWIZZLE_Z = 0,
4799 	MICRO_SWIZZLE_S = 1,
4800 	MICRO_SWIZZLE_D = 2,
4801 	MICRO_SWIZZLE_R = 3
4802 };
4803 
4804 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4805 					  uint32_t format,
4806 					  uint64_t modifier)
4807 {
4808 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4809 	const struct drm_format_info *info = drm_format_info(format);
4810 	int i;
4811 
4812 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4813 
4814 	if (!info)
4815 		return false;
4816 
4817 	/*
4818 	 * We always have to allow these modifiers:
4819 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4820 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4821 	 */
4822 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4823 	    modifier == DRM_FORMAT_MOD_INVALID) {
4824 		return true;
4825 	}
4826 
4827 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4828 	for (i = 0; i < plane->modifier_count; i++) {
4829 		if (modifier == plane->modifiers[i])
4830 			break;
4831 	}
4832 	if (i == plane->modifier_count)
4833 		return false;
4834 
4835 	/*
4836 	 * For D swizzle the canonical modifier depends on the bpp, so check
4837 	 * it here.
4838 	 */
4839 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4840 	    adev->family >= AMDGPU_FAMILY_NV) {
4841 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4842 			return false;
4843 	}
4844 
4845 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4846 	    info->cpp[0] < 8)
4847 		return false;
4848 
4849 	if (modifier_has_dcc(modifier)) {
4850 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4851 		if (info->cpp[0] != 4)
4852 			return false;
4853 		/* We support multi-planar formats, but not when combined with
4854 		 * additional DCC metadata planes. */
4855 		if (info->num_planes > 1)
4856 			return false;
4857 	}
4858 
4859 	return true;
4860 }
4861 
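/*
 * Append a modifier to the dynamically sized list, doubling the allocation
 * when it is full. On allocation failure the list is freed and set to NULL,
 * which callers treat as -ENOMEM.
 */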
4862 static void
4863 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4864 {
4865 	if (!*mods)
4866 		return;
4867 
4868 	if (*cap - *size < 1) {
4869 		uint64_t new_cap = *cap * 2;
4870 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4871 
4872 		if (!new_mods) {
4873 			kfree(*mods);
4874 			*mods = NULL;
4875 			return;
4876 		}
4877 
4878 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4879 		kfree(*mods);
4880 		*mods = new_mods;
4881 		*cap = new_cap;
4882 	}
4883 
4884 	(*mods)[*size] = mod;
4885 	*size += 1;
4886 }
4887 
4888 static void
4889 add_gfx9_modifiers(const struct amdgpu_device *adev,
4890 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4891 {
4892 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4893 	int pipe_xor_bits = min(8, pipes +
4894 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4895 	int bank_xor_bits = min(8 - pipe_xor_bits,
4896 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4897 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4898 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4899 
4900 
4901 	if (adev->family == AMDGPU_FAMILY_RV) {
4902 		/* Raven2 and later */
4903 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4904 
4905 		/*
4906 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4907 		 * doesn't support _D on DCN
4908 		 */
4909 
4910 		if (has_constant_encode) {
4911 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4912 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4913 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4914 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4915 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4916 				    AMD_FMT_MOD_SET(DCC, 1) |
4917 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4918 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4919 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4920 		}
4921 
4922 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4923 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4924 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4925 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4926 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4927 			    AMD_FMT_MOD_SET(DCC, 1) |
4928 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4929 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4930 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4931 
4932 		if (has_constant_encode) {
4933 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4934 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4935 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4936 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4937 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4938 				    AMD_FMT_MOD_SET(DCC, 1) |
4939 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4940 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4941 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4942 
4943 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4944 				    AMD_FMT_MOD_SET(RB, rb) |
4945 				    AMD_FMT_MOD_SET(PIPE, pipes));
4946 		}
4947 
4948 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4949 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4950 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4951 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4952 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4953 			    AMD_FMT_MOD_SET(DCC, 1) |
4954 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4955 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4956 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4957 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4958 			    AMD_FMT_MOD_SET(RB, rb) |
4959 			    AMD_FMT_MOD_SET(PIPE, pipes));
4960 	}
4961 
4962 	/*
4963 	 * Only supported for 64bpp on Raven, will be filtered on format in
4964 	 * dm_plane_format_mod_supported.
4965 	 */
4966 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4967 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4968 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4969 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4970 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4971 
4972 	if (adev->family == AMDGPU_FAMILY_RV) {
4973 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4974 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4975 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4976 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4977 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4978 	}
4979 
4980 	/*
4981 	 * Only supported for 64bpp on Raven, will be filtered on format in
4982 	 * dm_plane_format_mod_supported.
4983 	 */
4984 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4985 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4986 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4987 
4988 	if (adev->family == AMDGPU_FAMILY_RV) {
4989 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4990 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4991 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4992 	}
4993 }
4994 
4995 static void
4996 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4997 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4998 {
4999 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5000 
5001 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5002 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5003 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5004 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5005 		    AMD_FMT_MOD_SET(DCC, 1) |
5006 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5007 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5008 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5009 
5010 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5011 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5012 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5013 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5014 		    AMD_FMT_MOD_SET(DCC, 1) |
5015 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5016 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5017 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5018 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5019 
5020 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5021 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5022 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5023 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5024 
5025 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5026 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5027 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5028 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5029 
5030 
5031 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5032 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5033 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5034 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5035 
5036 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5037 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5038 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5039 }
5040 
5041 static void
5042 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5043 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5044 {
5045 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5046 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5047 
5048 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5049 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5050 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5051 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5052 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5053 		    AMD_FMT_MOD_SET(DCC, 1) |
5054 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5055 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5056 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5057 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5058 
5059 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5060 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5061 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5062 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5063 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5064 		    AMD_FMT_MOD_SET(DCC, 1) |
5065 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5066 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5067 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5068 
5069 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5070 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5071 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5072 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5073 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5074 		    AMD_FMT_MOD_SET(DCC, 1) |
5075 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5076 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5077 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5078 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5079 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5080 
5081 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5082 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5083 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5084 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5085 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5086 		    AMD_FMT_MOD_SET(DCC, 1) |
5087 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5088 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5089 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5090 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5091 
5092 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5093 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5094 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5095 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5096 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5097 
5098 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5099 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5100 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5101 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5102 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5103 
5104 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5105 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5106 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5107 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5108 
5109 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5110 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5111 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5112 }
5113 
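/*
 * Build the modifier list advertised for a plane. Cursor planes only get
 * LINEAR, other planes get the family-specific tiled/DCC modifiers plus
 * LINEAR, and the list is terminated with DRM_FORMAT_MOD_INVALID. Pre-GFX9
 * ASICs advertise no modifiers.
 */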
5114 static int
5115 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5116 {
5117 	uint64_t size = 0, capacity = 128;
5118 	*mods = NULL;
5119 
5120 	/* We have not hooked up any pre-GFX9 modifiers. */
5121 	if (adev->family < AMDGPU_FAMILY_AI)
5122 		return 0;
5123 
5124 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5125 
5126 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5127 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5128 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5129 		return *mods ? 0 : -ENOMEM;
5130 	}
5131 
5132 	switch (adev->family) {
5133 	case AMDGPU_FAMILY_AI:
5134 	case AMDGPU_FAMILY_RV:
5135 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5136 		break;
5137 	case AMDGPU_FAMILY_NV:
5138 	case AMDGPU_FAMILY_VGH:
5139 	case AMDGPU_FAMILY_YC:
5140 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5141 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5142 		else
5143 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5144 		break;
5145 	}
5146 
5147 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5148 
5149 	/* INVALID marks the end of the list. */
5150 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5151 
5152 	if (!*mods)
5153 		return -ENOMEM;
5154 
5155 	return 0;
5156 }
5157 
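/*
 * Translate the framebuffer modifier into DC tiling info and, when the
 * modifier carries DCC, fill the DCC parameters and metadata address before
 * validating the result with DC.
 */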
5158 static int
5159 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5160 					  const struct amdgpu_framebuffer *afb,
5161 					  const enum surface_pixel_format format,
5162 					  const enum dc_rotation_angle rotation,
5163 					  const struct plane_size *plane_size,
5164 					  union dc_tiling_info *tiling_info,
5165 					  struct dc_plane_dcc_param *dcc,
5166 					  struct dc_plane_address *address,
5167 					  const bool force_disable_dcc)
5168 {
5169 	const uint64_t modifier = afb->base.modifier;
5170 	int ret = 0;
5171 
5172 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5173 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5174 
5175 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5176 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5177 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5178 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5179 
5180 		dcc->enable = 1;
5181 		dcc->meta_pitch = afb->base.pitches[1];
5182 		dcc->independent_64b_blks = independent_64b_blks;
5183 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5184 			if (independent_64b_blks && independent_128b_blks)
5185 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5186 			else if (independent_128b_blks)
5187 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5188 			else if (independent_64b_blks && !independent_128b_blks)
5189 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5190 			else
5191 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5192 		} else {
5193 			if (independent_64b_blks)
5194 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5195 			else
5196 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5197 		}
5198 
5199 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5200 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5201 	}
5202 
5203 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5204 	if (ret)
5205 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5206 
5207 	return ret;
5208 }
5209 
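/*
 * Fill plane size and surface addresses from the framebuffer: a single
 * graphics surface for RGB formats or separate luma/chroma surfaces for video
 * formats, then apply tiling (modifier-based on GFX9+, flag-based on older
 * ASICs).
 */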
5210 static int
5211 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5212 			     const struct amdgpu_framebuffer *afb,
5213 			     const enum surface_pixel_format format,
5214 			     const enum dc_rotation_angle rotation,
5215 			     const uint64_t tiling_flags,
5216 			     union dc_tiling_info *tiling_info,
5217 			     struct plane_size *plane_size,
5218 			     struct dc_plane_dcc_param *dcc,
5219 			     struct dc_plane_address *address,
5220 			     bool tmz_surface,
5221 			     bool force_disable_dcc)
5222 {
5223 	const struct drm_framebuffer *fb = &afb->base;
5224 	int ret;
5225 
5226 	memset(tiling_info, 0, sizeof(*tiling_info));
5227 	memset(plane_size, 0, sizeof(*plane_size));
5228 	memset(dcc, 0, sizeof(*dcc));
5229 	memset(address, 0, sizeof(*address));
5230 
5231 	address->tmz_surface = tmz_surface;
5232 
5233 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5234 		uint64_t addr = afb->address + fb->offsets[0];
5235 
5236 		plane_size->surface_size.x = 0;
5237 		plane_size->surface_size.y = 0;
5238 		plane_size->surface_size.width = fb->width;
5239 		plane_size->surface_size.height = fb->height;
5240 		plane_size->surface_pitch =
5241 			fb->pitches[0] / fb->format->cpp[0];
5242 
5243 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5244 		address->grph.addr.low_part = lower_32_bits(addr);
5245 		address->grph.addr.high_part = upper_32_bits(addr);
5246 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5247 		uint64_t luma_addr = afb->address + fb->offsets[0];
5248 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5249 
5250 		plane_size->surface_size.x = 0;
5251 		plane_size->surface_size.y = 0;
5252 		plane_size->surface_size.width = fb->width;
5253 		plane_size->surface_size.height = fb->height;
5254 		plane_size->surface_pitch =
5255 			fb->pitches[0] / fb->format->cpp[0];
5256 
5257 		plane_size->chroma_size.x = 0;
5258 		plane_size->chroma_size.y = 0;
5259 		/* TODO: set these based on surface format */
5260 		plane_size->chroma_size.width = fb->width / 2;
5261 		plane_size->chroma_size.height = fb->height / 2;
5262 
5263 		plane_size->chroma_pitch =
5264 			fb->pitches[1] / fb->format->cpp[1];
5265 
5266 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5267 		address->video_progressive.luma_addr.low_part =
5268 			lower_32_bits(luma_addr);
5269 		address->video_progressive.luma_addr.high_part =
5270 			upper_32_bits(luma_addr);
5271 		address->video_progressive.chroma_addr.low_part =
5272 			lower_32_bits(chroma_addr);
5273 		address->video_progressive.chroma_addr.high_part =
5274 			upper_32_bits(chroma_addr);
5275 	}
5276 
5277 	if (adev->family >= AMDGPU_FAMILY_AI) {
5278 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5279 								rotation, plane_size,
5280 								tiling_info, dcc,
5281 								address,
5282 								force_disable_dcc);
5283 		if (ret)
5284 			return ret;
5285 	} else {
5286 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5287 	}
5288 
5289 	return 0;
5290 }
5291 
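/*
 * Derive per-pixel and global alpha blending settings from the DRM plane
 * state; only overlay planes are considered.
 */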
5292 static void
5293 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5294 			       bool *per_pixel_alpha, bool *global_alpha,
5295 			       int *global_alpha_value)
5296 {
5297 	*per_pixel_alpha = false;
5298 	*global_alpha = false;
5299 	*global_alpha_value = 0xff;
5300 
5301 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5302 		return;
5303 
5304 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5305 		static const uint32_t alpha_formats[] = {
5306 			DRM_FORMAT_ARGB8888,
5307 			DRM_FORMAT_RGBA8888,
5308 			DRM_FORMAT_ABGR8888,
5309 		};
5310 		uint32_t format = plane_state->fb->format->format;
5311 		unsigned int i;
5312 
5313 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5314 			if (format == alpha_formats[i]) {
5315 				*per_pixel_alpha = true;
5316 				break;
5317 			}
5318 		}
5319 	}
5320 
5321 	if (plane_state->alpha < 0xffff) {
5322 		*global_alpha = true;
5323 		*global_alpha_value = plane_state->alpha >> 8;
5324 	}
5325 }
5326 
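/*
 * Map the DRM color encoding and range properties of a YCbCr plane onto a DC
 * color space; RGB formats always use sRGB.
 */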
5327 static int
5328 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5329 			    const enum surface_pixel_format format,
5330 			    enum dc_color_space *color_space)
5331 {
5332 	bool full_range;
5333 
5334 	*color_space = COLOR_SPACE_SRGB;
5335 
5336 	/* DRM color properties only affect non-RGB formats. */
5337 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5338 		return 0;
5339 
5340 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5341 
5342 	switch (plane_state->color_encoding) {
5343 	case DRM_COLOR_YCBCR_BT601:
5344 		if (full_range)
5345 			*color_space = COLOR_SPACE_YCBCR601;
5346 		else
5347 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5348 		break;
5349 
5350 	case DRM_COLOR_YCBCR_BT709:
5351 		if (full_range)
5352 			*color_space = COLOR_SPACE_YCBCR709;
5353 		else
5354 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5355 		break;
5356 
5357 	case DRM_COLOR_YCBCR_BT2020:
5358 		if (full_range)
5359 			*color_space = COLOR_SPACE_2020_YCBCR;
5360 		else
5361 			return -EINVAL;
5362 		break;
5363 
5364 	default:
5365 		return -EINVAL;
5366 	}
5367 
5368 	return 0;
5369 }
5370 
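/*
 * Convert the DRM plane state into a DC plane_info: map the fourcc format and
 * rotation, then fill buffer attributes, color space and blending.
 */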
5371 static int
5372 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5373 			    const struct drm_plane_state *plane_state,
5374 			    const uint64_t tiling_flags,
5375 			    struct dc_plane_info *plane_info,
5376 			    struct dc_plane_address *address,
5377 			    bool tmz_surface,
5378 			    bool force_disable_dcc)
5379 {
5380 	const struct drm_framebuffer *fb = plane_state->fb;
5381 	const struct amdgpu_framebuffer *afb =
5382 		to_amdgpu_framebuffer(plane_state->fb);
5383 	int ret;
5384 
5385 	memset(plane_info, 0, sizeof(*plane_info));
5386 
5387 	switch (fb->format->format) {
5388 	case DRM_FORMAT_C8:
5389 		plane_info->format =
5390 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5391 		break;
5392 	case DRM_FORMAT_RGB565:
5393 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5394 		break;
5395 	case DRM_FORMAT_XRGB8888:
5396 	case DRM_FORMAT_ARGB8888:
5397 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5398 		break;
5399 	case DRM_FORMAT_XRGB2101010:
5400 	case DRM_FORMAT_ARGB2101010:
5401 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5402 		break;
5403 	case DRM_FORMAT_XBGR2101010:
5404 	case DRM_FORMAT_ABGR2101010:
5405 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5406 		break;
5407 	case DRM_FORMAT_XBGR8888:
5408 	case DRM_FORMAT_ABGR8888:
5409 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5410 		break;
5411 	case DRM_FORMAT_NV21:
5412 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5413 		break;
5414 	case DRM_FORMAT_NV12:
5415 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5416 		break;
5417 	case DRM_FORMAT_P010:
5418 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5419 		break;
5420 	case DRM_FORMAT_XRGB16161616F:
5421 	case DRM_FORMAT_ARGB16161616F:
5422 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5423 		break;
5424 	case DRM_FORMAT_XBGR16161616F:
5425 	case DRM_FORMAT_ABGR16161616F:
5426 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5427 		break;
5428 	case DRM_FORMAT_XRGB16161616:
5429 	case DRM_FORMAT_ARGB16161616:
5430 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5431 		break;
5432 	case DRM_FORMAT_XBGR16161616:
5433 	case DRM_FORMAT_ABGR16161616:
5434 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5435 		break;
5436 	default:
5437 		DRM_ERROR(
5438 			"Unsupported screen format %p4cc\n",
5439 			&fb->format->format);
5440 		return -EINVAL;
5441 	}
5442 
5443 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5444 	case DRM_MODE_ROTATE_0:
5445 		plane_info->rotation = ROTATION_ANGLE_0;
5446 		break;
5447 	case DRM_MODE_ROTATE_90:
5448 		plane_info->rotation = ROTATION_ANGLE_90;
5449 		break;
5450 	case DRM_MODE_ROTATE_180:
5451 		plane_info->rotation = ROTATION_ANGLE_180;
5452 		break;
5453 	case DRM_MODE_ROTATE_270:
5454 		plane_info->rotation = ROTATION_ANGLE_270;
5455 		break;
5456 	default:
5457 		plane_info->rotation = ROTATION_ANGLE_0;
5458 		break;
5459 	}
5460 
5461 	plane_info->visible = true;
5462 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5463 
5464 	plane_info->layer_index = 0;
5465 
5466 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5467 					  &plane_info->color_space);
5468 	if (ret)
5469 		return ret;
5470 
5471 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5472 					   plane_info->rotation, tiling_flags,
5473 					   &plane_info->tiling_info,
5474 					   &plane_info->plane_size,
5475 					   &plane_info->dcc, address, tmz_surface,
5476 					   force_disable_dcc);
5477 	if (ret)
5478 		return ret;
5479 
5480 	fill_blending_from_plane_state(
5481 		plane_state, &plane_info->per_pixel_alpha,
5482 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5483 
5484 	return 0;
5485 }
5486 
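/*
 * Populate a DC plane state from the DRM plane state: scaling rectangles,
 * pixel format, buffer attributes, blending and color management.
 */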
5487 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5488 				    struct dc_plane_state *dc_plane_state,
5489 				    struct drm_plane_state *plane_state,
5490 				    struct drm_crtc_state *crtc_state)
5491 {
5492 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5493 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5494 	struct dc_scaling_info scaling_info;
5495 	struct dc_plane_info plane_info;
5496 	int ret;
5497 	bool force_disable_dcc = false;
5498 
5499 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5500 	if (ret)
5501 		return ret;
5502 
5503 	dc_plane_state->src_rect = scaling_info.src_rect;
5504 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5505 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5506 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5507 
5508 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5509 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5510 					  afb->tiling_flags,
5511 					  &plane_info,
5512 					  &dc_plane_state->address,
5513 					  afb->tmz_surface,
5514 					  force_disable_dcc);
5515 	if (ret)
5516 		return ret;
5517 
5518 	dc_plane_state->format = plane_info.format;
5519 	dc_plane_state->color_space = plane_info.color_space;
5521 	dc_plane_state->plane_size = plane_info.plane_size;
5522 	dc_plane_state->rotation = plane_info.rotation;
5523 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5524 	dc_plane_state->stereo_format = plane_info.stereo_format;
5525 	dc_plane_state->tiling_info = plane_info.tiling_info;
5526 	dc_plane_state->visible = plane_info.visible;
5527 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5528 	dc_plane_state->global_alpha = plane_info.global_alpha;
5529 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5530 	dc_plane_state->dcc = plane_info.dcc;
5531 	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
5532 	dc_plane_state->flip_int_enabled = true;
5533 
5534 	/*
5535 	 * Always set input transfer function, since plane state is refreshed
5536 	 * every time.
5537 	 */
5538 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5539 	if (ret)
5540 		return ret;
5541 
5542 	return 0;
5543 }
5544 
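/*
 * Compute the stream source and destination rectangles from the requested
 * mode, the connector scaling property and any underscan borders.
 */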
5545 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5546 					   const struct dm_connector_state *dm_state,
5547 					   struct dc_stream_state *stream)
5548 {
5549 	enum amdgpu_rmx_type rmx_type;
5550 
5551 	struct rect src = { 0 }; /* viewport in composition space */
5552 	struct rect dst = { 0 }; /* stream addressable area */
5553 
5554 	/* no mode. nothing to be done */
5555 	if (!mode)
5556 		return;
5557 
5558 	/* Full screen scaling by default */
5559 	src.width = mode->hdisplay;
5560 	src.height = mode->vdisplay;
5561 	dst.width = stream->timing.h_addressable;
5562 	dst.height = stream->timing.v_addressable;
5563 
5564 	if (dm_state) {
5565 		rmx_type = dm_state->scaling;
5566 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5567 			if (src.width * dst.height <
5568 					src.height * dst.width) {
5569 				/* height needs less upscaling/more downscaling */
5570 				dst.width = src.width *
5571 						dst.height / src.height;
5572 			} else {
5573 				/* width needs less upscaling/more downscaling */
5574 				dst.height = src.height *
5575 						dst.width / src.width;
5576 			}
5577 		} else if (rmx_type == RMX_CENTER) {
5578 			dst = src;
5579 		}
5580 
5581 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5582 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5583 
5584 		if (dm_state->underscan_enable) {
5585 			dst.x += dm_state->underscan_hborder / 2;
5586 			dst.y += dm_state->underscan_vborder / 2;
5587 			dst.width -= dm_state->underscan_hborder;
5588 			dst.height -= dm_state->underscan_vborder;
5589 		}
5590 	}
5591 
5592 	stream->src = src;
5593 	stream->dst = dst;
5594 
5595 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5596 		      dst.x, dst.y, dst.width, dst.height);
5598 }
5599 
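/*
 * Derive the DC color depth from the EDID-reported bpc (or the HDMI
 * YCbCr 4:2:0 deep color capabilities), clamped to the requested max bpc.
 */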
5600 static enum dc_color_depth
5601 convert_color_depth_from_display_info(const struct drm_connector *connector,
5602 				      bool is_y420, int requested_bpc)
5603 {
5604 	uint8_t bpc;
5605 
5606 	if (is_y420) {
5607 		bpc = 8;
5608 
5609 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5610 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5611 			bpc = 16;
5612 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5613 			bpc = 12;
5614 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5615 			bpc = 10;
5616 	} else {
5617 		bpc = (uint8_t)connector->display_info.bpc;
5618 		/* Assume 8 bpc by default if no bpc is specified. */
5619 		bpc = bpc ? bpc : 8;
5620 	}
5621 
5622 	if (requested_bpc > 0) {
5623 		/*
5624 		 * Cap display bpc based on the user requested value.
5625 		 *
5626 		 * The value for state->max_bpc may not be correctly updated
5627 		 * depending on when the connector gets added to the state
5628 		 * or if this was called outside of atomic check, so it
5629 		 * can't be used directly.
5630 		 */
5631 		bpc = min_t(u8, bpc, requested_bpc);
5632 
5633 		/* Round down to the nearest even number. */
5634 		bpc = bpc - (bpc & 1);
5635 	}
5636 
5637 	switch (bpc) {
5638 	case 0:
5639 		/*
5640 		 * Temporary workaround: DRM doesn't parse color depth for
5641 		 * EDID revisions before 1.4.
5642 		 * TODO: Fix edid parsing
5643 		 */
5644 		return COLOR_DEPTH_888;
5645 	case 6:
5646 		return COLOR_DEPTH_666;
5647 	case 8:
5648 		return COLOR_DEPTH_888;
5649 	case 10:
5650 		return COLOR_DEPTH_101010;
5651 	case 12:
5652 		return COLOR_DEPTH_121212;
5653 	case 14:
5654 		return COLOR_DEPTH_141414;
5655 	case 16:
5656 		return COLOR_DEPTH_161616;
5657 	default:
5658 		return COLOR_DEPTH_UNDEFINED;
5659 	}
5660 }
5661 
5662 static enum dc_aspect_ratio
5663 get_aspect_ratio(const struct drm_display_mode *mode_in)
5664 {
5665 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5666 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5667 }
5668 
5669 static enum dc_color_space
5670 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5671 {
5672 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5673 
5674 	switch (dc_crtc_timing->pixel_encoding)	{
5675 	case PIXEL_ENCODING_YCBCR422:
5676 	case PIXEL_ENCODING_YCBCR444:
5677 	case PIXEL_ENCODING_YCBCR420:
5678 	{
5679 		/*
5680 		 * 27030 kHz (27.03 MHz) is the separation point between HDTV and
5681 		 * SDTV according to the HDMI spec; we use YCbCr709 above it and
5682 		 * YCbCr601 below it.
5683 		 */
5684 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5685 			if (dc_crtc_timing->flags.Y_ONLY)
5686 				color_space =
5687 					COLOR_SPACE_YCBCR709_LIMITED;
5688 			else
5689 				color_space = COLOR_SPACE_YCBCR709;
5690 		} else {
5691 			if (dc_crtc_timing->flags.Y_ONLY)
5692 				color_space =
5693 					COLOR_SPACE_YCBCR601_LIMITED;
5694 			else
5695 				color_space = COLOR_SPACE_YCBCR601;
5696 		}
5697 
5698 	}
5699 	break;
5700 	case PIXEL_ENCODING_RGB:
5701 		color_space = COLOR_SPACE_SRGB;
5702 		break;
5703 
5704 	default:
5705 		WARN_ON(1);
5706 		break;
5707 	}
5708 
5709 	return color_space;
5710 }
5711 
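/*
 * Lower the color depth step by step until the required pixel clock fits
 * within the sink's maximum TMDS clock; returns false if no suitable HDMI
 * depth is found.
 */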
5712 static bool adjust_colour_depth_from_display_info(
5713 	struct dc_crtc_timing *timing_out,
5714 	const struct drm_display_info *info)
5715 {
5716 	enum dc_color_depth depth = timing_out->display_color_depth;
5717 	int normalized_clk;
5718 	do {
5719 		normalized_clk = timing_out->pix_clk_100hz / 10;
5720 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5721 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5722 			normalized_clk /= 2;
5723 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5724 		switch (depth) {
5725 		case COLOR_DEPTH_888:
5726 			break;
5727 		case COLOR_DEPTH_101010:
5728 			normalized_clk = (normalized_clk * 30) / 24;
5729 			break;
5730 		case COLOR_DEPTH_121212:
5731 			normalized_clk = (normalized_clk * 36) / 24;
5732 			break;
5733 		case COLOR_DEPTH_161616:
5734 			normalized_clk = (normalized_clk * 48) / 24;
5735 			break;
5736 		default:
5737 			/* The above depths are the only ones valid for HDMI. */
5738 			return false;
5739 		}
5740 		if (normalized_clk <= info->max_tmds_clock) {
5741 			timing_out->display_color_depth = depth;
5742 			return true;
5743 		}
5744 	} while (--depth > COLOR_DEPTH_666);
5745 	return false;
5746 }
5747 
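/*
 * Translate a DRM display mode and connector info into DC stream timing:
 * pixel encoding, color depth, VIC and sync polarities, addressable size and
 * pixel clock, falling back to YCbCr 4:2:0 on HDMI when bandwidth requires it.
 */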
5748 static void fill_stream_properties_from_drm_display_mode(
5749 	struct dc_stream_state *stream,
5750 	const struct drm_display_mode *mode_in,
5751 	const struct drm_connector *connector,
5752 	const struct drm_connector_state *connector_state,
5753 	const struct dc_stream_state *old_stream,
5754 	int requested_bpc)
5755 {
5756 	struct dc_crtc_timing *timing_out = &stream->timing;
5757 	const struct drm_display_info *info = &connector->display_info;
5758 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5759 	struct hdmi_vendor_infoframe hv_frame;
5760 	struct hdmi_avi_infoframe avi_frame;
5761 
5762 	memset(&hv_frame, 0, sizeof(hv_frame));
5763 	memset(&avi_frame, 0, sizeof(avi_frame));
5764 
5765 	timing_out->h_border_left = 0;
5766 	timing_out->h_border_right = 0;
5767 	timing_out->v_border_top = 0;
5768 	timing_out->v_border_bottom = 0;
5769 	/* TODO: un-hardcode */
5770 	if (drm_mode_is_420_only(info, mode_in)
5771 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5772 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5773 	else if (drm_mode_is_420_also(info, mode_in)
5774 			&& aconnector->force_yuv420_output)
5775 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5776 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5777 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5778 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5779 	else
5780 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5781 
5782 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5783 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5784 		connector,
5785 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5786 		requested_bpc);
5787 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5788 	timing_out->hdmi_vic = 0;
5789 
5790 	if (old_stream) {
5791 		timing_out->vic = old_stream->timing.vic;
5792 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5793 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5794 	} else {
5795 		timing_out->vic = drm_match_cea_mode(mode_in);
5796 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5797 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5798 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5799 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5800 	}
5801 
5802 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5803 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5804 		timing_out->vic = avi_frame.video_code;
5805 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5806 		timing_out->hdmi_vic = hv_frame.vic;
5807 	}
5808 
5809 	if (is_freesync_video_mode(mode_in, aconnector)) {
5810 		timing_out->h_addressable = mode_in->hdisplay;
5811 		timing_out->h_total = mode_in->htotal;
5812 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5813 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5814 		timing_out->v_total = mode_in->vtotal;
5815 		timing_out->v_addressable = mode_in->vdisplay;
5816 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5817 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5818 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5819 	} else {
5820 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5821 		timing_out->h_total = mode_in->crtc_htotal;
5822 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5823 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5824 		timing_out->v_total = mode_in->crtc_vtotal;
5825 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5826 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5827 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5828 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5829 	}
5830 
5831 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5832 
5833 	stream->output_color_space = get_output_color_space(timing_out);
5834 
5835 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5836 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5837 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5838 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5839 		    drm_mode_is_420_also(info, mode_in) &&
5840 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5841 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5842 			adjust_colour_depth_from_display_info(timing_out, info);
5843 		}
5844 	}
5845 }
5846 
5847 static void fill_audio_info(struct audio_info *audio_info,
5848 			    const struct drm_connector *drm_connector,
5849 			    const struct dc_sink *dc_sink)
5850 {
5851 	int i = 0;
5852 	int cea_revision = 0;
5853 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5854 
5855 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5856 	audio_info->product_id = edid_caps->product_id;
5857 
5858 	cea_revision = drm_connector->display_info.cea_rev;
5859 
5860 	strscpy(audio_info->display_name,
5861 		edid_caps->display_name,
5862 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5863 
5864 	if (cea_revision >= 3) {
5865 		audio_info->mode_count = edid_caps->audio_mode_count;
5866 
5867 		for (i = 0; i < audio_info->mode_count; ++i) {
5868 			audio_info->modes[i].format_code =
5869 					(enum audio_format_code)
5870 					(edid_caps->audio_modes[i].format_code);
5871 			audio_info->modes[i].channel_count =
5872 					edid_caps->audio_modes[i].channel_count;
5873 			audio_info->modes[i].sample_rates.all =
5874 					edid_caps->audio_modes[i].sample_rate;
5875 			audio_info->modes[i].sample_size =
5876 					edid_caps->audio_modes[i].sample_size;
5877 		}
5878 	}
5879 
5880 	audio_info->flags.all = edid_caps->speaker_flags;
5881 
5882 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5883 	if (drm_connector->latency_present[0]) {
5884 		audio_info->video_latency = drm_connector->video_latency[0];
5885 		audio_info->audio_latency = drm_connector->audio_latency[0];
5886 	}
5887 
5888 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5890 }
5891 
5892 static void
5893 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5894 				      struct drm_display_mode *dst_mode)
5895 {
5896 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5897 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5898 	dst_mode->crtc_clock = src_mode->crtc_clock;
5899 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5900 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5901 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5902 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5903 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5904 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5905 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5906 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5907 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5908 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5909 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5910 }
5911 
5912 static void
5913 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5914 					const struct drm_display_mode *native_mode,
5915 					bool scale_enabled)
5916 {
5917 	if (scale_enabled) {
5918 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5919 	} else if (native_mode->clock == drm_mode->clock &&
5920 			native_mode->htotal == drm_mode->htotal &&
5921 			native_mode->vtotal == drm_mode->vtotal) {
5922 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5923 	} else {
5924 		/* Neither scaling nor an amdgpu-inserted mode; nothing to patch. */
5925 	}
5926 }
5927 
5928 static struct dc_sink *
5929 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5930 {
5931 	struct dc_sink_init_data sink_init_data = { 0 };
5932 	struct dc_sink *sink = NULL;
5933 	sink_init_data.link = aconnector->dc_link;
5934 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5935 
5936 	sink = dc_sink_create(&sink_init_data);
5937 	if (!sink) {
5938 		DRM_ERROR("Failed to create sink!\n");
5939 		return NULL;
5940 	}
5941 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5942 
5943 	return sink;
5944 }
5945 
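/*
 * Program a stream's CRTC-reset trigger to fire on the master stream's VSYNC
 * edge when triggered CRTC reset is enabled for that stream.
 */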
5946 static void set_multisync_trigger_params(
5947 		struct dc_stream_state *stream)
5948 {
5949 	struct dc_stream_state *master = NULL;
5950 
5951 	if (stream->triggered_crtc_reset.enabled) {
5952 		master = stream->triggered_crtc_reset.event_source;
5953 		stream->triggered_crtc_reset.event =
5954 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5955 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5956 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5957 	}
5958 }
5959 
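/*
 * Pick the stream with the highest refresh rate (among those with triggered
 * CRTC reset enabled) as the synchronization master and point every stream's
 * reset trigger at it.
 */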
5960 static void set_master_stream(struct dc_stream_state *stream_set[],
5961 			      int stream_count)
5962 {
5963 	int j, highest_rfr = 0, master_stream = 0;
5964 
5965 	for (j = 0;  j < stream_count; j++) {
5966 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5967 			int refresh_rate = 0;
5968 
5969 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5970 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5971 			if (refresh_rate > highest_rfr) {
5972 				highest_rfr = refresh_rate;
5973 				master_stream = j;
5974 			}
5975 		}
5976 	}
5977 	for (j = 0;  j < stream_count; j++) {
5978 		if (stream_set[j])
5979 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5980 	}
5981 }
5982 
5983 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5984 {
5985 	int i = 0;
5986 	struct dc_stream_state *stream;
5987 
5988 	if (context->stream_count < 2)
5989 		return;
5990 	for (i = 0; i < context->stream_count ; i++) {
5991 		if (!context->streams[i])
5992 			continue;
5993 		/*
5994 		 * TODO: add a function to read AMD VSDB bits and set
5995 		 * crtc_sync_master.multi_sync_enabled flag
5996 		 * For now it's set to false
5997 		 */
5998 	}
5999 
6000 	set_master_stream(context->streams, context->stream_count);
6001 
6002 	for (i = 0; i < context->stream_count ; i++) {
6003 		stream = context->streams[i];
6004 
6005 		if (!stream)
6006 			continue;
6007 
6008 		set_multisync_trigger_params(stream);
6009 	}
6010 }
6011 
6012 #if defined(CONFIG_DRM_AMD_DC_DCN)
6013 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6014 							struct dc_sink *sink, struct dc_stream_state *stream,
6015 							struct dsc_dec_dpcd_caps *dsc_caps)
6016 {
6017 	stream->timing.flags.DSC = 0;
6018 
6019 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6020 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6021 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6022 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6023 				      dsc_caps);
6024 	}
6025 }
6026 
6027 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6028 										struct dc_sink *sink, struct dc_stream_state *stream,
6029 										struct dsc_dec_dpcd_caps *dsc_caps)
6030 {
6031 	struct drm_connector *drm_connector = &aconnector->base;
6032 	uint32_t link_bandwidth_kbps;
6033 	uint32_t max_dsc_target_bpp_limit_override = 0;
6034 
6035 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6036 							dc_link_get_link_cap(aconnector->dc_link));
6037 
6038 	if (stream->link && stream->link->local_sink)
6039 		max_dsc_target_bpp_limit_override =
6040 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6041 
6042 	/* Set DSC policy according to dsc_clock_en */
6043 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6044 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6045 
6046 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6048 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6049 						dsc_caps,
6050 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6051 						max_dsc_target_bpp_limit_override,
6052 						link_bandwidth_kbps,
6053 						&stream->timing,
6054 						&stream->timing.dsc_cfg)) {
6055 			stream->timing.flags.DSC = 1;
6056 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6057 		}
6058 	}
6059 
6060 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6061 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6062 		stream->timing.flags.DSC = 1;
6063 
6064 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6065 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6066 
6067 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6068 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6069 
6070 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6071 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6072 }
6073 #endif
6074 
6075 /**
6076  * DOC: FreeSync Video
6077  *
6078  * When a userspace application wants to play a video, the content follows a
6079  * standard format definition that usually specifies the FPS for that format.
6080  * The list below illustrates some video formats and their expected FPS,
6081  * respectively:
6082  *
6083  * - TV/NTSC (23.976 FPS)
6084  * - Cinema (24 FPS)
6085  * - TV/PAL (25 FPS)
6086  * - TV/NTSC (29.97 FPS)
6087  * - TV/NTSC (30 FPS)
6088  * - Cinema HFR (48 FPS)
6089  * - TV/PAL (50 FPS)
6090  * - Commonly used (60 FPS)
6091  * - Multiples of 24 (48,72,96,120 FPS)
6092  *
6093  * The list of standard video formats is not huge, so these modes can be added
6094  * to the connector's mode list beforehand. With that, userspace can leverage
6095  * FreeSync to extend the front porch in order to attain the target refresh
6096  * rate. Such a switch will happen seamlessly, without screen blanking or
6097  * reprogramming of the output in any other way. If userspace requests a
6098  * modesetting change compatible with FreeSync modes that only differ in the
6099  * refresh rate, DC will skip the full update and avoid a blink during the
6100  * transition. For example, the video player can change the modesetting from
6101  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6102  * causing any display blink. This same concept can be applied to a mode
6103  * setting change.
6104  */
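/*
 * Return the display mode with the highest refresh rate at the preferred
 * resolution, searching either the probed or the current mode list; the
 * result is cached in aconnector->freesync_vid_base.
 */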
6105 static struct drm_display_mode *
6106 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6107 			  bool use_probed_modes)
6108 {
6109 	struct drm_display_mode *m, *m_pref = NULL;
6110 	u16 current_refresh, highest_refresh;
6111 	struct list_head *list_head = use_probed_modes ?
6112 						    &aconnector->base.probed_modes :
6113 						    &aconnector->base.modes;
6114 
6115 	if (aconnector->freesync_vid_base.clock != 0)
6116 		return &aconnector->freesync_vid_base;
6117 
6118 	/* Find the preferred mode */
6119 	list_for_each_entry(m, list_head, head) {
6120 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6121 			m_pref = m;
6122 			break;
6123 		}
6124 	}
6125 
6126 	if (!m_pref) {
6127 		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
6128 		m_pref = list_first_entry_or_null(
6129 			&aconnector->base.modes, struct drm_display_mode, head);
6130 		if (!m_pref) {
6131 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6132 			return NULL;
6133 		}
6134 	}
6135 
6136 	highest_refresh = drm_mode_vrefresh(m_pref);
6137 
6138 	/*
6139 	 * Find the mode with the highest refresh rate at the same resolution.
6140 	 * For some monitors, the preferred mode is not the mode with the
6141 	 * highest supported refresh rate.
6142 	 */
6143 	list_for_each_entry(m, list_head, head) {
6144 		current_refresh  = drm_mode_vrefresh(m);
6145 
6146 		if (m->hdisplay == m_pref->hdisplay &&
6147 		    m->vdisplay == m_pref->vdisplay &&
6148 		    highest_refresh < current_refresh) {
6149 			highest_refresh = current_refresh;
6150 			m_pref = m;
6151 		}
6152 	}
6153 
6154 	aconnector->freesync_vid_base = *m_pref;
6155 	return m_pref;
6156 }
6157 
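/*
 * A mode counts as a FreeSync video mode when it matches the highest-refresh
 * base mode in everything except the vertical blanking interval, i.e. it only
 * stretches the front porch.
 */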
6158 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6159 				   struct amdgpu_dm_connector *aconnector)
6160 {
6161 	struct drm_display_mode *high_mode;
6162 	int timing_diff;
6163 
6164 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6165 	if (!high_mode || !mode)
6166 		return false;
6167 
6168 	timing_diff = high_mode->vtotal - mode->vtotal;
6169 
6170 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6171 	    high_mode->hdisplay != mode->hdisplay ||
6172 	    high_mode->vdisplay != mode->vdisplay ||
6173 	    high_mode->hsync_start != mode->hsync_start ||
6174 	    high_mode->hsync_end != mode->hsync_end ||
6175 	    high_mode->htotal != mode->htotal ||
6176 	    high_mode->hskew != mode->hskew ||
6177 	    high_mode->vscan != mode->vscan ||
6178 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6179 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6180 		return false;
6181 	else
6182 		return true;
6183 }
6184 
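/*
 * Create and fill a DC stream for the given connector and mode: pick the
 * timing (preferred, FreeSync video or scaled), apply the DSC policy, scaling,
 * audio info and PSR/VSC infopacket settings.
 */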
6185 static struct dc_stream_state *
6186 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6187 		       const struct drm_display_mode *drm_mode,
6188 		       const struct dm_connector_state *dm_state,
6189 		       const struct dc_stream_state *old_stream,
6190 		       int requested_bpc)
6191 {
6192 	struct drm_display_mode *preferred_mode = NULL;
6193 	struct drm_connector *drm_connector;
6194 	const struct drm_connector_state *con_state =
6195 		dm_state ? &dm_state->base : NULL;
6196 	struct dc_stream_state *stream = NULL;
6197 	struct drm_display_mode mode = *drm_mode;
6198 	struct drm_display_mode saved_mode;
6199 	struct drm_display_mode *freesync_mode = NULL;
6200 	bool native_mode_found = false;
6201 	bool recalculate_timing = false;
6202 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6203 	int mode_refresh;
6204 	int preferred_refresh = 0;
6205 #if defined(CONFIG_DRM_AMD_DC_DCN)
6206 	struct dsc_dec_dpcd_caps dsc_caps;
6207 #endif
6208 	struct dc_sink *sink = NULL;
6209 
6210 	memset(&saved_mode, 0, sizeof(saved_mode));
6211 
6212 	if (aconnector == NULL) {
6213 		DRM_ERROR("aconnector is NULL!\n");
6214 		return stream;
6215 	}
6216 
6217 	drm_connector = &aconnector->base;
6218 
6219 	if (!aconnector->dc_sink) {
6220 		sink = create_fake_sink(aconnector);
6221 		if (!sink)
6222 			return stream;
6223 	} else {
6224 		sink = aconnector->dc_sink;
6225 		dc_sink_retain(sink);
6226 	}
6227 
6228 	stream = dc_create_stream_for_sink(sink);
6229 
6230 	if (stream == NULL) {
6231 		DRM_ERROR("Failed to create stream for sink!\n");
6232 		goto finish;
6233 	}
6234 
6235 	stream->dm_stream_context = aconnector;
6236 
6237 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6238 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6239 
6240 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6241 		/* Search for preferred mode */
6242 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6243 			native_mode_found = true;
6244 			break;
6245 		}
6246 	}
6247 	if (!native_mode_found)
6248 		preferred_mode = list_first_entry_or_null(
6249 				&aconnector->base.modes,
6250 				struct drm_display_mode,
6251 				head);
6252 
6253 	mode_refresh = drm_mode_vrefresh(&mode);
6254 
6255 	if (preferred_mode == NULL) {
6256 		/*
6257 		 * This may not be an error, the use case is when we have no
6258 		 * usermode calls to reset and set mode upon hotplug. In this
6259 		 * case, we call set mode ourselves to restore the previous mode
6260 		 * and the mode list may not be filled in yet.
6261 		 */
6262 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6263 	} else {
6264 		recalculate_timing = amdgpu_freesync_vid_mode &&
6265 				 is_freesync_video_mode(&mode, aconnector);
6266 		if (recalculate_timing) {
6267 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6268 			saved_mode = mode;
6269 			mode = *freesync_mode;
6270 		} else {
6271 			decide_crtc_timing_for_drm_display_mode(
6272 				&mode, preferred_mode, scale);
6273 
6274 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6275 		}
6276 	}
6277 
6278 	if (recalculate_timing)
6279 		drm_mode_set_crtcinfo(&saved_mode, 0);
6280 	else if (!dm_state)
6281 		drm_mode_set_crtcinfo(&mode, 0);
6282 
6283 	/*
6284 	 * If scaling is enabled and the refresh rate didn't change,
6285 	 * we copy the vic and polarities of the old timings.
6286 	 */
6287 	if (!scale || mode_refresh != preferred_refresh)
6288 		fill_stream_properties_from_drm_display_mode(
6289 			stream, &mode, &aconnector->base, con_state, NULL,
6290 			requested_bpc);
6291 	else
6292 		fill_stream_properties_from_drm_display_mode(
6293 			stream, &mode, &aconnector->base, con_state, old_stream,
6294 			requested_bpc);
6295 
6296 #if defined(CONFIG_DRM_AMD_DC_DCN)
6297 	/* SST DSC determination policy */
6298 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6299 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6300 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6301 #endif
6302 
6303 	update_stream_scaling_settings(&mode, dm_state, stream);
6304 
6305 	fill_audio_info(
6306 		&stream->audio_info,
6307 		drm_connector,
6308 		sink);
6309 
6310 	update_stream_signal(stream, sink);
6311 
6312 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6313 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6314 
6315 	if (stream->link->psr_settings.psr_feature_enabled) {
6316 		/*
6317 		 * Decide whether the stream supports VSC SDP colorimetry
6318 		 * before building the VSC info packet.
6319 		 */
6320 		stream->use_vsc_sdp_for_colorimetry = false;
6321 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6322 			stream->use_vsc_sdp_for_colorimetry =
6323 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6324 		} else {
6325 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6326 				stream->use_vsc_sdp_for_colorimetry = true;
6327 		}
6328 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6329 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6331 	}
6332 finish:
6333 	dc_sink_release(sink);
6334 
6335 	return stream;
6336 }
6337 
6338 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6339 {
6340 	drm_crtc_cleanup(crtc);
6341 	kfree(crtc);
6342 }
6343 
6344 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6345 				  struct drm_crtc_state *state)
6346 {
6347 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6348 
6349 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6350 	if (cur->stream)
6351 		dc_stream_release(cur->stream);
6352 
6354 	__drm_atomic_helper_crtc_destroy_state(state);
6355 
6357 	kfree(state);
6358 }
6359 
6360 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6361 {
6362 	struct dm_crtc_state *state;
6363 
6364 	if (crtc->state)
6365 		dm_crtc_destroy_state(crtc, crtc->state);
6366 
6367 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6368 	if (WARN_ON(!state))
6369 		return;
6370 
6371 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6372 }
6373 
6374 static struct drm_crtc_state *
6375 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6376 {
6377 	struct dm_crtc_state *state, *cur;
6378 
6379 	if (WARN_ON(!crtc->state))
6380 		return NULL;
6381 
6382 	cur = to_dm_crtc_state(crtc->state);
6383 
6384 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6385 	if (!state)
6386 		return NULL;
6387 
6388 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6389 
6390 	if (cur->stream) {
6391 		state->stream = cur->stream;
6392 		dc_stream_retain(state->stream);
6393 	}
6394 
6395 	state->active_planes = cur->active_planes;
6396 	state->vrr_infopacket = cur->vrr_infopacket;
6397 	state->abm_level = cur->abm_level;
6398 	state->vrr_supported = cur->vrr_supported;
6399 	state->freesync_config = cur->freesync_config;
6400 	state->cm_has_degamma = cur->cm_has_degamma;
6401 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6402 	state->force_dpms_off = cur->force_dpms_off;
6403 	/* TODO: Duplicate dc_stream once the stream object is flattened */
6404 
6405 	return &state->base;
6406 }
6407 
6408 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6409 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6410 {
6411 	crtc_debugfs_init(crtc);
6412 
6413 	return 0;
6414 }
6415 #endif
6416 
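/* Enable or disable the VUPDATE interrupt for the CRTC's OTG instance. */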
6417 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6418 {
6419 	enum dc_irq_source irq_source;
6420 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6421 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6422 	int rc;
6423 
6424 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6425 
6426 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6427 
6428 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6429 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6430 	return rc;
6431 }
6432 
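/*
 * Enable or disable the VBLANK interrupt for the CRTC, toggling the VUPDATE
 * interrupt as needed for VRR, and queue deferred vblank control work when
 * the workqueue exists (skipped while the GPU is in reset).
 */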
6433 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6434 {
6435 	enum dc_irq_source irq_source;
6436 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6437 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6438 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6439 #if defined(CONFIG_DRM_AMD_DC_DCN)
6440 	struct amdgpu_display_manager *dm = &adev->dm;
6441 	struct vblank_control_work *work;
6442 #endif
6443 	int rc = 0;
6444 
6445 	if (enable) {
6446 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6447 		if (amdgpu_dm_vrr_active(acrtc_state))
6448 			rc = dm_set_vupdate_irq(crtc, true);
6449 	} else {
6450 		/* vblank irq off -> vupdate irq off */
6451 		rc = dm_set_vupdate_irq(crtc, false);
6452 	}
6453 
6454 	if (rc)
6455 		return rc;
6456 
6457 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6458 
6459 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6460 		return -EBUSY;
6461 
6462 	if (amdgpu_in_reset(adev))
6463 		return 0;
6464 
6465 #if defined(CONFIG_DRM_AMD_DC_DCN)
6466 	if (dm->vblank_control_workqueue) {
6467 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6468 		if (!work)
6469 			return -ENOMEM;
6470 
6471 		INIT_WORK(&work->work, vblank_control_worker);
6472 		work->dm = dm;
6473 		work->acrtc = acrtc;
6474 		work->enable = enable;
6475 
6476 		if (acrtc_state->stream) {
6477 			dc_stream_retain(acrtc_state->stream);
6478 			work->stream = acrtc_state->stream;
6479 		}
6480 
6481 		queue_work(dm->vblank_control_workqueue, &work->work);
6482 	}
6483 #endif
6484 
6485 	return 0;
6486 }
6487 
6488 static int dm_enable_vblank(struct drm_crtc *crtc)
6489 {
6490 	return dm_set_vblank(crtc, true);
6491 }
6492 
6493 static void dm_disable_vblank(struct drm_crtc *crtc)
6494 {
6495 	dm_set_vblank(crtc, false);
6496 }
6497 
6498 /* Only the options currently available for the driver are implemented */
6499 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6500 	.reset = dm_crtc_reset_state,
6501 	.destroy = amdgpu_dm_crtc_destroy,
6502 	.set_config = drm_atomic_helper_set_config,
6503 	.page_flip = drm_atomic_helper_page_flip,
6504 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6505 	.atomic_destroy_state = dm_crtc_destroy_state,
6506 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6507 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6508 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6509 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6510 	.enable_vblank = dm_enable_vblank,
6511 	.disable_vblank = dm_disable_vblank,
6512 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6513 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6514 	.late_register = amdgpu_dm_crtc_late_register,
6515 #endif
6516 };
6517 
6518 static enum drm_connector_status
6519 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6520 {
6521 	bool connected;
6522 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6523 
6524 	/*
6525 	 * Notes:
6526 	 * 1. This interface is NOT called in context of HPD irq.
6527 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6528 	 * makes it a bad place for *any* MST-related activity.
6529 	 */
6530 
6531 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6532 	    !aconnector->fake_enable)
6533 		connected = (aconnector->dc_sink != NULL);
6534 	else
6535 		connected = (aconnector->base.force == DRM_FORCE_ON);
6536 
6537 	update_subconnector_property(aconnector);
6538 
6539 	return (connected ? connector_status_connected :
6540 			connector_status_disconnected);
6541 }
6542 
6543 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6544 					    struct drm_connector_state *connector_state,
6545 					    struct drm_property *property,
6546 					    uint64_t val)
6547 {
6548 	struct drm_device *dev = connector->dev;
6549 	struct amdgpu_device *adev = drm_to_adev(dev);
6550 	struct dm_connector_state *dm_old_state =
6551 		to_dm_connector_state(connector->state);
6552 	struct dm_connector_state *dm_new_state =
6553 		to_dm_connector_state(connector_state);
6554 
6555 	int ret = -EINVAL;
6556 
6557 	if (property == dev->mode_config.scaling_mode_property) {
6558 		enum amdgpu_rmx_type rmx_type;
6559 
6560 		switch (val) {
6561 		case DRM_MODE_SCALE_CENTER:
6562 			rmx_type = RMX_CENTER;
6563 			break;
6564 		case DRM_MODE_SCALE_ASPECT:
6565 			rmx_type = RMX_ASPECT;
6566 			break;
6567 		case DRM_MODE_SCALE_FULLSCREEN:
6568 			rmx_type = RMX_FULL;
6569 			break;
6570 		case DRM_MODE_SCALE_NONE:
6571 		default:
6572 			rmx_type = RMX_OFF;
6573 			break;
6574 		}
6575 
6576 		if (dm_old_state->scaling == rmx_type)
6577 			return 0;
6578 
6579 		dm_new_state->scaling = rmx_type;
6580 		ret = 0;
6581 	} else if (property == adev->mode_info.underscan_hborder_property) {
6582 		dm_new_state->underscan_hborder = val;
6583 		ret = 0;
6584 	} else if (property == adev->mode_info.underscan_vborder_property) {
6585 		dm_new_state->underscan_vborder = val;
6586 		ret = 0;
6587 	} else if (property == adev->mode_info.underscan_property) {
6588 		dm_new_state->underscan_enable = val;
6589 		ret = 0;
6590 	} else if (property == adev->mode_info.abm_level_property) {
6591 		dm_new_state->abm_level = val;
6592 		ret = 0;
6593 	}
6594 
6595 	return ret;
6596 }
6597 
6598 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6599 					    const struct drm_connector_state *state,
6600 					    struct drm_property *property,
6601 					    uint64_t *val)
6602 {
6603 	struct drm_device *dev = connector->dev;
6604 	struct amdgpu_device *adev = drm_to_adev(dev);
6605 	struct dm_connector_state *dm_state =
6606 		to_dm_connector_state(state);
6607 	int ret = -EINVAL;
6608 
6609 	if (property == dev->mode_config.scaling_mode_property) {
6610 		switch (dm_state->scaling) {
6611 		case RMX_CENTER:
6612 			*val = DRM_MODE_SCALE_CENTER;
6613 			break;
6614 		case RMX_ASPECT:
6615 			*val = DRM_MODE_SCALE_ASPECT;
6616 			break;
6617 		case RMX_FULL:
6618 			*val = DRM_MODE_SCALE_FULLSCREEN;
6619 			break;
6620 		case RMX_OFF:
6621 		default:
6622 			*val = DRM_MODE_SCALE_NONE;
6623 			break;
6624 		}
6625 		ret = 0;
6626 	} else if (property == adev->mode_info.underscan_hborder_property) {
6627 		*val = dm_state->underscan_hborder;
6628 		ret = 0;
6629 	} else if (property == adev->mode_info.underscan_vborder_property) {
6630 		*val = dm_state->underscan_vborder;
6631 		ret = 0;
6632 	} else if (property == adev->mode_info.underscan_property) {
6633 		*val = dm_state->underscan_enable;
6634 		ret = 0;
6635 	} else if (property == adev->mode_info.abm_level_property) {
6636 		*val = dm_state->abm_level;
6637 		ret = 0;
6638 	}
6639 
6640 	return ret;
6641 }
6642 
6643 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6644 {
6645 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6646 
6647 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6648 }
6649 
6650 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6651 {
6652 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6653 	const struct dc_link *link = aconnector->dc_link;
6654 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6655 	struct amdgpu_display_manager *dm = &adev->dm;
6656 	int i;
6657 
6658 	/*
6659 	 * Only call this if mst_mgr was initialized earlier, since that is not
6660 	 * done for all connector types.
6661 	 */
6662 	if (aconnector->mst_mgr.dev)
6663 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6664 
6665 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6666 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6667 	for (i = 0; i < dm->num_of_edps; i++) {
6668 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6669 			backlight_device_unregister(dm->backlight_dev[i]);
6670 			dm->backlight_dev[i] = NULL;
6671 		}
6672 	}
6673 #endif
6674 
6675 	if (aconnector->dc_em_sink)
6676 		dc_sink_release(aconnector->dc_em_sink);
6677 	aconnector->dc_em_sink = NULL;
6678 	if (aconnector->dc_sink)
6679 		dc_sink_release(aconnector->dc_sink);
6680 	aconnector->dc_sink = NULL;
6681 
6682 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6683 	drm_connector_unregister(connector);
6684 	drm_connector_cleanup(connector);
6685 	if (aconnector->i2c) {
6686 		i2c_del_adapter(&aconnector->i2c->base);
6687 		kfree(aconnector->i2c);
6688 	}
6689 	kfree(aconnector->dm_dp_aux.aux.name);
6690 
6691 	kfree(connector);
6692 }
6693 
6694 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6695 {
6696 	struct dm_connector_state *state =
6697 		to_dm_connector_state(connector->state);
6698 
6699 	if (connector->state)
6700 		__drm_atomic_helper_connector_destroy_state(connector->state);
6701 
6702 	kfree(state);
6703 
6704 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6705 
6706 	if (state) {
6707 		state->scaling = RMX_OFF;
6708 		state->underscan_enable = false;
6709 		state->underscan_hborder = 0;
6710 		state->underscan_vborder = 0;
6711 		state->base.max_requested_bpc = 8;
6712 		state->vcpi_slots = 0;
6713 		state->pbn = 0;
6714 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6715 			state->abm_level = amdgpu_dm_abm_level;
6716 
6717 		__drm_atomic_helper_connector_reset(connector, &state->base);
6718 	}
6719 }
6720 
6721 struct drm_connector_state *
6722 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6723 {
6724 	struct dm_connector_state *state =
6725 		to_dm_connector_state(connector->state);
6726 
6727 	struct dm_connector_state *new_state =
6728 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6729 
6730 	if (!new_state)
6731 		return NULL;
6732 
6733 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6734 
6735 	new_state->freesync_capable = state->freesync_capable;
6736 	new_state->abm_level = state->abm_level;
6737 	new_state->scaling = state->scaling;
6738 	new_state->underscan_enable = state->underscan_enable;
6739 	new_state->underscan_hborder = state->underscan_hborder;
6740 	new_state->underscan_vborder = state->underscan_vborder;
6741 	new_state->vcpi_slots = state->vcpi_slots;
6742 	new_state->pbn = state->pbn;
6743 	return &new_state->base;
6744 }
6745 
6746 static int
6747 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6748 {
6749 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6750 		to_amdgpu_dm_connector(connector);
6751 	int r;
6752 
6753 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6754 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6755 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6756 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6757 		if (r)
6758 			return r;
6759 	}
6760 
6761 #if defined(CONFIG_DEBUG_FS)
6762 	connector_debugfs_init(amdgpu_dm_connector);
6763 #endif
6764 
6765 	return 0;
6766 }
6767 
6768 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6769 	.reset = amdgpu_dm_connector_funcs_reset,
6770 	.detect = amdgpu_dm_connector_detect,
6771 	.fill_modes = drm_helper_probe_single_connector_modes,
6772 	.destroy = amdgpu_dm_connector_destroy,
6773 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6774 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6775 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6776 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6777 	.late_register = amdgpu_dm_connector_late_register,
6778 	.early_unregister = amdgpu_dm_connector_unregister
6779 };
6780 
6781 static int get_modes(struct drm_connector *connector)
6782 {
6783 	return amdgpu_dm_connector_get_modes(connector);
6784 }
6785 
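/*
 * Create an emulated sink from the connector's EDID property blob so that a
 * connector forced on without a physically attached sink can still be driven.
 */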
6786 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6787 {
6788 	struct dc_sink_init_data init_params = {
6789 			.link = aconnector->dc_link,
6790 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6791 	};
6792 	struct edid *edid;
6793 
6794 	if (!aconnector->base.edid_blob_ptr) {
6795 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6796 				aconnector->base.name);
6797 
6798 		aconnector->base.force = DRM_FORCE_OFF;
6799 		aconnector->base.override_edid = false;
6800 		return;
6801 	}
6802 
6803 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6804 
6805 	aconnector->edid = edid;
6806 
6807 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6808 		aconnector->dc_link,
6809 		(uint8_t *)edid,
6810 		(edid->extensions + 1) * EDID_LENGTH,
6811 		&init_params);
6812 
6813 	if (aconnector->base.force == DRM_FORCE_ON) {
6814 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6815 		aconnector->dc_link->local_sink :
6816 		aconnector->dc_em_sink;
6817 		dc_sink_retain(aconnector->dc_sink);
6818 	}
6819 }
6820 
6821 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6822 {
6823 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6824 
6825 	/*
6826 	 * In case of a headless boot with force on for a DP managed connector,
6827 	 * those settings have to be != 0 to get an initial modeset.
6828 	 */
6829 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6830 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6831 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6832 	}
6833 
6835 	aconnector->base.override_edid = true;
6836 	create_eml_sink(aconnector);
6837 }
6838 
6839 static struct dc_stream_state *
6840 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6841 				const struct drm_display_mode *drm_mode,
6842 				const struct dm_connector_state *dm_state,
6843 				const struct dc_stream_state *old_stream)
6844 {
6845 	struct drm_connector *connector = &aconnector->base;
6846 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6847 	struct dc_stream_state *stream;
6848 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6849 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6850 	enum dc_status dc_result = DC_OK;
6851 
6852 	do {
6853 		stream = create_stream_for_sink(aconnector, drm_mode,
6854 						dm_state, old_stream,
6855 						requested_bpc);
6856 		if (stream == NULL) {
6857 			DRM_ERROR("Failed to create stream for sink!\n");
6858 			break;
6859 		}
6860 
6861 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6862 
6863 		if (dc_result != DC_OK) {
6864 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6865 				      drm_mode->hdisplay,
6866 				      drm_mode->vdisplay,
6867 				      drm_mode->clock,
6868 				      dc_result,
6869 				      dc_status_to_str(dc_result));
6870 
6871 			dc_stream_release(stream);
6872 			stream = NULL;
6873 			requested_bpc -= 2; /* lower bpc to retry validation */
6874 		}
6875 
6876 	} while (stream == NULL && requested_bpc >= 6);
6877 
6878 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6879 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6880 
6881 		aconnector->force_yuv420_output = true;
6882 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6883 						dm_state, old_stream);
6884 		aconnector->force_yuv420_output = false;
6885 	}
6886 
6887 	return stream;
6888 }
6889 
6890 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6891 				   struct drm_display_mode *mode)
6892 {
6893 	int result = MODE_ERROR;
6894 	struct dc_sink *dc_sink;
6895 	/* TODO: Unhardcode stream count */
6896 	struct dc_stream_state *stream;
6897 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6898 
6899 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6900 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6901 		return result;
6902 
6903 	/*
6904 	 * Only run this the first time mode_valid is called, to initialize
6905 	 * EDID management.
6906 	 */
6907 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6908 		!aconnector->dc_em_sink)
6909 		handle_edid_mgmt(aconnector);
6910 
6911 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6912 
6913 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6914 				aconnector->base.force != DRM_FORCE_ON) {
6915 		DRM_ERROR("dc_sink is NULL!\n");
6916 		goto fail;
6917 	}
6918 
6919 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6920 	if (stream) {
6921 		dc_stream_release(stream);
6922 		result = MODE_OK;
6923 	}
6924 
6925 fail:
6926 	/* TODO: error handling */
6927 	return result;
6928 }
6929 
6930 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6931 				struct dc_info_packet *out)
6932 {
6933 	struct hdmi_drm_infoframe frame;
6934 	unsigned char buf[30]; /* 26 + 4 */
6935 	ssize_t len;
6936 	int ret, i;
6937 
6938 	memset(out, 0, sizeof(*out));
6939 
6940 	if (!state->hdr_output_metadata)
6941 		return 0;
6942 
6943 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6944 	if (ret)
6945 		return ret;
6946 
6947 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6948 	if (len < 0)
6949 		return (int)len;
6950 
6951 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6952 	if (len != 30)
6953 		return -EINVAL;
6954 
6955 	/* Prepare the infopacket for DC. */
6956 	switch (state->connector->connector_type) {
6957 	case DRM_MODE_CONNECTOR_HDMIA:
6958 		out->hb0 = 0x87; /* type */
6959 		out->hb1 = 0x01; /* version */
6960 		out->hb2 = 0x1A; /* length */
6961 		out->sb[0] = buf[3]; /* checksum */
6962 		i = 1;
6963 		break;
6964 
6965 	case DRM_MODE_CONNECTOR_DisplayPort:
6966 	case DRM_MODE_CONNECTOR_eDP:
6967 		out->hb0 = 0x00; /* sdp id, zero */
6968 		out->hb1 = 0x87; /* type */
6969 		out->hb2 = 0x1D; /* payload len - 1 */
6970 		out->hb3 = (0x13 << 2); /* sdp version */
6971 		out->sb[0] = 0x01; /* version */
6972 		out->sb[1] = 0x1A; /* length */
6973 		i = 2;
6974 		break;
6975 
6976 	default:
6977 		return -EINVAL;
6978 	}
6979 
6980 	memcpy(&out->sb[i], &buf[4], 26);
6981 	out->valid = true;
6982 
6983 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6984 		       sizeof(out->sb), false);
6985 
6986 	return 0;
6987 }
6988 
6989 static int
6990 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6991 				 struct drm_atomic_state *state)
6992 {
6993 	struct drm_connector_state *new_con_state =
6994 		drm_atomic_get_new_connector_state(state, conn);
6995 	struct drm_connector_state *old_con_state =
6996 		drm_atomic_get_old_connector_state(state, conn);
6997 	struct drm_crtc *crtc = new_con_state->crtc;
6998 	struct drm_crtc_state *new_crtc_state;
6999 	int ret;
7000 
7001 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7002 
7003 	if (!crtc)
7004 		return 0;
7005 
7006 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7007 		struct dc_info_packet hdr_infopacket;
7008 
7009 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7010 		if (ret)
7011 			return ret;
7012 
7013 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7014 		if (IS_ERR(new_crtc_state))
7015 			return PTR_ERR(new_crtc_state);
7016 
7017 		/*
7018 		 * DC considers the stream backends changed if the
7019 		 * static metadata changes. Forcing the modeset also
7020 		 * gives a simple way for userspace to switch from
7021 		 * 8bpc to 10bpc when setting the metadata to enter
7022 		 * or exit HDR.
7023 		 *
7024 		 * Changing the static metadata after it's been
7025 		 * set is permissible, however. So only force a
7026 		 * modeset if we're entering or exiting HDR.
7027 		 */
7028 		new_crtc_state->mode_changed =
7029 			!old_con_state->hdr_output_metadata ||
7030 			!new_con_state->hdr_output_metadata;
7031 	}
7032 
7033 	return 0;
7034 }
7035 
7036 static const struct drm_connector_helper_funcs
7037 amdgpu_dm_connector_helper_funcs = {
7038 	/*
7039 	 * If hotplugging a second, bigger display in FB console mode, bigger
7040 	 * resolution modes will be filtered by drm_mode_validate_size(), and those
7041 	 * modes are missing after the user starts lightdm. So we need to rebuild
7042 	 * the modes list in the get_modes callback, not just return the modes count.
7043 	 */
7044 	.get_modes = get_modes,
7045 	.mode_valid = amdgpu_dm_connector_mode_valid,
7046 	.atomic_check = amdgpu_dm_connector_atomic_check,
7047 };
7048 
7049 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7050 {
7051 }
7052 
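/* Count the non-cursor planes that will be enabled on this CRTC after commit. */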
7053 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7054 {
7055 	struct drm_atomic_state *state = new_crtc_state->state;
7056 	struct drm_plane *plane;
7057 	int num_active = 0;
7058 
7059 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7060 		struct drm_plane_state *new_plane_state;
7061 
7062 		/* Cursor planes are "fake". */
7063 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7064 			continue;
7065 
7066 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7067 
7068 		if (!new_plane_state) {
7069 			/*
7070 			 * The plane is enabled on the CRTC and hasn't changed
7071 			 * state. This means that it previously passed
7072 			 * validation and is therefore enabled.
7073 			 */
7074 			num_active += 1;
7075 			continue;
7076 		}
7077 
7078 		/* We need a framebuffer to be considered enabled. */
7079 		num_active += (new_plane_state->fb != NULL);
7080 	}
7081 
7082 	return num_active;
7083 }
7084 
7085 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7086 					 struct drm_crtc_state *new_crtc_state)
7087 {
7088 	struct dm_crtc_state *dm_new_crtc_state =
7089 		to_dm_crtc_state(new_crtc_state);
7090 
7091 	dm_new_crtc_state->active_planes = 0;
7092 
7093 	if (!dm_new_crtc_state->stream)
7094 		return;
7095 
7096 	dm_new_crtc_state->active_planes =
7097 		count_crtc_active_planes(new_crtc_state);
7098 }
7099 
7100 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7101 				       struct drm_atomic_state *state)
7102 {
7103 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7104 									  crtc);
7105 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7106 	struct dc *dc = adev->dm.dc;
7107 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7108 	int ret = -EINVAL;
7109 
7110 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7111 
7112 	dm_update_crtc_active_planes(crtc, crtc_state);
7113 
7114 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7115 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7116 		return ret;
7117 	}
7118 
7119 	/*
7120 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7121 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7122 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7123 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7124 	 */
7125 	if (crtc_state->enable &&
7126 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7127 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7128 		return -EINVAL;
7129 	}
7130 
7131 	/* In some use cases, like reset, no stream is attached */
7132 	if (!dm_crtc_state->stream)
7133 		return 0;
7134 
7135 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7136 		return 0;
7137 
7138 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7139 	return ret;
7140 }
7141 
7142 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7143 				      const struct drm_display_mode *mode,
7144 				      struct drm_display_mode *adjusted_mode)
7145 {
7146 	return true;
7147 }
7148 
7149 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7150 	.disable = dm_crtc_helper_disable,
7151 	.atomic_check = dm_crtc_helper_atomic_check,
7152 	.mode_fixup = dm_crtc_helper_mode_fixup,
7153 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7154 };
7155 
7156 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7157 {
7158 
7159 }
7160 
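/* Map a DC color depth enum to bits per color component; returns 0 if unknown. */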
7161 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7162 {
7163 	switch (display_color_depth) {
7164 	case COLOR_DEPTH_666:
7165 		return 6;
7166 	case COLOR_DEPTH_888:
7167 		return 8;
7168 	case COLOR_DEPTH_101010:
7169 		return 10;
7170 	case COLOR_DEPTH_121212:
7171 		return 12;
7172 	case COLOR_DEPTH_141414:
7173 		return 14;
7174 	case COLOR_DEPTH_161616:
7175 		return 16;
7176 	default:
7177 		break;
7178 	}
7179 	return 0;
7180 }
7181 
7182 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7183 					  struct drm_crtc_state *crtc_state,
7184 					  struct drm_connector_state *conn_state)
7185 {
7186 	struct drm_atomic_state *state = crtc_state->state;
7187 	struct drm_connector *connector = conn_state->connector;
7188 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7189 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7190 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7191 	struct drm_dp_mst_topology_mgr *mst_mgr;
7192 	struct drm_dp_mst_port *mst_port;
7193 	enum dc_color_depth color_depth;
7194 	int clock, bpp = 0;
7195 	bool is_y420 = false;
7196 
7197 	if (!aconnector->port || !aconnector->dc_sink)
7198 		return 0;
7199 
7200 	mst_port = aconnector->port;
7201 	mst_mgr = &aconnector->mst_port->mst_mgr;
7202 
7203 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7204 		return 0;
7205 
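	/*
	 * For a fresh (non-duplicated) state, derive the required MST bandwidth
	 * in PBN from the adjusted mode's pixel clock and the negotiated bpp.
	 */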
7206 	if (!state->duplicated) {
7207 		int max_bpc = conn_state->max_requested_bpc;
7208 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7209 				aconnector->force_yuv420_output;
7210 		color_depth = convert_color_depth_from_display_info(connector,
7211 								    is_y420,
7212 								    max_bpc);
7213 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7214 		clock = adjusted_mode->clock;
7215 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7216 	}
7217 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7218 									   mst_mgr,
7219 									   mst_port,
7220 									   dm_new_connector_state->pbn,
7221 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7222 	if (dm_new_connector_state->vcpi_slots < 0) {
7223 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7224 		return dm_new_connector_state->vcpi_slots;
7225 	}
7226 	return 0;
7227 }
7228 
7229 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7230 	.disable = dm_encoder_helper_disable,
7231 	.atomic_check = dm_encoder_helper_atomic_check
7232 };
7233 
7234 #if defined(CONFIG_DRM_AMD_DC_DCN)
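/*
 * Update the MST VCPI slot allocation using the PBN values computed by
 * compute_mst_dsc_configs_for_state(), enabling DSC on the ports that use it
 * and disabling it on the rest.
 */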
7235 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7236 					    struct dc_state *dc_state,
7237 					    struct dsc_mst_fairness_vars *vars)
7238 {
7239 	struct dc_stream_state *stream = NULL;
7240 	struct drm_connector *connector;
7241 	struct drm_connector_state *new_con_state;
7242 	struct amdgpu_dm_connector *aconnector;
7243 	struct dm_connector_state *dm_conn_state;
7244 	int i, j, clock;
7245 	int vcpi, pbn_div, pbn = 0;
7246 
7247 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7248 
7249 		aconnector = to_amdgpu_dm_connector(connector);
7250 
7251 		if (!aconnector->port)
7252 			continue;
7253 
7254 		if (!new_con_state || !new_con_state->crtc)
7255 			continue;
7256 
7257 		dm_conn_state = to_dm_connector_state(new_con_state);
7258 
7259 		for (j = 0; j < dc_state->stream_count; j++) {
7260 			stream = dc_state->streams[j];
7261 			if (!stream)
7262 				continue;
7263 
7264 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7265 				break;
7266 
7267 			stream = NULL;
7268 		}
7269 
7270 		if (!stream)
7271 			continue;
7272 
7273 		if (stream->timing.flags.DSC != 1) {
7274 			drm_dp_mst_atomic_enable_dsc(state,
7275 						     aconnector->port,
7276 						     dm_conn_state->pbn,
7277 						     0,
7278 						     false);
7279 			continue;
7280 		}
7281 
7282 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7283 		clock = stream->timing.pix_clk_100hz / 10;
7284 		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
7285 		for (j = 0; j < dc_state->stream_count; j++) {
7286 			if (vars[j].aconnector == aconnector) {
7287 				pbn = vars[j].pbn;
7288 				break;
7289 			}
7290 		}
7291 
7292 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7293 						    aconnector->port,
7294 						    pbn, pbn_div,
7295 						    true);
7296 		if (vcpi < 0)
7297 			return vcpi;
7298 
7299 		dm_conn_state->pbn = pbn;
7300 		dm_conn_state->vcpi_slots = vcpi;
7301 	}
7302 	return 0;
7303 }
7304 #endif
7305 
7306 static void dm_drm_plane_reset(struct drm_plane *plane)
7307 {
7308 	struct dm_plane_state *amdgpu_state = NULL;
7309 
7310 	if (plane->state)
7311 		plane->funcs->atomic_destroy_state(plane, plane->state);
7312 
7313 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7314 	WARN_ON(amdgpu_state == NULL);
7315 
7316 	if (amdgpu_state)
7317 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7318 }
7319 
7320 static struct drm_plane_state *
7321 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7322 {
7323 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7324 
7325 	old_dm_plane_state = to_dm_plane_state(plane->state);
7326 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7327 	if (!dm_plane_state)
7328 		return NULL;
7329 
7330 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7331 
7332 	if (old_dm_plane_state->dc_state) {
7333 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7334 		dc_plane_state_retain(dm_plane_state->dc_state);
7335 	}
7336 
7337 	return &dm_plane_state->base;
7338 }
7339 
7340 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7341 				struct drm_plane_state *state)
7342 {
7343 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7344 
7345 	if (dm_plane_state->dc_state)
7346 		dc_plane_state_release(dm_plane_state->dc_state);
7347 
7348 	drm_atomic_helper_plane_destroy_state(plane, state);
7349 }
7350 
7351 static const struct drm_plane_funcs dm_plane_funcs = {
7352 	.update_plane	= drm_atomic_helper_update_plane,
7353 	.disable_plane	= drm_atomic_helper_disable_plane,
7354 	.destroy	= drm_primary_helper_destroy,
7355 	.reset = dm_drm_plane_reset,
7356 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7357 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7358 	.format_mod_supported = dm_plane_format_mod_supported,
7359 };
7360 
7361 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7362 				      struct drm_plane_state *new_state)
7363 {
7364 	struct amdgpu_framebuffer *afb;
7365 	struct drm_gem_object *obj;
7366 	struct amdgpu_device *adev;
7367 	struct amdgpu_bo *rbo;
7368 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7369 	struct list_head list;
7370 	struct ttm_validate_buffer tv;
7371 	struct ww_acquire_ctx ticket;
7372 	uint32_t domain;
7373 	int r;
7374 
7375 	if (!new_state->fb) {
7376 		DRM_DEBUG_KMS("No FB bound\n");
7377 		return 0;
7378 	}
7379 
7380 	afb = to_amdgpu_framebuffer(new_state->fb);
7381 	obj = new_state->fb->obj[0];
7382 	rbo = gem_to_amdgpu_bo(obj);
7383 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7384 	INIT_LIST_HEAD(&list);
7385 
7386 	tv.bo = &rbo->tbo;
7387 	tv.num_shared = 1;
7388 	list_add(&tv.head, &list);
7389 
7390 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7391 	if (r) {
7392 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7393 		return r;
7394 	}
7395 
7396 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7397 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7398 	else
7399 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7400 
7401 	r = amdgpu_bo_pin(rbo, domain);
7402 	if (unlikely(r != 0)) {
7403 		if (r != -ERESTARTSYS)
7404 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7405 		ttm_eu_backoff_reservation(&ticket, &list);
7406 		return r;
7407 	}
7408 
7409 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7410 	if (unlikely(r != 0)) {
7411 		amdgpu_bo_unpin(rbo);
7412 		ttm_eu_backoff_reservation(&ticket, &list);
7413 		DRM_ERROR("%p bind failed\n", rbo);
7414 		return r;
7415 	}
7416 
7417 	ttm_eu_backoff_reservation(&ticket, &list);
7418 
7419 	afb->address = amdgpu_bo_gpu_offset(rbo);
7420 
7421 	amdgpu_bo_ref(rbo);
7422 
7423 	/*
7424 	 * We don't do surface updates on planes that have been newly created,
7425 	 * but we also don't have the afb->address during atomic check.
7426 	 *
7427 	 * Fill in buffer attributes depending on the address here, but only on
7428 	 * newly created planes since they're not being used by DC yet and this
7429 	 * won't modify global state.
7430 	 */
7431 	dm_plane_state_old = to_dm_plane_state(plane->state);
7432 	dm_plane_state_new = to_dm_plane_state(new_state);
7433 
7434 	if (dm_plane_state_new->dc_state &&
7435 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7436 		struct dc_plane_state *plane_state =
7437 			dm_plane_state_new->dc_state;
7438 		bool force_disable_dcc = !plane_state->dcc.enable;
7439 
7440 		fill_plane_buffer_attributes(
7441 			adev, afb, plane_state->format, plane_state->rotation,
7442 			afb->tiling_flags,
7443 			&plane_state->tiling_info, &plane_state->plane_size,
7444 			&plane_state->dcc, &plane_state->address,
7445 			afb->tmz_surface, force_disable_dcc);
7446 	}
7447 
7448 	return 0;
7449 }
7450 
7451 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7452 				       struct drm_plane_state *old_state)
7453 {
7454 	struct amdgpu_bo *rbo;
7455 	int r;
7456 
7457 	if (!old_state->fb)
7458 		return;
7459 
7460 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7461 	r = amdgpu_bo_reserve(rbo, false);
7462 	if (unlikely(r)) {
7463 		DRM_ERROR("failed to reserve rbo before unpin\n");
7464 		return;
7465 	}
7466 
7467 	amdgpu_bo_unpin(rbo);
7468 	amdgpu_bo_unreserve(rbo);
7469 	amdgpu_bo_unref(&rbo);
7470 }
7471 
7472 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7473 				       struct drm_crtc_state *new_crtc_state)
7474 {
7475 	struct drm_framebuffer *fb = state->fb;
7476 	int min_downscale, max_upscale;
7477 	int min_scale = 0;
7478 	int max_scale = INT_MAX;
7479 
7480 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7481 	if (fb && state->crtc) {
7482 		/* Validate viewport to cover the case when only the position changes */
7483 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7484 			int viewport_width = state->crtc_w;
7485 			int viewport_height = state->crtc_h;
7486 
7487 			if (state->crtc_x < 0)
7488 				viewport_width += state->crtc_x;
7489 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7490 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7491 
7492 			if (state->crtc_y < 0)
7493 				viewport_height += state->crtc_y;
7494 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7495 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7496 
7497 			if (viewport_width < 0 || viewport_height < 0) {
7498 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7499 				return -EINVAL;
7500 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7501 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7502 				return -EINVAL;
7503 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7504 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7505 				return -EINVAL;
7506 			}
7507 
7508 		}
7509 
7510 		/* Get min/max allowed scaling factors from plane caps. */
7511 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7512 					     &min_downscale, &max_upscale);
7513 		/*
7514 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7515 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7516 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7517 		 */
7518 		min_scale = (1000 << 16) / max_upscale;
7519 		max_scale = (1000 << 16) / min_downscale;
7520 	}
7521 
7522 	return drm_atomic_helper_check_plane_state(
7523 		state, new_crtc_state, min_scale, max_scale, true, true);
7524 }
7525 
7526 static int dm_plane_atomic_check(struct drm_plane *plane,
7527 				 struct drm_atomic_state *state)
7528 {
7529 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7530 										 plane);
7531 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7532 	struct dc *dc = adev->dm.dc;
7533 	struct dm_plane_state *dm_plane_state;
7534 	struct dc_scaling_info scaling_info;
7535 	struct drm_crtc_state *new_crtc_state;
7536 	int ret;
7537 
7538 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7539 
7540 	dm_plane_state = to_dm_plane_state(new_plane_state);
7541 
7542 	if (!dm_plane_state->dc_state)
7543 		return 0;
7544 
7545 	new_crtc_state =
7546 		drm_atomic_get_new_crtc_state(state,
7547 					      new_plane_state->crtc);
7548 	if (!new_crtc_state)
7549 		return -EINVAL;
7550 
7551 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7552 	if (ret)
7553 		return ret;
7554 
7555 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7556 	if (ret)
7557 		return ret;
7558 
7559 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7560 		return 0;
7561 
7562 	return -EINVAL;
7563 }
7564 
7565 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7566 				       struct drm_atomic_state *state)
7567 {
7568 	/* Only support async updates on cursor planes. */
7569 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7570 		return -EINVAL;
7571 
7572 	return 0;
7573 }
7574 
7575 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7576 					 struct drm_atomic_state *state)
7577 {
7578 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7579 									   plane);
7580 	struct drm_plane_state *old_state =
7581 		drm_atomic_get_old_plane_state(state, plane);
7582 
7583 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7584 
7585 	swap(plane->state->fb, new_state->fb);
7586 
7587 	plane->state->src_x = new_state->src_x;
7588 	plane->state->src_y = new_state->src_y;
7589 	plane->state->src_w = new_state->src_w;
7590 	plane->state->src_h = new_state->src_h;
7591 	plane->state->crtc_x = new_state->crtc_x;
7592 	plane->state->crtc_y = new_state->crtc_y;
7593 	plane->state->crtc_w = new_state->crtc_w;
7594 	plane->state->crtc_h = new_state->crtc_h;
7595 
7596 	handle_cursor_update(plane, old_state);
7597 }
7598 
7599 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7600 	.prepare_fb = dm_plane_helper_prepare_fb,
7601 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7602 	.atomic_check = dm_plane_atomic_check,
7603 	.atomic_async_check = dm_plane_atomic_async_check,
7604 	.atomic_async_update = dm_plane_atomic_async_update
7605 };
7606 
7607 /*
7608  * TODO: these are currently initialized to rgb formats only.
7609  * For future use cases we should either initialize them dynamically based on
7610  * plane capabilities, or initialize this array to all formats, so the internal
7611  * drm check will succeed, and let DC implement a proper check.
7612  */
7613 static const uint32_t rgb_formats[] = {
7614 	DRM_FORMAT_XRGB8888,
7615 	DRM_FORMAT_ARGB8888,
7616 	DRM_FORMAT_RGBA8888,
7617 	DRM_FORMAT_XRGB2101010,
7618 	DRM_FORMAT_XBGR2101010,
7619 	DRM_FORMAT_ARGB2101010,
7620 	DRM_FORMAT_ABGR2101010,
7621 	DRM_FORMAT_XRGB16161616,
7622 	DRM_FORMAT_XBGR16161616,
7623 	DRM_FORMAT_ARGB16161616,
7624 	DRM_FORMAT_ABGR16161616,
7625 	DRM_FORMAT_XBGR8888,
7626 	DRM_FORMAT_ABGR8888,
7627 	DRM_FORMAT_RGB565,
7628 };
7629 
7630 static const uint32_t overlay_formats[] = {
7631 	DRM_FORMAT_XRGB8888,
7632 	DRM_FORMAT_ARGB8888,
7633 	DRM_FORMAT_RGBA8888,
7634 	DRM_FORMAT_XBGR8888,
7635 	DRM_FORMAT_ABGR8888,
7636 	DRM_FORMAT_RGB565
7637 };
7638 
7639 static const u32 cursor_formats[] = {
7640 	DRM_FORMAT_ARGB8888
7641 };
7642 
7643 static int get_plane_formats(const struct drm_plane *plane,
7644 			     const struct dc_plane_cap *plane_cap,
7645 			     uint32_t *formats, int max_formats)
7646 {
7647 	int i, num_formats = 0;
7648 
7649 	/*
7650 	 * TODO: Query support for each group of formats directly from
7651 	 * DC plane caps. This will require adding more formats to the
7652 	 * caps list.
7653 	 */
7654 
7655 	switch (plane->type) {
7656 	case DRM_PLANE_TYPE_PRIMARY:
7657 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7658 			if (num_formats >= max_formats)
7659 				break;
7660 
7661 			formats[num_formats++] = rgb_formats[i];
7662 		}
7663 
7664 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7665 			formats[num_formats++] = DRM_FORMAT_NV12;
7666 		if (plane_cap && plane_cap->pixel_format_support.p010)
7667 			formats[num_formats++] = DRM_FORMAT_P010;
7668 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7669 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7670 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7671 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7672 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7673 		}
7674 		break;
7675 
7676 	case DRM_PLANE_TYPE_OVERLAY:
7677 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7678 			if (num_formats >= max_formats)
7679 				break;
7680 
7681 			formats[num_formats++] = overlay_formats[i];
7682 		}
7683 		break;
7684 
7685 	case DRM_PLANE_TYPE_CURSOR:
7686 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7687 			if (num_formats >= max_formats)
7688 				break;
7689 
7690 			formats[num_formats++] = cursor_formats[i];
7691 		}
7692 		break;
7693 	}
7694 
7695 	return num_formats;
7696 }
7697 
7698 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7699 				struct drm_plane *plane,
7700 				unsigned long possible_crtcs,
7701 				const struct dc_plane_cap *plane_cap)
7702 {
7703 	uint32_t formats[32];
7704 	int num_formats;
7705 	int res = -EPERM;
7706 	unsigned int supported_rotations;
7707 	uint64_t *modifiers = NULL;
7708 
7709 	num_formats = get_plane_formats(plane, plane_cap, formats,
7710 					ARRAY_SIZE(formats));
7711 
7712 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7713 	if (res)
7714 		return res;
7715 
7716 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7717 				       &dm_plane_funcs, formats, num_formats,
7718 				       modifiers, plane->type, NULL);
7719 	kfree(modifiers);
7720 	if (res)
7721 		return res;
7722 
7723 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7724 	    plane_cap && plane_cap->per_pixel_alpha) {
7725 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7726 					  BIT(DRM_MODE_BLEND_PREMULTI);
7727 
7728 		drm_plane_create_alpha_property(plane);
7729 		drm_plane_create_blend_mode_property(plane, blend_caps);
7730 	}
7731 
7732 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7733 	    plane_cap &&
7734 	    (plane_cap->pixel_format_support.nv12 ||
7735 	     plane_cap->pixel_format_support.p010)) {
7736 		/* This only affects YUV formats. */
7737 		drm_plane_create_color_properties(
7738 			plane,
7739 			BIT(DRM_COLOR_YCBCR_BT601) |
7740 			BIT(DRM_COLOR_YCBCR_BT709) |
7741 			BIT(DRM_COLOR_YCBCR_BT2020),
7742 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7743 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7744 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7745 	}
7746 
7747 	supported_rotations =
7748 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7749 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7750 
7751 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7752 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7753 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7754 						   supported_rotations);
7755 
7756 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7757 
7758 	/* Create (reset) the plane state */
7759 	if (plane->funcs->reset)
7760 		plane->funcs->reset(plane);
7761 
7762 	return 0;
7763 }
7764 
7765 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7766 			       struct drm_plane *plane,
7767 			       uint32_t crtc_index)
7768 {
7769 	struct amdgpu_crtc *acrtc = NULL;
7770 	struct drm_plane *cursor_plane;
7771 
7772 	int res = -ENOMEM;
7773 
7774 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7775 	if (!cursor_plane)
7776 		goto fail;
7777 
7778 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7779 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7780 
7781 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7782 	if (!acrtc)
7783 		goto fail;
7784 
7785 	res = drm_crtc_init_with_planes(
7786 			dm->ddev,
7787 			&acrtc->base,
7788 			plane,
7789 			cursor_plane,
7790 			&amdgpu_dm_crtc_funcs, NULL);
7791 
7792 	if (res)
7793 		goto fail;
7794 
7795 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7796 
7797 	/* Create (reset) the crtc state */
7798 	if (acrtc->base.funcs->reset)
7799 		acrtc->base.funcs->reset(&acrtc->base);
7800 
7801 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7802 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7803 
7804 	acrtc->crtc_id = crtc_index;
7805 	acrtc->base.enabled = false;
7806 	acrtc->otg_inst = -1;
7807 
7808 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7809 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7810 				   true, MAX_COLOR_LUT_ENTRIES);
7811 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7812 
7813 	return 0;
7814 
7815 fail:
7816 	kfree(acrtc);
7817 	kfree(cursor_plane);
7818 	return res;
7819 }
7820 
7821 
7822 static int to_drm_connector_type(enum signal_type st)
7823 {
7824 	switch (st) {
7825 	case SIGNAL_TYPE_HDMI_TYPE_A:
7826 		return DRM_MODE_CONNECTOR_HDMIA;
7827 	case SIGNAL_TYPE_EDP:
7828 		return DRM_MODE_CONNECTOR_eDP;
7829 	case SIGNAL_TYPE_LVDS:
7830 		return DRM_MODE_CONNECTOR_LVDS;
7831 	case SIGNAL_TYPE_RGB:
7832 		return DRM_MODE_CONNECTOR_VGA;
7833 	case SIGNAL_TYPE_DISPLAY_PORT:
7834 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7835 		return DRM_MODE_CONNECTOR_DisplayPort;
7836 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7837 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7838 		return DRM_MODE_CONNECTOR_DVID;
7839 	case SIGNAL_TYPE_VIRTUAL:
7840 		return DRM_MODE_CONNECTOR_VIRTUAL;
7841 
7842 	default:
7843 		return DRM_MODE_CONNECTOR_Unknown;
7844 	}
7845 }
7846 
7847 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7848 {
7849 	struct drm_encoder *encoder;
7850 
7851 	/* There is only one encoder per connector */
7852 	drm_connector_for_each_possible_encoder(connector, encoder)
7853 		return encoder;
7854 
7855 	return NULL;
7856 }
7857 
7858 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7859 {
7860 	struct drm_encoder *encoder;
7861 	struct amdgpu_encoder *amdgpu_encoder;
7862 
7863 	encoder = amdgpu_dm_connector_to_encoder(connector);
7864 
7865 	if (encoder == NULL)
7866 		return;
7867 
7868 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7869 
7870 	amdgpu_encoder->native_mode.clock = 0;
7871 
7872 	if (!list_empty(&connector->probed_modes)) {
7873 		struct drm_display_mode *preferred_mode = NULL;
7874 
7875 		list_for_each_entry(preferred_mode,
7876 				    &connector->probed_modes,
7877 				    head) {
7878 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7879 				amdgpu_encoder->native_mode = *preferred_mode;
7880 
7881 			break;
7882 		}
7883 
7884 	}
7885 }
7886 
7887 static struct drm_display_mode *
7888 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7889 			     char *name,
7890 			     int hdisplay, int vdisplay)
7891 {
7892 	struct drm_device *dev = encoder->dev;
7893 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7894 	struct drm_display_mode *mode = NULL;
7895 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7896 
7897 	mode = drm_mode_duplicate(dev, native_mode);
7898 
7899 	if (mode == NULL)
7900 		return NULL;
7901 
7902 	mode->hdisplay = hdisplay;
7903 	mode->vdisplay = vdisplay;
7904 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7905 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7906 
7907 	return mode;
7908 
7909 }
7910 
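/*
 * Add a set of common modes smaller than the native mode, derived from the
 * encoder's native mode, so users can select lower resolutions on the connector.
 */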
7911 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7912 						 struct drm_connector *connector)
7913 {
7914 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7915 	struct drm_display_mode *mode = NULL;
7916 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7917 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7918 				to_amdgpu_dm_connector(connector);
7919 	int i;
7920 	int n;
7921 	struct mode_size {
7922 		char name[DRM_DISPLAY_MODE_LEN];
7923 		int w;
7924 		int h;
7925 	} common_modes[] = {
7926 		{  "640x480",  640,  480},
7927 		{  "800x600",  800,  600},
7928 		{ "1024x768", 1024,  768},
7929 		{ "1280x720", 1280,  720},
7930 		{ "1280x800", 1280,  800},
7931 		{"1280x1024", 1280, 1024},
7932 		{ "1440x900", 1440,  900},
7933 		{"1680x1050", 1680, 1050},
7934 		{"1600x1200", 1600, 1200},
7935 		{"1920x1080", 1920, 1080},
7936 		{"1920x1200", 1920, 1200}
7937 	};
7938 
7939 	n = ARRAY_SIZE(common_modes);
7940 
7941 	for (i = 0; i < n; i++) {
7942 		struct drm_display_mode *curmode = NULL;
7943 		bool mode_existed = false;
7944 
7945 		if (common_modes[i].w > native_mode->hdisplay ||
7946 		    common_modes[i].h > native_mode->vdisplay ||
7947 		   (common_modes[i].w == native_mode->hdisplay &&
7948 		    common_modes[i].h == native_mode->vdisplay))
7949 			continue;
7950 
7951 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7952 			if (common_modes[i].w == curmode->hdisplay &&
7953 			    common_modes[i].h == curmode->vdisplay) {
7954 				mode_existed = true;
7955 				break;
7956 			}
7957 		}
7958 
7959 		if (mode_existed)
7960 			continue;
7961 
7962 		mode = amdgpu_dm_create_common_mode(encoder,
7963 				common_modes[i].name, common_modes[i].w,
7964 				common_modes[i].h);
7965 		drm_mode_probed_add(connector, mode);
7966 		amdgpu_dm_connector->num_modes++;
7967 	}
7968 }
7969 
7970 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7971 {
7972 	struct drm_encoder *encoder;
7973 	struct amdgpu_encoder *amdgpu_encoder;
7974 	const struct drm_display_mode *native_mode;
7975 
7976 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7977 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7978 		return;
7979 
7980 	encoder = amdgpu_dm_connector_to_encoder(connector);
7981 	if (!encoder)
7982 		return;
7983 
7984 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7985 
7986 	native_mode = &amdgpu_encoder->native_mode;
7987 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7988 		return;
7989 
7990 	drm_connector_set_panel_orientation_with_quirk(connector,
7991 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7992 						       native_mode->hdisplay,
7993 						       native_mode->vdisplay);
7994 }
7995 
7996 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7997 					      struct edid *edid)
7998 {
7999 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8000 			to_amdgpu_dm_connector(connector);
8001 
8002 	if (edid) {
8003 		/* empty probed_modes */
8004 		INIT_LIST_HEAD(&connector->probed_modes);
8005 		amdgpu_dm_connector->num_modes =
8006 				drm_add_edid_modes(connector, edid);
8007 
8008 		/* Sort the probed modes before calling
8009 		 * amdgpu_dm_get_native_mode(), since an EDID can have
8010 		 * more than one preferred mode. A mode that appears
8011 		 * later in the probed mode list could be a preferred
8012 		 * mode with a higher resolution. For example, 3840x2160
8013 		 * as the base EDID preferred timing and 4096x2160 as the
8014 		 * preferred resolution in a DID extension block later.
8015 		 */
8016 		drm_mode_sort(&connector->probed_modes);
8017 		amdgpu_dm_get_native_mode(connector);
8018 
8019 		/* Freesync capabilities are reset by calling
8020 		 * drm_add_edid_modes() and need to be
8021 		 * restored here.
8022 		 */
8023 		amdgpu_dm_update_freesync_caps(connector, edid);
8024 
8025 		amdgpu_set_panel_orientation(connector);
8026 	} else {
8027 		amdgpu_dm_connector->num_modes = 0;
8028 	}
8029 }
8030 
8031 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8032 			      struct drm_display_mode *mode)
8033 {
8034 	struct drm_display_mode *m;
8035 
8036 	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8037 		if (drm_mode_equal(m, mode))
8038 			return true;
8039 	}
8040 
8041 	return false;
8042 }
8043 
8044 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8045 {
8046 	const struct drm_display_mode *m;
8047 	struct drm_display_mode *new_mode;
8048 	uint i;
8049 	uint32_t new_modes_count = 0;
8050 
8051 	/* Standard FPS values
8052 	 *
8053 	 * 23.976       - TV/NTSC
8054 	 * 24 	        - Cinema
8055 	 * 25 	        - TV/PAL
8056 	 * 29.97        - TV/NTSC
8057 	 * 30 	        - TV/NTSC
8058 	 * 48 	        - Cinema HFR
8059 	 * 50 	        - TV/PAL
8060 	 * 60 	        - Commonly used
8061 	 * 48,72,96,120 - Multiples of 24
8062 	 */
8063 	static const uint32_t common_rates[] = {
8064 		23976, 24000, 25000, 29970, 30000,
8065 		48000, 50000, 60000, 72000, 96000, 120000
8066 	};
8067 
8068 	/*
8069 	 * Find mode with highest refresh rate with the same resolution
8070 	 * as the preferred mode. Some monitors report a preferred mode
8071 	 * with lower resolution than the highest refresh rate supported.
8072 	 */
8073 
8074 	m = get_highest_refresh_rate_mode(aconnector, true);
8075 	if (!m)
8076 		return 0;
8077 
8078 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8079 		uint64_t target_vtotal, target_vtotal_diff;
8080 		uint64_t num, den;
8081 
8082 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8083 			continue;
8084 
8085 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8086 		    common_rates[i] > aconnector->max_vfreq * 1000)
8087 			continue;
8088 
8089 		num = (unsigned long long)m->clock * 1000 * 1000;
8090 		den = common_rates[i] * (unsigned long long)m->htotal;
8091 		target_vtotal = div_u64(num, den);
8092 		target_vtotal_diff = target_vtotal - m->vtotal;
8093 
8094 		/* Check for illegal modes */
8095 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8096 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8097 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8098 			continue;
8099 
8100 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8101 		if (!new_mode)
8102 			goto out;
8103 
8104 		new_mode->vtotal += (u16)target_vtotal_diff;
8105 		new_mode->vsync_start += (u16)target_vtotal_diff;
8106 		new_mode->vsync_end += (u16)target_vtotal_diff;
8107 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8108 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8109 
8110 		if (!is_duplicate_mode(aconnector, new_mode)) {
8111 			drm_mode_probed_add(&aconnector->base, new_mode);
8112 			new_modes_count += 1;
8113 		} else
8114 			drm_mode_destroy(aconnector->base.dev, new_mode);
8115 	}
8116  out:
8117 	return new_modes_count;
8118 }
8119 
8120 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8121 						   struct edid *edid)
8122 {
8123 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8124 		to_amdgpu_dm_connector(connector);
8125 
8126 	if (!(amdgpu_freesync_vid_mode && edid))
8127 		return;
8128 
8129 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8130 		amdgpu_dm_connector->num_modes +=
8131 			add_fs_modes(amdgpu_dm_connector);
8132 }
8133 
8134 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8135 {
8136 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8137 			to_amdgpu_dm_connector(connector);
8138 	struct drm_encoder *encoder;
8139 	struct edid *edid = amdgpu_dm_connector->edid;
8140 
8141 	encoder = amdgpu_dm_connector_to_encoder(connector);
8142 
8143 	if (!drm_edid_is_valid(edid)) {
8144 		amdgpu_dm_connector->num_modes =
8145 				drm_add_modes_noedid(connector, 640, 480);
8146 	} else {
8147 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8148 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8149 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8150 	}
8151 	amdgpu_dm_fbc_init(connector);
8152 
8153 	return amdgpu_dm_connector->num_modes;
8154 }
8155 
8156 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8157 				     struct amdgpu_dm_connector *aconnector,
8158 				     int connector_type,
8159 				     struct dc_link *link,
8160 				     int link_index)
8161 {
8162 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8163 
8164 	/*
8165 	 * Some of the properties below require access to state, like bpc.
8166 	 * Allocate some default initial connector state with our reset helper.
8167 	 */
8168 	if (aconnector->base.funcs->reset)
8169 		aconnector->base.funcs->reset(&aconnector->base);
8170 
8171 	aconnector->connector_id = link_index;
8172 	aconnector->dc_link = link;
8173 	aconnector->base.interlace_allowed = false;
8174 	aconnector->base.doublescan_allowed = false;
8175 	aconnector->base.stereo_allowed = false;
8176 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8177 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8178 	aconnector->audio_inst = -1;
8179 	mutex_init(&aconnector->hpd_lock);
8180 
8181 	/*
8182 	 * Configure HPD hot plug support. connector->polled defaults to 0,
8183 	 * which means HPD hot plug is not supported.
8184 	 */
8185 	switch (connector_type) {
8186 	case DRM_MODE_CONNECTOR_HDMIA:
8187 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8188 		aconnector->base.ycbcr_420_allowed =
8189 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8190 		break;
8191 	case DRM_MODE_CONNECTOR_DisplayPort:
8192 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8193 		if (link->is_dig_mapping_flexible &&
8194 		    link->dc->res_pool->funcs->link_encs_assign) {
8195 			link->link_enc =
8196 				link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8197 			if (!link->link_enc)
8198 				link->link_enc =
8199 					link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8200 		}
8201 
8202 		if (link->link_enc)
8203 			aconnector->base.ycbcr_420_allowed =
8204 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8205 		break;
8206 	case DRM_MODE_CONNECTOR_DVID:
8207 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8208 		break;
8209 	default:
8210 		break;
8211 	}
8212 
8213 	drm_object_attach_property(&aconnector->base.base,
8214 				dm->ddev->mode_config.scaling_mode_property,
8215 				DRM_MODE_SCALE_NONE);
8216 
8217 	drm_object_attach_property(&aconnector->base.base,
8218 				adev->mode_info.underscan_property,
8219 				UNDERSCAN_OFF);
8220 	drm_object_attach_property(&aconnector->base.base,
8221 				adev->mode_info.underscan_hborder_property,
8222 				0);
8223 	drm_object_attach_property(&aconnector->base.base,
8224 				adev->mode_info.underscan_vborder_property,
8225 				0);
8226 
8227 	if (!aconnector->mst_port)
8228 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8229 
8230 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8231 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8232 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8233 
8234 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8235 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8236 		drm_object_attach_property(&aconnector->base.base,
8237 				adev->mode_info.abm_level_property, 0);
8238 	}
8239 
8240 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8241 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8242 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8243 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8244 
8245 		if (!aconnector->mst_port)
8246 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8247 
8248 #ifdef CONFIG_DRM_AMD_DC_HDCP
8249 		if (adev->dm.hdcp_workqueue)
8250 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8251 #endif
8252 	}
8253 }
8254 
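/* Translate an array of i2c_msg into a DC i2c_command and submit it over DDC. */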
8255 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8256 			      struct i2c_msg *msgs, int num)
8257 {
8258 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8259 	struct ddc_service *ddc_service = i2c->ddc_service;
8260 	struct i2c_command cmd;
8261 	int i;
8262 	int result = -EIO;
8263 
8264 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8265 
8266 	if (!cmd.payloads)
8267 		return result;
8268 
8269 	cmd.number_of_payloads = num;
8270 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8271 	cmd.speed = 100;
8272 
8273 	for (i = 0; i < num; i++) {
8274 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8275 		cmd.payloads[i].address = msgs[i].addr;
8276 		cmd.payloads[i].length = msgs[i].len;
8277 		cmd.payloads[i].data = msgs[i].buf;
8278 	}
8279 
8280 	if (dc_submit_i2c(
8281 			ddc_service->ctx->dc,
8282 			ddc_service->ddc_pin->hw_info.ddc_channel,
8283 			&cmd))
8284 		result = num;
8285 
8286 	kfree(cmd.payloads);
8287 	return result;
8288 }
8289 
8290 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8291 {
8292 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8293 }
8294 
8295 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8296 	.master_xfer = amdgpu_dm_i2c_xfer,
8297 	.functionality = amdgpu_dm_i2c_func,
8298 };
8299 
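/*
 * Allocate and set up an i2c adapter backed by the DC DDC service for the
 * given link; the caller is responsible for registering it.
 */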
8300 static struct amdgpu_i2c_adapter *
8301 create_i2c(struct ddc_service *ddc_service,
8302 	   int link_index,
8303 	   int *res)
8304 {
8305 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8306 	struct amdgpu_i2c_adapter *i2c;
8307 
8308 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8309 	if (!i2c)
8310 		return NULL;
8311 	i2c->base.owner = THIS_MODULE;
8312 	i2c->base.class = I2C_CLASS_DDC;
8313 	i2c->base.dev.parent = &adev->pdev->dev;
8314 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8315 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8316 	i2c_set_adapdata(&i2c->base, i2c);
8317 	i2c->ddc_service = ddc_service;
8318 	if (i2c->ddc_service->ddc_pin)
8319 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8320 
8321 	return i2c;
8322 }
8323 
8324 
8325 /*
8326  * Note: this function assumes that dc_link_detect() was called for the
8327  * dc_link which will be represented by this aconnector.
8328  */
8329 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8330 				    struct amdgpu_dm_connector *aconnector,
8331 				    uint32_t link_index,
8332 				    struct amdgpu_encoder *aencoder)
8333 {
8334 	int res = 0;
8335 	int connector_type;
8336 	struct dc *dc = dm->dc;
8337 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8338 	struct amdgpu_i2c_adapter *i2c;
8339 
8340 	link->priv = aconnector;
8341 
8342 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8343 
8344 	i2c = create_i2c(link->ddc, link->link_index, &res);
8345 	if (!i2c) {
8346 		DRM_ERROR("Failed to create i2c adapter data\n");
8347 		return -ENOMEM;
8348 	}
8349 
8350 	aconnector->i2c = i2c;
8351 	res = i2c_add_adapter(&i2c->base);
8352 
8353 	if (res) {
8354 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8355 		goto out_free;
8356 	}
8357 
8358 	connector_type = to_drm_connector_type(link->connector_signal);
8359 
8360 	res = drm_connector_init_with_ddc(
8361 			dm->ddev,
8362 			&aconnector->base,
8363 			&amdgpu_dm_connector_funcs,
8364 			connector_type,
8365 			&i2c->base);
8366 
8367 	if (res) {
8368 		DRM_ERROR("connector_init failed\n");
8369 		aconnector->connector_id = -1;
8370 		goto out_free;
8371 	}
8372 
8373 	drm_connector_helper_add(
8374 			&aconnector->base,
8375 			&amdgpu_dm_connector_helper_funcs);
8376 
8377 	amdgpu_dm_connector_init_helper(
8378 		dm,
8379 		aconnector,
8380 		connector_type,
8381 		link,
8382 		link_index);
8383 
8384 	drm_connector_attach_encoder(
8385 		&aconnector->base, &aencoder->base);
8386 
8387 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8388 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8389 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8390 
8391 out_free:
8392 	if (res) {
8393 		kfree(i2c);
8394 		aconnector->i2c = NULL;
8395 	}
8396 	return res;
8397 }
8398 
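/* Any CRTC can drive a DM encoder, so return a mask with one bit per CRTC. */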
8399 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8400 {
8401 	switch (adev->mode_info.num_crtc) {
8402 	case 1:
8403 		return 0x1;
8404 	case 2:
8405 		return 0x3;
8406 	case 3:
8407 		return 0x7;
8408 	case 4:
8409 		return 0xf;
8410 	case 5:
8411 		return 0x1f;
8412 	case 6:
8413 	default:
8414 		return 0x3f;
8415 	}
8416 }
8417 
8418 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8419 				  struct amdgpu_encoder *aencoder,
8420 				  uint32_t link_index)
8421 {
8422 	struct amdgpu_device *adev = drm_to_adev(dev);
8423 
8424 	int res = drm_encoder_init(dev,
8425 				   &aencoder->base,
8426 				   &amdgpu_dm_encoder_funcs,
8427 				   DRM_MODE_ENCODER_TMDS,
8428 				   NULL);
8429 
8430 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8431 
8432 	if (!res)
8433 		aencoder->encoder_id = link_index;
8434 	else
8435 		aencoder->encoder_id = -1;
8436 
8437 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8438 
8439 	return res;
8440 }
8441 
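/*
 * Enable or disable the per-CRTC interrupts (vblank, pageflip and, when
 * secure display is built in, vline0) that the display manager relies on.
 * Called around modesets: disabled before the CRTC is torn down and
 * re-enabled once the new state has been programmed.
 */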
8442 static void manage_dm_interrupts(struct amdgpu_device *adev,
8443 				 struct amdgpu_crtc *acrtc,
8444 				 bool enable)
8445 {
8446 	/*
8447 	 * We have no guarantee that the frontend index maps to the same
8448 	 * backend index - some even map to more than one.
8449 	 *
8450 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8451 	 */
8452 	int irq_type =
8453 		amdgpu_display_crtc_idx_to_irq_type(
8454 			adev,
8455 			acrtc->crtc_id);
8456 
8457 	if (enable) {
8458 		drm_crtc_vblank_on(&acrtc->base);
8459 		amdgpu_irq_get(
8460 			adev,
8461 			&adev->pageflip_irq,
8462 			irq_type);
8463 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8464 		amdgpu_irq_get(
8465 			adev,
8466 			&adev->vline0_irq,
8467 			irq_type);
8468 #endif
8469 	} else {
8470 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8471 		amdgpu_irq_put(
8472 			adev,
8473 			&adev->vline0_irq,
8474 			irq_type);
8475 #endif
8476 		amdgpu_irq_put(
8477 			adev,
8478 			&adev->pageflip_irq,
8479 			irq_type);
8480 		drm_crtc_vblank_off(&acrtc->base);
8481 	}
8482 }
8483 
8484 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8485 				      struct amdgpu_crtc *acrtc)
8486 {
8487 	int irq_type =
8488 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8489 
	/*
	 * Read the current state for the IRQ and force-reapply the setting
	 * to hardware.
	 */
8494 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8495 }
8496 
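/*
 * Return true when the connector's scaling mode or underscan settings changed
 * in a way that requires the stream's src/dst rectangles to be reprogrammed.
 */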
8497 static bool
8498 is_scaling_state_different(const struct dm_connector_state *dm_state,
8499 			   const struct dm_connector_state *old_dm_state)
8500 {
8501 	if (dm_state->scaling != old_dm_state->scaling)
8502 		return true;
8503 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8504 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8505 			return true;
8506 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8507 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8508 			return true;
8509 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8510 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8511 		return true;
8512 	return false;
8513 }
8514 
8515 #ifdef CONFIG_DRM_AMD_DC_HDCP
8516 static bool is_content_protection_different(struct drm_connector_state *state,
8517 					    const struct drm_connector_state *old_state,
8518 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8519 {
8520 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8521 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8522 
8523 	/* Handle: Type0/1 change */
8524 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8525 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8526 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8527 		return true;
8528 	}
8529 
	/* CP is being re-enabled, ignore this
8531 	 *
8532 	 * Handles:	ENABLED -> DESIRED
8533 	 */
8534 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8535 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8536 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8537 		return false;
8538 	}
8539 
8540 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8541 	 *
8542 	 * Handles:	UNDESIRED -> ENABLED
8543 	 */
8544 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8545 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8546 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8547 
8548 	/* Stream removed and re-enabled
8549 	 *
8550 	 * Can sometimes overlap with the HPD case,
8551 	 * thus set update_hdcp to false to avoid
8552 	 * setting HDCP multiple times.
8553 	 *
8554 	 * Handles:	DESIRED -> DESIRED (Special case)
8555 	 */
8556 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8557 		state->crtc && state->crtc->enabled &&
8558 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8559 		dm_con_state->update_hdcp = false;
8560 		return true;
8561 	}
8562 
8563 	/* Hot-plug, headless s3, dpms
8564 	 *
8565 	 * Only start HDCP if the display is connected/enabled.
8566 	 * update_hdcp flag will be set to false until the next
8567 	 * HPD comes in.
8568 	 *
8569 	 * Handles:	DESIRED -> DESIRED (Special case)
8570 	 */
8571 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8572 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8573 		dm_con_state->update_hdcp = false;
8574 		return true;
8575 	}
8576 
8577 	/*
8578 	 * Handles:	UNDESIRED -> UNDESIRED
8579 	 *		DESIRED -> DESIRED
8580 	 *		ENABLED -> ENABLED
8581 	 */
8582 	if (old_state->content_protection == state->content_protection)
8583 		return false;
8584 
8585 	/*
8586 	 * Handles:	UNDESIRED -> DESIRED
8587 	 *		DESIRED -> UNDESIRED
8588 	 *		ENABLED -> UNDESIRED
8589 	 */
8590 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8591 		return true;
8592 
8593 	/*
8594 	 * Handles:	DESIRED -> ENABLED
8595 	 */
8596 	return false;
8597 }
8598 
8599 #endif
8600 static void remove_stream(struct amdgpu_device *adev,
8601 			  struct amdgpu_crtc *acrtc,
8602 			  struct dc_stream_state *stream)
8603 {
	/* This is the update-mode case: mark the CRTC as no longer enabled. */
8605 
8606 	acrtc->otg_inst = -1;
8607 	acrtc->enabled = false;
8608 }
8609 
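/*
 * Translate the DRM cursor plane position into a dc_cursor_position. Negative
 * coordinates are clamped to zero and converted into a hotspot offset so the
 * cursor can remain partially visible at the top/left edges of the screen.
 */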
8610 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8611 			       struct dc_cursor_position *position)
8612 {
8613 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8614 	int x, y;
8615 	int xorigin = 0, yorigin = 0;
8616 
8617 	if (!crtc || !plane->state->fb)
8618 		return 0;
8619 
8620 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8621 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8622 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8623 			  __func__,
8624 			  plane->state->crtc_w,
8625 			  plane->state->crtc_h);
8626 		return -EINVAL;
8627 	}
8628 
8629 	x = plane->state->crtc_x;
8630 	y = plane->state->crtc_y;
8631 
8632 	if (x <= -amdgpu_crtc->max_cursor_width ||
8633 	    y <= -amdgpu_crtc->max_cursor_height)
8634 		return 0;
8635 
8636 	if (x < 0) {
8637 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8638 		x = 0;
8639 	}
8640 	if (y < 0) {
8641 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8642 		y = 0;
8643 	}
8644 	position->enable = true;
8645 	position->translate_by_source = true;
8646 	position->x = x;
8647 	position->y = y;
8648 	position->x_hotspot = xorigin;
8649 	position->y_hotspot = yorigin;
8650 
8651 	return 0;
8652 }
8653 
8654 static void handle_cursor_update(struct drm_plane *plane,
8655 				 struct drm_plane_state *old_plane_state)
8656 {
8657 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8658 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8659 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8660 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8661 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8662 	uint64_t address = afb ? afb->address : 0;
8663 	struct dc_cursor_position position = {0};
8664 	struct dc_cursor_attributes attributes;
8665 	int ret;
8666 
8667 	if (!plane->state->fb && !old_plane_state->fb)
8668 		return;
8669 
8670 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8671 		      __func__,
8672 		      amdgpu_crtc->crtc_id,
8673 		      plane->state->crtc_w,
8674 		      plane->state->crtc_h);
8675 
8676 	ret = get_cursor_position(plane, crtc, &position);
8677 	if (ret)
8678 		return;
8679 
8680 	if (!position.enable) {
8681 		/* turn off cursor */
8682 		if (crtc_state && crtc_state->stream) {
8683 			mutex_lock(&adev->dm.dc_lock);
8684 			dc_stream_set_cursor_position(crtc_state->stream,
8685 						      &position);
8686 			mutex_unlock(&adev->dm.dc_lock);
8687 		}
8688 		return;
8689 	}
8690 
8691 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8692 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8693 
8694 	memset(&attributes, 0, sizeof(attributes));
8695 	attributes.address.high_part = upper_32_bits(address);
8696 	attributes.address.low_part  = lower_32_bits(address);
8697 	attributes.width             = plane->state->crtc_w;
8698 	attributes.height            = plane->state->crtc_h;
8699 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8700 	attributes.rotation_angle    = 0;
8701 	attributes.attribute_flags.value = 0;
8702 
8703 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8704 
8705 	if (crtc_state->stream) {
8706 		mutex_lock(&adev->dm.dc_lock);
8707 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8708 							 &attributes))
8709 			DRM_ERROR("DC failed to set cursor attributes\n");
8710 
8711 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8712 						   &position))
8713 			DRM_ERROR("DC failed to set cursor position\n");
8714 		mutex_unlock(&adev->dm.dc_lock);
8715 	}
8716 }
8717 
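/*
 * Hand the pending pageflip event over to the interrupt path: stash the event
 * from the CRTC state, mark the flip as submitted and clear the event so it
 * is not signalled twice. Must be called with the event_lock held.
 */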
8718 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8719 {
8720 
8721 	assert_spin_locked(&acrtc->base.dev->event_lock);
8722 	WARN_ON(acrtc->event);
8723 
8724 	acrtc->event = acrtc->base.state->event;
8725 
8726 	/* Set the flip status */
8727 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8728 
8729 	/* Mark this event as consumed */
8730 	acrtc->base.state->event = NULL;
8731 
8732 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8733 		     acrtc->crtc_id);
8734 }
8735 
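/*
 * Recompute the VRR parameters and VRR infopacket for a stream when a new
 * surface is flipped, and mirror the results into acrtc->dm_irq_params under
 * the event lock so the IRQ handlers see a consistent copy.
 */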
8736 static void update_freesync_state_on_stream(
8737 	struct amdgpu_display_manager *dm,
8738 	struct dm_crtc_state *new_crtc_state,
8739 	struct dc_stream_state *new_stream,
8740 	struct dc_plane_state *surface,
8741 	u32 flip_timestamp_in_us)
8742 {
8743 	struct mod_vrr_params vrr_params;
8744 	struct dc_info_packet vrr_infopacket = {0};
8745 	struct amdgpu_device *adev = dm->adev;
8746 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8747 	unsigned long flags;
8748 	bool pack_sdp_v1_3 = false;
8749 
8750 	if (!new_stream)
8751 		return;
8752 
8753 	/*
8754 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8755 	 * For now it's sufficient to just guard against these conditions.
8756 	 */
8757 
8758 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8759 		return;
8760 
8761 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
8763 
8764 	if (surface) {
8765 		mod_freesync_handle_preflip(
8766 			dm->freesync_module,
8767 			surface,
8768 			new_stream,
8769 			flip_timestamp_in_us,
8770 			&vrr_params);
8771 
8772 		if (adev->family < AMDGPU_FAMILY_AI &&
8773 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8774 			mod_freesync_handle_v_update(dm->freesync_module,
8775 						     new_stream, &vrr_params);
8776 
8777 			/* Need to call this before the frame ends. */
8778 			dc_stream_adjust_vmin_vmax(dm->dc,
8779 						   new_crtc_state->stream,
8780 						   &vrr_params.adjust);
8781 		}
8782 	}
8783 
8784 	mod_freesync_build_vrr_infopacket(
8785 		dm->freesync_module,
8786 		new_stream,
8787 		&vrr_params,
8788 		PACKET_TYPE_VRR,
8789 		TRANSFER_FUNC_UNKNOWN,
8790 		&vrr_infopacket,
8791 		pack_sdp_v1_3);
8792 
8793 	new_crtc_state->freesync_timing_changed |=
8794 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8795 			&vrr_params.adjust,
8796 			sizeof(vrr_params.adjust)) != 0);
8797 
8798 	new_crtc_state->freesync_vrr_info_changed |=
8799 		(memcmp(&new_crtc_state->vrr_infopacket,
8800 			&vrr_infopacket,
8801 			sizeof(vrr_infopacket)) != 0);
8802 
8803 	acrtc->dm_irq_params.vrr_params = vrr_params;
8804 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8805 
8806 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8807 	new_stream->vrr_infopacket = vrr_infopacket;
8808 
8809 	if (new_crtc_state->freesync_vrr_info_changed)
8810 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8811 			      new_crtc_state->base.crtc->base.id,
8812 			      (int)new_crtc_state->base.vrr_enabled,
8813 			      (int)vrr_params.state);
8814 
8815 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8816 }
8817 
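/*
 * Derive the VRR state for the new CRTC state from its freesync config and
 * copy the resulting config and params into acrtc->dm_irq_params so the IRQ
 * handlers operate on the committed values.
 */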
8818 static void update_stream_irq_parameters(
8819 	struct amdgpu_display_manager *dm,
8820 	struct dm_crtc_state *new_crtc_state)
8821 {
8822 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8823 	struct mod_vrr_params vrr_params;
8824 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8825 	struct amdgpu_device *adev = dm->adev;
8826 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8827 	unsigned long flags;
8828 
8829 	if (!new_stream)
8830 		return;
8831 
8832 	/*
8833 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8834 	 * For now it's sufficient to just guard against these conditions.
8835 	 */
8836 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8837 		return;
8838 
8839 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8840 	vrr_params = acrtc->dm_irq_params.vrr_params;
8841 
8842 	if (new_crtc_state->vrr_supported &&
8843 	    config.min_refresh_in_uhz &&
8844 	    config.max_refresh_in_uhz) {
8845 		/*
8846 		 * if freesync compatible mode was set, config.state will be set
8847 		 * in atomic check
8848 		 */
8849 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8850 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8851 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8852 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8853 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8854 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8855 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8856 		} else {
8857 			config.state = new_crtc_state->base.vrr_enabled ?
8858 						     VRR_STATE_ACTIVE_VARIABLE :
8859 						     VRR_STATE_INACTIVE;
8860 		}
8861 	} else {
8862 		config.state = VRR_STATE_UNSUPPORTED;
8863 	}
8864 
8865 	mod_freesync_build_vrr_params(dm->freesync_module,
8866 				      new_stream,
8867 				      &config, &vrr_params);
8868 
8869 	new_crtc_state->freesync_timing_changed |=
8870 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8871 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8872 
8873 	new_crtc_state->freesync_config = config;
8874 	/* Copy state for access from DM IRQ handler */
8875 	acrtc->dm_irq_params.freesync_config = config;
8876 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8877 	acrtc->dm_irq_params.vrr_params = vrr_params;
8878 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8879 }
8880 
8881 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8882 					    struct dm_crtc_state *new_state)
8883 {
8884 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8885 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8886 
8887 	if (!old_vrr_active && new_vrr_active) {
8888 		/* Transition VRR inactive -> active:
8889 		 * While VRR is active, we must not disable vblank irq, as a
		 * re-enable after disable would compute bogus vblank/pflip
		 * timestamps if the re-enable happened inside the display front porch.
8892 		 *
8893 		 * We also need vupdate irq for the actual core vblank handling
8894 		 * at end of vblank.
8895 		 */
8896 		dm_set_vupdate_irq(new_state->base.crtc, true);
8897 		drm_crtc_vblank_get(new_state->base.crtc);
8898 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8899 				 __func__, new_state->base.crtc->base.id);
8900 	} else if (old_vrr_active && !new_vrr_active) {
8901 		/* Transition VRR active -> inactive:
8902 		 * Allow vblank irq disable again for fixed refresh rate.
8903 		 */
8904 		dm_set_vupdate_irq(new_state->base.crtc, false);
8905 		drm_crtc_vblank_put(new_state->base.crtc);
8906 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8907 				 __func__, new_state->base.crtc->base.id);
8908 	}
8909 }
8910 
8911 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8912 {
8913 	struct drm_plane *plane;
8914 	struct drm_plane_state *old_plane_state;
8915 	int i;
8916 
8917 	/*
8918 	 * TODO: Make this per-stream so we don't issue redundant updates for
8919 	 * commits with multiple streams.
8920 	 */
8921 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8922 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8923 			handle_cursor_update(plane, old_plane_state);
8924 }
8925 
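/*
 * Program all plane updates and page flips for a single CRTC as part of the
 * commit tail: build a dc_surface_update bundle, wait for the framebuffer
 * fences, throttle flips against the target vblank, and hand the bundle to DC
 * in one dc_commit_updates_for_stream() call.
 */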
8926 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8927 				    struct dc_state *dc_state,
8928 				    struct drm_device *dev,
8929 				    struct amdgpu_display_manager *dm,
8930 				    struct drm_crtc *pcrtc,
8931 				    bool wait_for_vblank)
8932 {
8933 	uint32_t i;
8934 	uint64_t timestamp_ns;
8935 	struct drm_plane *plane;
8936 	struct drm_plane_state *old_plane_state, *new_plane_state;
8937 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8938 	struct drm_crtc_state *new_pcrtc_state =
8939 			drm_atomic_get_new_crtc_state(state, pcrtc);
8940 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8941 	struct dm_crtc_state *dm_old_crtc_state =
8942 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8943 	int planes_count = 0, vpos, hpos;
8944 	long r;
8945 	unsigned long flags;
8946 	struct amdgpu_bo *abo;
8947 	uint32_t target_vblank, last_flip_vblank;
8948 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8949 	bool pflip_present = false;
8950 	struct {
8951 		struct dc_surface_update surface_updates[MAX_SURFACES];
8952 		struct dc_plane_info plane_infos[MAX_SURFACES];
8953 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8954 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8955 		struct dc_stream_update stream_update;
8956 	} *bundle;
8957 
8958 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8959 
8960 	if (!bundle) {
8961 		dm_error("Failed to allocate update bundle\n");
8962 		goto cleanup;
8963 	}
8964 
8965 	/*
8966 	 * Disable the cursor first if we're disabling all the planes.
8967 	 * It'll remain on the screen after the planes are re-enabled
8968 	 * if we don't.
8969 	 */
8970 	if (acrtc_state->active_planes == 0)
8971 		amdgpu_dm_commit_cursors(state);
8972 
8973 	/* update planes when needed */
8974 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8975 		struct drm_crtc *crtc = new_plane_state->crtc;
8976 		struct drm_crtc_state *new_crtc_state;
8977 		struct drm_framebuffer *fb = new_plane_state->fb;
8978 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8979 		bool plane_needs_flip;
8980 		struct dc_plane_state *dc_plane;
8981 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8982 
8983 		/* Cursor plane is handled after stream updates */
8984 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8985 			continue;
8986 
8987 		if (!fb || !crtc || pcrtc != crtc)
8988 			continue;
8989 
8990 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8991 		if (!new_crtc_state->active)
8992 			continue;
8993 
8994 		dc_plane = dm_new_plane_state->dc_state;
8995 
8996 		bundle->surface_updates[planes_count].surface = dc_plane;
8997 		if (new_pcrtc_state->color_mgmt_changed) {
8998 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8999 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9000 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9001 		}
9002 
9003 		fill_dc_scaling_info(new_plane_state,
9004 				     &bundle->scaling_infos[planes_count]);
9005 
9006 		bundle->surface_updates[planes_count].scaling_info =
9007 			&bundle->scaling_infos[planes_count];
9008 
9009 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9010 
9011 		pflip_present = pflip_present || plane_needs_flip;
9012 
9013 		if (!plane_needs_flip) {
9014 			planes_count += 1;
9015 			continue;
9016 		}
9017 
9018 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9019 
9020 		/*
9021 		 * Wait for all fences on this FB. Do limited wait to avoid
9022 		 * deadlock during GPU reset when this fence will not signal
9023 		 * but we hold reservation lock for the BO.
9024 		 */
9025 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9026 					  msecs_to_jiffies(5000));
9027 		if (unlikely(r <= 0))
9028 			DRM_ERROR("Waiting for fences timed out!");
9029 
9030 		fill_dc_plane_info_and_addr(
9031 			dm->adev, new_plane_state,
9032 			afb->tiling_flags,
9033 			&bundle->plane_infos[planes_count],
9034 			&bundle->flip_addrs[planes_count].address,
9035 			afb->tmz_surface, false);
9036 
9037 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9038 				 new_plane_state->plane->index,
9039 				 bundle->plane_infos[planes_count].dcc.enable);
9040 
9041 		bundle->surface_updates[planes_count].plane_info =
9042 			&bundle->plane_infos[planes_count];
9043 
9044 		/*
9045 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
9047 		 */
9048 		bundle->flip_addrs[planes_count].flip_immediate =
9049 			crtc->state->async_flip &&
9050 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9051 
9052 		timestamp_ns = ktime_get_ns();
9053 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9054 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9055 		bundle->surface_updates[planes_count].surface = dc_plane;
9056 
9057 		if (!bundle->surface_updates[planes_count].surface) {
9058 			DRM_ERROR("No surface for CRTC: id=%d\n",
9059 					acrtc_attach->crtc_id);
9060 			continue;
9061 		}
9062 
9063 		if (plane == pcrtc->primary)
9064 			update_freesync_state_on_stream(
9065 				dm,
9066 				acrtc_state,
9067 				acrtc_state->stream,
9068 				dc_plane,
9069 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9070 
9071 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9072 				 __func__,
9073 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9074 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9075 
9076 		planes_count += 1;
9077 
9078 	}
9079 
9080 	if (pflip_present) {
9081 		if (!vrr_active) {
9082 			/* Use old throttling in non-vrr fixed refresh rate mode
9083 			 * to keep flip scheduling based on target vblank counts
9084 			 * working in a backwards compatible way, e.g., for
9085 			 * clients using the GLX_OML_sync_control extension or
9086 			 * DRI3/Present extension with defined target_msc.
9087 			 */
9088 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
9091 			/* For variable refresh rate mode only:
9092 			 * Get vblank of last completed flip to avoid > 1 vrr
9093 			 * flips per video frame by use of throttling, but allow
9094 			 * flip programming anywhere in the possibly large
9095 			 * variable vrr vblank interval for fine-grained flip
9096 			 * timing control and more opportunity to avoid stutter
9097 			 * on late submission of flips.
9098 			 */
9099 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9100 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9101 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9102 		}
9103 
9104 		target_vblank = last_flip_vblank + wait_for_vblank;
9105 
9106 		/*
9107 		 * Wait until we're out of the vertical blank period before the one
9108 		 * targeted by the flip
9109 		 */
9110 		while ((acrtc_attach->enabled &&
9111 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9112 							    0, &vpos, &hpos, NULL,
9113 							    NULL, &pcrtc->hwmode)
9114 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9115 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9116 			(int)(target_vblank -
9117 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9118 			usleep_range(1000, 1100);
9119 		}
9120 
		/*
9122 		 * Prepare the flip event for the pageflip interrupt to handle.
9123 		 *
9124 		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
9126 		 * from 0 -> n planes we have to skip a hardware generated event
9127 		 * and rely on sending it from software.
9128 		 */
9129 		if (acrtc_attach->base.state->event &&
9130 		    acrtc_state->active_planes > 0 &&
9131 		    !acrtc_state->force_dpms_off) {
9132 			drm_crtc_vblank_get(pcrtc);
9133 
9134 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9135 
9136 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9137 			prepare_flip_isr(acrtc_attach);
9138 
9139 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9140 		}
9141 
9142 		if (acrtc_state->stream) {
9143 			if (acrtc_state->freesync_vrr_info_changed)
9144 				bundle->stream_update.vrr_infopacket =
9145 					&acrtc_state->stream->vrr_infopacket;
9146 		}
9147 	}
9148 
9149 	/* Update the planes if changed or disable if we don't have any. */
9150 	if ((planes_count || acrtc_state->active_planes == 0) &&
9151 		acrtc_state->stream) {
9152 #if defined(CONFIG_DRM_AMD_DC_DCN)
9153 		/*
9154 		 * If PSR or idle optimizations are enabled then flush out
9155 		 * any pending work before hardware programming.
9156 		 */
9157 		if (dm->vblank_control_workqueue)
9158 			flush_workqueue(dm->vblank_control_workqueue);
9159 #endif
9160 
9161 		bundle->stream_update.stream = acrtc_state->stream;
9162 		if (new_pcrtc_state->mode_changed) {
9163 			bundle->stream_update.src = acrtc_state->stream->src;
9164 			bundle->stream_update.dst = acrtc_state->stream->dst;
9165 		}
9166 
9167 		if (new_pcrtc_state->color_mgmt_changed) {
9168 			/*
9169 			 * TODO: This isn't fully correct since we've actually
9170 			 * already modified the stream in place.
9171 			 */
9172 			bundle->stream_update.gamut_remap =
9173 				&acrtc_state->stream->gamut_remap_matrix;
9174 			bundle->stream_update.output_csc_transform =
9175 				&acrtc_state->stream->csc_color_matrix;
9176 			bundle->stream_update.out_transfer_func =
9177 				acrtc_state->stream->out_transfer_func;
9178 		}
9179 
9180 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9181 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9182 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9183 
9184 		/*
9185 		 * If FreeSync state on the stream has changed then we need to
9186 		 * re-adjust the min/max bounds now that DC doesn't handle this
9187 		 * as part of commit.
9188 		 */
9189 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9190 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9191 			dc_stream_adjust_vmin_vmax(
9192 				dm->dc, acrtc_state->stream,
9193 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9194 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9195 		}
9196 		mutex_lock(&dm->dc_lock);
9197 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9198 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9199 			amdgpu_dm_psr_disable(acrtc_state->stream);
9200 
9201 		dc_commit_updates_for_stream(dm->dc,
9202 						     bundle->surface_updates,
9203 						     planes_count,
9204 						     acrtc_state->stream,
9205 						     &bundle->stream_update,
9206 						     dc_state);
9207 
		/*
9209 		 * Enable or disable the interrupts on the backend.
9210 		 *
9211 		 * Most pipes are put into power gating when unused.
9212 		 *
9213 		 * When power gating is enabled on a pipe we lose the
9214 		 * interrupt enablement state when power gating is disabled.
9215 		 *
9216 		 * So we need to update the IRQ control state in hardware
9217 		 * whenever the pipe turns on (since it could be previously
9218 		 * power gated) or off (since some pipes can't be power gated
9219 		 * on some ASICs).
9220 		 */
9221 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9222 			dm_update_pflip_irq_state(drm_to_adev(dev),
9223 						  acrtc_attach);
9224 
9225 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9226 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9227 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9228 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9229 
9230 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9231 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9232 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9233 			struct amdgpu_dm_connector *aconn =
9234 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9235 
9236 			if (aconn->psr_skip_count > 0)
9237 				aconn->psr_skip_count--;
9238 
9239 			/* Allow PSR when skip count is 0. */
9240 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9241 		} else {
9242 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9243 		}
9244 
9245 		mutex_unlock(&dm->dc_lock);
9246 	}
9247 
9248 	/*
9249 	 * Update cursor state *after* programming all the planes.
9250 	 * This avoids redundant programming in the case where we're going
9251 	 * to be disabling a single plane - those pipes are being disabled.
9252 	 */
9253 	if (acrtc_state->active_planes)
9254 		amdgpu_dm_commit_cursors(state);
9255 
9256 cleanup:
9257 	kfree(bundle);
9258 }
9259 
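/*
 * Notify the audio component about connectors that lost or gained an
 * audio-capable stream during this commit, so the ELD and audio routing can
 * be updated.
 */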
9260 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9261 				   struct drm_atomic_state *state)
9262 {
9263 	struct amdgpu_device *adev = drm_to_adev(dev);
9264 	struct amdgpu_dm_connector *aconnector;
9265 	struct drm_connector *connector;
9266 	struct drm_connector_state *old_con_state, *new_con_state;
9267 	struct drm_crtc_state *new_crtc_state;
9268 	struct dm_crtc_state *new_dm_crtc_state;
9269 	const struct dc_stream_status *status;
9270 	int i, inst;
9271 
9272 	/* Notify device removals. */
9273 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9274 		if (old_con_state->crtc != new_con_state->crtc) {
9275 			/* CRTC changes require notification. */
9276 			goto notify;
9277 		}
9278 
9279 		if (!new_con_state->crtc)
9280 			continue;
9281 
9282 		new_crtc_state = drm_atomic_get_new_crtc_state(
9283 			state, new_con_state->crtc);
9284 
9285 		if (!new_crtc_state)
9286 			continue;
9287 
9288 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9289 			continue;
9290 
9291 	notify:
9292 		aconnector = to_amdgpu_dm_connector(connector);
9293 
9294 		mutex_lock(&adev->dm.audio_lock);
9295 		inst = aconnector->audio_inst;
9296 		aconnector->audio_inst = -1;
9297 		mutex_unlock(&adev->dm.audio_lock);
9298 
9299 		amdgpu_dm_audio_eld_notify(adev, inst);
9300 	}
9301 
9302 	/* Notify audio device additions. */
9303 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9304 		if (!new_con_state->crtc)
9305 			continue;
9306 
9307 		new_crtc_state = drm_atomic_get_new_crtc_state(
9308 			state, new_con_state->crtc);
9309 
9310 		if (!new_crtc_state)
9311 			continue;
9312 
9313 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9314 			continue;
9315 
9316 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9317 		if (!new_dm_crtc_state->stream)
9318 			continue;
9319 
9320 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9321 		if (!status)
9322 			continue;
9323 
9324 		aconnector = to_amdgpu_dm_connector(connector);
9325 
9326 		mutex_lock(&adev->dm.audio_lock);
9327 		inst = status->audio_inst;
9328 		aconnector->audio_inst = inst;
9329 		mutex_unlock(&adev->dm.audio_lock);
9330 
9331 		amdgpu_dm_audio_eld_notify(adev, inst);
9332 	}
9333 }
9334 
9335 /*
9336  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9337  * @crtc_state: the DRM CRTC state
9338  * @stream_state: the DC stream state.
9339  *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9341  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9342  */
9343 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9344 						struct dc_stream_state *stream_state)
9345 {
9346 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9347 }
9348 
9349 /**
9350  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9351  * @state: The atomic state to commit
9352  *
9353  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
9355  * atomic check should have filtered anything non-kosher.
9356  */
9357 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9358 {
9359 	struct drm_device *dev = state->dev;
9360 	struct amdgpu_device *adev = drm_to_adev(dev);
9361 	struct amdgpu_display_manager *dm = &adev->dm;
9362 	struct dm_atomic_state *dm_state;
9363 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9364 	uint32_t i, j;
9365 	struct drm_crtc *crtc;
9366 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9367 	unsigned long flags;
9368 	bool wait_for_vblank = true;
9369 	struct drm_connector *connector;
9370 	struct drm_connector_state *old_con_state, *new_con_state;
9371 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9372 	int crtc_disable_count = 0;
9373 	bool mode_set_reset_required = false;
9374 
9375 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9376 
9377 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9378 
9379 	dm_state = dm_atomic_get_new_state(state);
9380 	if (dm_state && dm_state->context) {
9381 		dc_state = dm_state->context;
9382 	} else {
9383 		/* No state changes, retain current state. */
9384 		dc_state_temp = dc_create_state(dm->dc);
9385 		ASSERT(dc_state_temp);
9386 		dc_state = dc_state_temp;
9387 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9388 	}
9389 
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9391 				       new_crtc_state, i) {
9392 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9393 
9394 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9395 
9396 		if (old_crtc_state->active &&
9397 		    (!new_crtc_state->active ||
9398 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9399 			manage_dm_interrupts(adev, acrtc, false);
9400 			dc_stream_release(dm_old_crtc_state->stream);
9401 		}
9402 	}
9403 
9404 	drm_atomic_helper_calc_timestamping_constants(state);
9405 
9406 	/* update changed items */
9407 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9408 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9409 
9410 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9411 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9412 
9413 		DRM_DEBUG_ATOMIC(
9414 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9416 			"connectors_changed:%d\n",
9417 			acrtc->crtc_id,
9418 			new_crtc_state->enable,
9419 			new_crtc_state->active,
9420 			new_crtc_state->planes_changed,
9421 			new_crtc_state->mode_changed,
9422 			new_crtc_state->active_changed,
9423 			new_crtc_state->connectors_changed);
9424 
9425 		/* Disable cursor if disabling crtc */
9426 		if (old_crtc_state->active && !new_crtc_state->active) {
9427 			struct dc_cursor_position position;
9428 
9429 			memset(&position, 0, sizeof(position));
9430 			mutex_lock(&dm->dc_lock);
9431 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9432 			mutex_unlock(&dm->dc_lock);
9433 		}
9434 
9435 		/* Copy all transient state flags into dc state */
9436 		if (dm_new_crtc_state->stream) {
9437 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9438 							    dm_new_crtc_state->stream);
9439 		}
9440 
9441 		/* handles headless hotplug case, updating new_state and
9442 		 * aconnector as needed
9443 		 */
9444 
9445 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9446 
9447 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9448 
9449 			if (!dm_new_crtc_state->stream) {
				/*
				 * This can happen because of issues with
				 * userspace notification delivery: userspace
				 * tries to set a mode on a display that is in
				 * fact disconnected, so dc_sink is NULL on the
				 * aconnector. We expect a mode reset to come
				 * soon.
				 *
				 * It can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In both cases we want to pretend we still
				 * have a sink to keep the pipe running, so that
				 * the hw state stays consistent with the sw state.
				 */
9465 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9466 						__func__, acrtc->base.base.id);
9467 				continue;
9468 			}
9469 
9470 			if (dm_old_crtc_state->stream)
9471 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9472 
9473 			pm_runtime_get_noresume(dev->dev);
9474 
9475 			acrtc->enabled = true;
9476 			acrtc->hw_mode = new_crtc_state->mode;
9477 			crtc->hwmode = new_crtc_state->mode;
9478 			mode_set_reset_required = true;
9479 		} else if (modereset_required(new_crtc_state)) {
9480 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9481 			/* i.e. reset mode */
9482 			if (dm_old_crtc_state->stream)
9483 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9484 
9485 			mode_set_reset_required = true;
9486 		}
9487 	} /* for_each_crtc_in_state() */
9488 
9489 	if (dc_state) {
		/* If there was a mode set or reset, disable eDP PSR. */
9491 		if (mode_set_reset_required) {
9492 #if defined(CONFIG_DRM_AMD_DC_DCN)
9493 			if (dm->vblank_control_workqueue)
9494 				flush_workqueue(dm->vblank_control_workqueue);
9495 #endif
9496 			amdgpu_dm_psr_disable_all(dm);
9497 		}
9498 
9499 		dm_enable_per_frame_crtc_master_sync(dc_state);
9500 		mutex_lock(&dm->dc_lock);
9501 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9502 #if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
9506 #endif
9507 		mutex_unlock(&dm->dc_lock);
9508 	}
9509 
9510 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9511 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9512 
9513 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9514 
9515 		if (dm_new_crtc_state->stream != NULL) {
9516 			const struct dc_stream_status *status =
9517 					dc_stream_get_status(dm_new_crtc_state->stream);
9518 
9519 			if (!status)
9520 				status = dc_stream_get_status_from_state(dc_state,
9521 									 dm_new_crtc_state->stream);
9522 			if (!status)
9523 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9524 			else
9525 				acrtc->otg_inst = status->primary_otg_inst;
9526 		}
9527 	}
9528 #ifdef CONFIG_DRM_AMD_DC_HDCP
9529 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9530 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9531 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9532 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9533 
9534 		new_crtc_state = NULL;
9535 
9536 		if (acrtc)
9537 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9538 
9539 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9540 
9541 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9542 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9543 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9544 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9545 			dm_new_con_state->update_hdcp = true;
9546 			continue;
9547 		}
9548 
9549 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9550 			hdcp_update_display(
9551 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9552 				new_con_state->hdcp_content_type,
9553 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9554 	}
9555 #endif
9556 
9557 	/* Handle connector state changes */
9558 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9559 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9560 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9561 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9562 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9563 		struct dc_stream_update stream_update;
9564 		struct dc_info_packet hdr_packet;
9565 		struct dc_stream_status *status = NULL;
9566 		bool abm_changed, hdr_changed, scaling_changed;
9567 
9568 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9569 		memset(&stream_update, 0, sizeof(stream_update));
9570 
9571 		if (acrtc) {
9572 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9573 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9574 		}
9575 
9576 		/* Skip any modesets/resets */
9577 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9578 			continue;
9579 
9580 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9581 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9582 
9583 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9584 							     dm_old_con_state);
9585 
9586 		abm_changed = dm_new_crtc_state->abm_level !=
9587 			      dm_old_crtc_state->abm_level;
9588 
9589 		hdr_changed =
9590 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9591 
9592 		if (!scaling_changed && !abm_changed && !hdr_changed)
9593 			continue;
9594 
9595 		stream_update.stream = dm_new_crtc_state->stream;
9596 		if (scaling_changed) {
9597 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9598 					dm_new_con_state, dm_new_crtc_state->stream);
9599 
9600 			stream_update.src = dm_new_crtc_state->stream->src;
9601 			stream_update.dst = dm_new_crtc_state->stream->dst;
9602 		}
9603 
9604 		if (abm_changed) {
9605 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9606 
9607 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9608 		}
9609 
9610 		if (hdr_changed) {
9611 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9612 			stream_update.hdr_static_metadata = &hdr_packet;
9613 		}
9614 
9615 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9616 
9617 		if (WARN_ON(!status))
9618 			continue;
9619 
9620 		WARN_ON(!status->plane_count);
9621 
9622 		/*
9623 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9624 		 * Here we create an empty update on each plane.
9625 		 * To fix this, DC should permit updating only stream properties.
9626 		 */
9627 		for (j = 0; j < status->plane_count; j++)
9628 			dummy_updates[j].surface = status->plane_states[0];
9629 
9630 
9631 		mutex_lock(&dm->dc_lock);
9632 		dc_commit_updates_for_stream(dm->dc,
9633 						     dummy_updates,
9634 						     status->plane_count,
9635 						     dm_new_crtc_state->stream,
9636 						     &stream_update,
9637 						     dc_state);
9638 		mutex_unlock(&dm->dc_lock);
9639 	}
9640 
9641 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9642 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9643 				      new_crtc_state, i) {
9644 		if (old_crtc_state->active && !new_crtc_state->active)
9645 			crtc_disable_count++;
9646 
9647 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9648 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9649 
9650 		/* For freesync config update on crtc state and params for irq */
9651 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9652 
9653 		/* Handle vrr on->off / off->on transitions */
9654 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9655 						dm_new_crtc_state);
9656 	}
9657 
	/*
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. This is intentionally deferred until after the front-end
	 * state has been modified, so that the OTG is already on and the IRQ
	 * handlers don't access stale or invalid state.
	 */
9664 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9665 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9666 #ifdef CONFIG_DEBUG_FS
9667 		bool configure_crc = false;
9668 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9669 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9670 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9671 #endif
9672 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9673 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9674 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9675 #endif
9676 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9677 
9678 		if (new_crtc_state->active &&
9679 		    (!old_crtc_state->active ||
9680 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9681 			dc_stream_retain(dm_new_crtc_state->stream);
9682 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9683 			manage_dm_interrupts(adev, acrtc, true);
9684 
9685 #ifdef CONFIG_DEBUG_FS
			/*
9687 			 * Frontend may have changed so reapply the CRC capture
9688 			 * settings for the stream.
9689 			 */
9690 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9691 
9692 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9693 				configure_crc = true;
9694 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9695 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9696 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9697 					acrtc->dm_irq_params.crc_window.update_win = true;
9698 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9699 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9700 					crc_rd_wrk->crtc = crtc;
9701 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9702 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9703 				}
9704 #endif
9705 			}
9706 
9707 			if (configure_crc)
9708 				if (amdgpu_dm_crtc_configure_crc_source(
9709 					crtc, dm_new_crtc_state, cur_crc_src))
9710 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9711 #endif
9712 		}
9713 	}
9714 
9715 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9716 		if (new_crtc_state->async_flip)
9717 			wait_for_vblank = false;
9718 
9719 	/* update planes when needed per crtc*/
9720 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9721 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9722 
9723 		if (dm_new_crtc_state->stream)
9724 			amdgpu_dm_commit_planes(state, dc_state, dev,
9725 						dm, crtc, wait_for_vblank);
9726 	}
9727 
9728 	/* Update audio instances for each connector. */
9729 	amdgpu_dm_commit_audio(dev, state);
9730 
9731 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9732 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9733 	/* restore the backlight level */
9734 	for (i = 0; i < dm->num_of_edps; i++) {
9735 		if (dm->backlight_dev[i] &&
9736 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9737 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9738 	}
9739 #endif
9740 	/*
	 * Send vblank events for all CRTC events not handled in the flip path
	 * and mark them consumed for drm_atomic_helper_commit_hw_done().
9743 	 */
9744 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9745 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9746 
9747 		if (new_crtc_state->event)
9748 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9749 
9750 		new_crtc_state->event = NULL;
9751 	}
9752 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9753 
9754 	/* Signal HW programming completion */
9755 	drm_atomic_helper_commit_hw_done(state);
9756 
9757 	if (wait_for_vblank)
9758 		drm_atomic_helper_wait_for_flip_done(dev, state);
9759 
9760 	drm_atomic_helper_cleanup_planes(dev, state);
9761 
9762 	/* return the stolen vga memory back to VRAM */
9763 	if (!adev->mman.keep_stolen_vga_memory)
9764 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9765 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9766 
9767 	/*
9768 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9769 	 * so we can put the GPU into runtime suspend if we're not driving any
9770 	 * displays anymore
9771 	 */
9772 	for (i = 0; i < crtc_disable_count; i++)
9773 		pm_runtime_put_autosuspend(dev->dev);
9774 	pm_runtime_mark_last_busy(dev->dev);
9775 
9776 	if (dc_state_temp)
9777 		dc_release_state(dc_state_temp);
9778 }
9779 
9780 
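/*
 * Build and commit a minimal atomic state (the connector, its CRTC and the
 * primary plane, with mode_changed forced) to restore the previous display
 * configuration without any involvement from userspace.
 */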
9781 static int dm_force_atomic_commit(struct drm_connector *connector)
9782 {
9783 	int ret = 0;
9784 	struct drm_device *ddev = connector->dev;
9785 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9786 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9787 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9788 	struct drm_connector_state *conn_state;
9789 	struct drm_crtc_state *crtc_state;
9790 	struct drm_plane_state *plane_state;
9791 
9792 	if (!state)
9793 		return -ENOMEM;
9794 
9795 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9796 
9797 	/* Construct an atomic state to restore previous display setting */
9798 
9799 	/*
9800 	 * Attach connectors to drm_atomic_state
9801 	 */
9802 	conn_state = drm_atomic_get_connector_state(state, connector);
9803 
9804 	ret = PTR_ERR_OR_ZERO(conn_state);
9805 	if (ret)
9806 		goto out;
9807 
9808 	/* Attach crtc to drm_atomic_state*/
9809 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9810 
9811 	ret = PTR_ERR_OR_ZERO(crtc_state);
9812 	if (ret)
9813 		goto out;
9814 
9815 	/* force a restore */
9816 	crtc_state->mode_changed = true;
9817 
9818 	/* Attach plane to drm_atomic_state */
9819 	plane_state = drm_atomic_get_plane_state(state, plane);
9820 
9821 	ret = PTR_ERR_OR_ZERO(plane_state);
9822 	if (ret)
9823 		goto out;
9824 
9825 	/* Call commit internally with the state we just constructed */
9826 	ret = drm_atomic_commit(state);
9827 
9828 out:
9829 	drm_atomic_state_put(state);
9830 	if (ret)
9831 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9832 
9833 	return ret;
9834 }
9835 
9836 /*
 * This function handles all cases where a set mode does not come upon hotplug.
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
9840  */
9841 void dm_restore_drm_connector_state(struct drm_device *dev,
9842 				    struct drm_connector *connector)
9843 {
9844 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9845 	struct amdgpu_crtc *disconnected_acrtc;
9846 	struct dm_crtc_state *acrtc_state;
9847 
9848 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9849 		return;
9850 
9851 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9852 	if (!disconnected_acrtc)
9853 		return;
9854 
9855 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9856 	if (!acrtc_state->stream)
9857 		return;
9858 
9859 	/*
	 * If the previous sink has not been released and differs from the
	 * current one, we deduce that we cannot rely on a usermode call to
	 * turn the display back on, so we do it here.
9863 	 */
9864 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9865 		dm_force_atomic_commit(&aconnector->base);
9866 }
9867 
9868 /*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
9871  */
9872 static int do_aquire_global_lock(struct drm_device *dev,
9873 				 struct drm_atomic_state *state)
9874 {
9875 	struct drm_crtc *crtc;
9876 	struct drm_crtc_commit *commit;
9877 	long ret;
9878 
9879 	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases it, the extra locks we take here are released
	 * too.
9883 	 */
9884 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9885 	if (ret)
9886 		return ret;
9887 
9888 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9889 		spin_lock(&crtc->commit_lock);
9890 		commit = list_first_entry_or_null(&crtc->commit_list,
9891 				struct drm_crtc_commit, commit_entry);
9892 		if (commit)
9893 			drm_crtc_commit_get(commit);
9894 		spin_unlock(&crtc->commit_lock);
9895 
9896 		if (!commit)
9897 			continue;
9898 
9899 		/*
		 * Make sure all pending HW programming has completed and all
		 * page flips are done.
9902 		 */
9903 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9904 
9905 		if (ret > 0)
9906 			ret = wait_for_completion_interruptible_timeout(
9907 					&commit->flip_done, 10*HZ);
9908 
9909 		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
9912 
9913 		drm_crtc_commit_put(commit);
9914 	}
9915 
9916 	return ret < 0 ? ret : 0;
9917 }
9918 
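/*
 * Fill the CRTC's freesync config from the connector's reported refresh range:
 * VRR is supported when the mode's nominal refresh rate falls within the
 * connector's [min_vfreq, max_vfreq] range, and the resulting state is either
 * fixed, variable or inactive depending on the requested configuration.
 */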
9919 static void get_freesync_config_for_crtc(
9920 	struct dm_crtc_state *new_crtc_state,
9921 	struct dm_connector_state *new_con_state)
9922 {
9923 	struct mod_freesync_config config = {0};
9924 	struct amdgpu_dm_connector *aconnector =
9925 			to_amdgpu_dm_connector(new_con_state->base.connector);
9926 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9927 	int vrefresh = drm_mode_vrefresh(mode);
9928 	bool fs_vid_mode = false;
9929 
9930 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9931 					vrefresh >= aconnector->min_vfreq &&
9932 					vrefresh <= aconnector->max_vfreq;
9933 
9934 	if (new_crtc_state->vrr_supported) {
9935 		new_crtc_state->stream->ignore_msa_timing_param = true;
9936 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9937 
9938 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9939 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9940 		config.vsif_supported = true;
9941 		config.btr = true;
9942 
9943 		if (fs_vid_mode) {
9944 			config.state = VRR_STATE_ACTIVE_FIXED;
9945 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9946 			goto out;
9947 		} else if (new_crtc_state->base.vrr_enabled) {
9948 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9949 		} else {
9950 			config.state = VRR_STATE_INACTIVE;
9951 		}
9952 	}
9953 out:
9954 	new_crtc_state->freesync_config = config;
9955 }
9956 
9957 static void reset_freesync_config_for_crtc(
9958 	struct dm_crtc_state *new_crtc_state)
9959 {
9960 	new_crtc_state->vrr_supported = false;
9961 
9962 	memset(&new_crtc_state->vrr_infopacket, 0,
9963 	       sizeof(new_crtc_state->vrr_infopacket));
9964 }
9965 
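/*
 * Return true when two modes differ only in their vertical blanking (vtotal,
 * vsync start/end) while pixel clock, horizontal timing, active area and
 * vsync pulse width stay the same, i.e. a refresh-rate change achieved purely
 * within the vertical blanking interval.
 */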
9966 static bool
9967 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9968 				 struct drm_crtc_state *new_crtc_state)
9969 {
9970 	struct drm_display_mode old_mode, new_mode;
9971 
9972 	if (!old_crtc_state || !new_crtc_state)
9973 		return false;
9974 
9975 	old_mode = old_crtc_state->mode;
9976 	new_mode = new_crtc_state->mode;
9977 
9978 	if (old_mode.clock       == new_mode.clock &&
9979 	    old_mode.hdisplay    == new_mode.hdisplay &&
9980 	    old_mode.vdisplay    == new_mode.vdisplay &&
9981 	    old_mode.htotal      == new_mode.htotal &&
9982 	    old_mode.vtotal      != new_mode.vtotal &&
9983 	    old_mode.hsync_start == new_mode.hsync_start &&
9984 	    old_mode.vsync_start != new_mode.vsync_start &&
9985 	    old_mode.hsync_end   == new_mode.hsync_end &&
9986 	    old_mode.vsync_end   != new_mode.vsync_end &&
9987 	    old_mode.hskew       == new_mode.hskew &&
9988 	    old_mode.vscan       == new_mode.vscan &&
9989 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9990 	    (new_mode.vsync_end - new_mode.vsync_start))
9991 		return true;
9992 
9993 	return false;
9994 }
9995 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9997 	uint64_t num, den, res;
9998 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9999 
10000 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10001 
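	/*
	 * Fixed refresh rate in uHz: mode.clock is the pixel clock in kHz, so
	 * scale by 1000 (to Hz) and by 1,000,000 (to uHz), then divide by the
	 * number of pixels per frame (htotal * vtotal).
	 */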
10002 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10003 	den = (unsigned long long)new_crtc_state->mode.htotal *
10004 	      (unsigned long long)new_crtc_state->mode.vtotal;
10005 
10006 	res = div_u64(num, den);
10007 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10008 }
10009 
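/*
 * Called from atomic check once with enable == false to remove the stream for
 * every changed/disabled CRTC, and once with enable == true to create and add
 * streams for every updated/enabled CRTC. Sets *lock_and_validation_needed
 * whenever the change requires global DC validation.
 */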
10010 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10011 				struct drm_atomic_state *state,
10012 				struct drm_crtc *crtc,
10013 				struct drm_crtc_state *old_crtc_state,
10014 				struct drm_crtc_state *new_crtc_state,
10015 				bool enable,
10016 				bool *lock_and_validation_needed)
10017 {
10018 	struct dm_atomic_state *dm_state = NULL;
10019 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10020 	struct dc_stream_state *new_stream;
10021 	int ret = 0;
10022 
10023 	/*
10024 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10025 	 * update changed items
10026 	 */
10027 	struct amdgpu_crtc *acrtc = NULL;
10028 	struct amdgpu_dm_connector *aconnector = NULL;
10029 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10030 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10031 
10032 	new_stream = NULL;
10033 
10034 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10035 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10036 	acrtc = to_amdgpu_crtc(crtc);
10037 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10038 
10039 	/* TODO This hack should go away */
10040 	if (aconnector && enable) {
10041 		/* Make sure fake sink is created in plug-in scenario */
10042 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10043 							    &aconnector->base);
10044 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10045 							    &aconnector->base);
10046 
		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR(drm_new_conn_state);
			goto fail;
		}
10051 
10052 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10053 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10054 
10055 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10056 			goto skip_modeset;
10057 
10058 		new_stream = create_validate_stream_for_sink(aconnector,
10059 							     &new_crtc_state->mode,
10060 							     dm_new_conn_state,
10061 							     dm_old_crtc_state->stream);
10062 
		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error. The OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */
10069 
10070 		if (!new_stream) {
10071 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10072 					__func__, acrtc->base.base.id);
10073 			ret = -ENOMEM;
10074 			goto fail;
10075 		}
10076 
10077 		/*
10078 		 * TODO: Check VSDB bits to decide whether this should
10079 		 * be enabled or not.
10080 		 */
10081 		new_stream->triggered_crtc_reset.enabled =
10082 			dm->force_timing_sync;
10083 
10084 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10085 
10086 		ret = fill_hdr_info_packet(drm_new_conn_state,
10087 					   &new_stream->hdr_static_metadata);
10088 		if (ret)
10089 			goto fail;
10090 
10091 		/*
10092 		 * If we already removed the old stream from the context
10093 		 * (and set the new stream to NULL) then we can't reuse
10094 		 * the old stream even if the stream and scaling are unchanged.
10095 		 * We'll hit the BUG_ON and black screen.
10096 		 *
10097 		 * TODO: Refactor this function to allow this check to work
10098 		 * in all conditions.
10099 		 */
10100 		if (amdgpu_freesync_vid_mode &&
10101 		    dm_new_crtc_state->stream &&
10102 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10103 			goto skip_modeset;
10104 
10105 		if (dm_new_crtc_state->stream &&
10106 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10107 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10108 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
					 new_crtc_state->mode_changed);
10111 		}
10112 	}
10113 
10114 	/* mode_changed flag may get updated above, need to check again */
10115 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10116 		goto skip_modeset;
10117 
	DRM_DEBUG_ATOMIC(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
10122 		acrtc->crtc_id,
10123 		new_crtc_state->enable,
10124 		new_crtc_state->active,
10125 		new_crtc_state->planes_changed,
10126 		new_crtc_state->mode_changed,
10127 		new_crtc_state->active_changed,
10128 		new_crtc_state->connectors_changed);
10129 
10130 	/* Remove stream for any changed/disabled CRTC */
10131 	if (!enable) {
10132 
10133 		if (!dm_old_crtc_state->stream)
10134 			goto skip_modeset;
10135 
10136 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10137 		    is_timing_unchanged_for_freesync(new_crtc_state,
10138 						     old_crtc_state)) {
10139 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER(
				"Mode change not required for front porch change, "
				"setting mode_changed to %d\n",
				new_crtc_state->mode_changed);
10144 
10145 			set_freesync_fixed_config(dm_new_crtc_state);
10146 
10147 			goto skip_modeset;
10148 		} else if (amdgpu_freesync_vid_mode && aconnector &&
10149 			   is_freesync_video_mode(&new_crtc_state->mode,
10150 						  aconnector)) {
10151 			struct drm_display_mode *high_mode;
10152 
10153 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10154 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10155 				set_freesync_fixed_config(dm_new_crtc_state);
10156 			}
10157 		}
10158 
10159 		ret = dm_atomic_get_state(state, &dm_state);
10160 		if (ret)
10161 			goto fail;
10162 
10163 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10164 				crtc->base.id);
10165 
10166 		/* i.e. reset mode */
10167 		if (dc_remove_stream_from_ctx(
10168 				dm->dc,
10169 				dm_state->context,
10170 				dm_old_crtc_state->stream) != DC_OK) {
10171 			ret = -EINVAL;
10172 			goto fail;
10173 		}
10174 
10175 		dc_stream_release(dm_old_crtc_state->stream);
10176 		dm_new_crtc_state->stream = NULL;
10177 
10178 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10179 
10180 		*lock_and_validation_needed = true;
10181 
10182 	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream
		 * when MST connectors added in chained mode are not found in the
		 * existing crtc_state.
		 * TODO: need to dig out the root cause of this.
		 */
10188 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10189 			goto skip_modeset;
10190 
10191 		if (modereset_required(new_crtc_state))
10192 			goto skip_modeset;
10193 
10194 		if (modeset_required(new_crtc_state, new_stream,
10195 				     dm_old_crtc_state->stream)) {
10196 
10197 			WARN_ON(dm_new_crtc_state->stream);
10198 
10199 			ret = dm_atomic_get_state(state, &dm_state);
10200 			if (ret)
10201 				goto fail;
10202 
10203 			dm_new_crtc_state->stream = new_stream;
10204 
10205 			dc_stream_retain(new_stream);
10206 
10207 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10208 					 crtc->base.id);
10209 
10210 			if (dc_add_stream_to_ctx(
10211 					dm->dc,
10212 					dm_state->context,
10213 					dm_new_crtc_state->stream) != DC_OK) {
10214 				ret = -EINVAL;
10215 				goto fail;
10216 			}
10217 
10218 			*lock_and_validation_needed = true;
10219 		}
10220 	}
10221 
10222 skip_modeset:
10223 	/* Release extra reference */
10224 	if (new_stream)
		dc_stream_release(new_stream);
10226 
10227 	/*
10228 	 * We want to do dc stream updates that do not require a
10229 	 * full modeset below.
10230 	 */
10231 	if (!(enable && aconnector && new_crtc_state->active))
10232 		return 0;
	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (the stream has just been
	 *    added to the dc context, or is already in it),
	 * 2. The CRTC has a valid connector attached, and
	 * 3. The CRTC is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
10241 	BUG_ON(dm_new_crtc_state->stream == NULL);
10242 
10243 	/* Scaling or underscan settings */
10244 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10245 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10246 		update_stream_scaling_settings(
10247 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10248 
10249 	/* ABM settings */
10250 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10251 
10252 	/*
10253 	 * Color management settings. We also update color properties
10254 	 * when a modeset is needed, to ensure it gets reprogrammed.
10255 	 */
10256 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10257 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10258 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10259 		if (ret)
10260 			goto fail;
10261 	}
10262 
10263 	/* Update Freesync settings. */
10264 	get_freesync_config_for_crtc(dm_new_crtc_state,
10265 				     dm_new_conn_state);
10266 
10267 	return ret;
10268 
10269 fail:
10270 	if (new_stream)
10271 		dc_stream_release(new_stream);
10272 	return ret;
10273 }
10274 
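/*
 * Decide whether a plane update requires DC to remove and recreate the plane
 * states on the stream (a full plane reset) rather than being handled as a
 * simple surface update.
 */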
10275 static bool should_reset_plane(struct drm_atomic_state *state,
10276 			       struct drm_plane *plane,
10277 			       struct drm_plane_state *old_plane_state,
10278 			       struct drm_plane_state *new_plane_state)
10279 {
10280 	struct drm_plane *other;
10281 	struct drm_plane_state *old_other_state, *new_other_state;
10282 	struct drm_crtc_state *new_crtc_state;
10283 	int i;
10284 
	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
	 * the stream.
	 */
10290 	if (state->allow_modeset)
10291 		return true;
10292 
10293 	/* Exit early if we know that we're adding or removing the plane. */
10294 	if (old_plane_state->crtc != new_plane_state->crtc)
10295 		return true;
10296 
10297 	/* old crtc == new_crtc == NULL, plane not in context. */
10298 	if (!new_plane_state->crtc)
10299 		return false;
10300 
10301 	new_crtc_state =
10302 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10303 
10304 	if (!new_crtc_state)
10305 		return true;
10306 
10307 	/* CRTC Degamma changes currently require us to recreate planes. */
10308 	if (new_crtc_state->color_mgmt_changed)
10309 		return true;
10310 
10311 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10312 		return true;
10313 
10314 	/*
10315 	 * If there are any new primary or overlay planes being added or
10316 	 * removed then the z-order can potentially change. To ensure
10317 	 * correct z-order and pipe acquisition the current DC architecture
10318 	 * requires us to remove and recreate all existing planes.
10319 	 *
10320 	 * TODO: Come up with a more elegant solution for this.
10321 	 */
10322 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
10325 			continue;
10326 
10327 		if (old_other_state->crtc != new_plane_state->crtc &&
10328 		    new_other_state->crtc != new_plane_state->crtc)
10329 			continue;
10330 
10331 		if (old_other_state->crtc != new_other_state->crtc)
10332 			return true;
10333 
10334 		/* Src/dst size and scaling updates. */
10335 		if (old_other_state->src_w != new_other_state->src_w ||
10336 		    old_other_state->src_h != new_other_state->src_h ||
10337 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10338 		    old_other_state->crtc_h != new_other_state->crtc_h)
10339 			return true;
10340 
10341 		/* Rotation / mirroring updates. */
10342 		if (old_other_state->rotation != new_other_state->rotation)
10343 			return true;
10344 
10345 		/* Blending updates. */
10346 		if (old_other_state->pixel_blend_mode !=
10347 		    new_other_state->pixel_blend_mode)
10348 			return true;
10349 
10350 		/* Alpha updates. */
10351 		if (old_other_state->alpha != new_other_state->alpha)
10352 			return true;
10353 
10354 		/* Colorspace changes. */
10355 		if (old_other_state->color_range != new_other_state->color_range ||
10356 		    old_other_state->color_encoding != new_other_state->color_encoding)
10357 			return true;
10358 
10359 		/* Framebuffer checks fall at the end. */
10360 		if (!old_other_state->fb || !new_other_state->fb)
10361 			continue;
10362 
10363 		/* Pixel format changes can require bandwidth updates. */
10364 		if (old_other_state->fb->format != new_other_state->fb->format)
10365 			return true;
10366 
10367 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10368 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10369 
10370 		/* Tiling and DCC changes also require bandwidth updates. */
10371 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10372 		    old_afb->base.modifier != new_afb->base.modifier)
10373 			return true;
10374 	}
10375 
10376 	return false;
10377 }
10378 
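/*
 * Validate a framebuffer attached to the cursor plane: size against the
 * hardware cursor limits, no cropping, a supported pitch, and a linear
 * layout when no modifier is supplied.
 */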
10379 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10380 			      struct drm_plane_state *new_plane_state,
10381 			      struct drm_framebuffer *fb)
10382 {
10383 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10384 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10385 	unsigned int pitch;
10386 	bool linear;
10387 
10388 	if (fb->width > new_acrtc->max_cursor_width ||
10389 	    fb->height > new_acrtc->max_cursor_height) {
		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 fb->width, fb->height);
10393 		return -EINVAL;
10394 	}
10395 	if (new_plane_state->src_w != fb->width << 16 ||
10396 	    new_plane_state->src_h != fb->height << 16) {
10397 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10398 		return -EINVAL;
10399 	}
10400 
10401 	/* Pitch in pixels */
10402 	pitch = fb->pitches[0] / fb->format->cpp[0];
10403 
10404 	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10406 				 fb->width, pitch);
10407 		return -EINVAL;
10408 	}
10409 
10410 	switch (pitch) {
10411 	case 64:
10412 	case 128:
10413 	case 256:
10414 		/* FB pitch is supported by cursor plane */
10415 		break;
10416 	default:
10417 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10418 		return -EINVAL;
10419 	}
10420 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10423 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10424 		if (adev->family < AMDGPU_FAMILY_AI) {
10425 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10426 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10427 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10428 		} else {
10429 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10430 		}
10431 		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10433 			return -EINVAL;
10434 		}
10435 	}
10436 
10437 	return 0;
10438 }
10439 
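/*
 * Counterpart of dm_update_crtc_state() for planes: with enable == false,
 * remove the DC plane state for changed/removed planes; with enable == true,
 * create, fill and add a new DC plane state for updated/enabled planes.
 * Cursor planes are only sanity-checked here since DC has no dedicated
 * cursor plane.
 */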
10440 static int dm_update_plane_state(struct dc *dc,
10441 				 struct drm_atomic_state *state,
10442 				 struct drm_plane *plane,
10443 				 struct drm_plane_state *old_plane_state,
10444 				 struct drm_plane_state *new_plane_state,
10445 				 bool enable,
10446 				 bool *lock_and_validation_needed)
10447 {
10448 
10449 	struct dm_atomic_state *dm_state = NULL;
10450 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10451 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10452 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10453 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10454 	struct amdgpu_crtc *new_acrtc;
10455 	bool needs_reset;
10456 	int ret = 0;
10457 
10458 
10459 	new_plane_crtc = new_plane_state->crtc;
10460 	old_plane_crtc = old_plane_state->crtc;
10461 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10462 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10463 
10464 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10465 		if (!enable || !new_plane_crtc ||
10466 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10467 			return 0;
10468 
10469 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10470 
10471 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10472 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10473 			return -EINVAL;
10474 		}
10475 
10476 		if (new_plane_state->fb) {
10477 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10478 						 new_plane_state->fb);
10479 			if (ret)
10480 				return ret;
10481 		}
10482 
10483 		return 0;
10484 	}
10485 
10486 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10487 					 new_plane_state);
10488 
10489 	/* Remove any changed/removed planes */
10490 	if (!enable) {
10491 		if (!needs_reset)
10492 			return 0;
10493 
10494 		if (!old_plane_crtc)
10495 			return 0;
10496 
10497 		old_crtc_state = drm_atomic_get_old_crtc_state(
10498 				state, old_plane_crtc);
10499 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10500 
10501 		if (!dm_old_crtc_state->stream)
10502 			return 0;
10503 
10504 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10505 				plane->base.id, old_plane_crtc->base.id);
10506 
10507 		ret = dm_atomic_get_state(state, &dm_state);
10508 		if (ret)
10509 			return ret;
10510 
10511 		if (!dc_remove_plane_from_context(
10512 				dc,
10513 				dm_old_crtc_state->stream,
10514 				dm_old_plane_state->dc_state,
10515 				dm_state->context)) {
10516 
10517 			return -EINVAL;
10518 		}
10521 		dc_plane_state_release(dm_old_plane_state->dc_state);
10522 		dm_new_plane_state->dc_state = NULL;
10523 
10524 		*lock_and_validation_needed = true;
10525 
10526 	} else { /* Add new planes */
10527 		struct dc_plane_state *dc_new_plane_state;
10528 
10529 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10530 			return 0;
10531 
10532 		if (!new_plane_crtc)
10533 			return 0;
10534 
10535 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10536 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10537 
10538 		if (!dm_new_crtc_state->stream)
10539 			return 0;
10540 
10541 		if (!needs_reset)
10542 			return 0;
10543 
10544 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10545 		if (ret)
10546 			return ret;
10547 
10548 		WARN_ON(dm_new_plane_state->dc_state);
10549 
10550 		dc_new_plane_state = dc_create_plane_state(dc);
10551 		if (!dc_new_plane_state)
10552 			return -ENOMEM;
10553 
10554 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10555 				 plane->base.id, new_plane_crtc->base.id);
10556 
10557 		ret = fill_dc_plane_attributes(
10558 			drm_to_adev(new_plane_crtc->dev),
10559 			dc_new_plane_state,
10560 			new_plane_state,
10561 			new_crtc_state);
10562 		if (ret) {
10563 			dc_plane_state_release(dc_new_plane_state);
10564 			return ret;
10565 		}
10566 
10567 		ret = dm_atomic_get_state(state, &dm_state);
10568 		if (ret) {
10569 			dc_plane_state_release(dc_new_plane_state);
10570 			return ret;
10571 		}
10572 
10573 		/*
10574 		 * Any atomic check errors that occur after this will
10575 		 * not need a release. The plane state will be attached
10576 		 * to the stream, and therefore part of the atomic
10577 		 * state. It'll be released when the atomic state is
10578 		 * cleaned.
10579 		 */
10580 		if (!dc_add_plane_to_context(
10581 				dc,
10582 				dm_new_crtc_state->stream,
10583 				dc_new_plane_state,
10584 				dm_state->context)) {
10585 
10586 			dc_plane_state_release(dc_new_plane_state);
10587 			return -EINVAL;
10588 		}
10589 
10590 		dm_new_plane_state->dc_state = dc_new_plane_state;
10591 
10592 		/* Tell DC to do a full surface update every time there
10593 		 * is a plane change. Inefficient, but works for now.
10594 		 */
10595 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10596 
10597 		*lock_and_validation_needed = true;
10598 	}
10599 
10600 
10601 	return ret;
10602 }
10603 
10604 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10605 				struct drm_crtc *crtc,
10606 				struct drm_crtc_state *new_crtc_state)
10607 {
10608 	struct drm_plane *cursor = crtc->cursor, *underlying;
10609 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10610 	int i;
10611 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10612 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe, but it inherits the scaling and positioning from the
	 * underlying pipe. Check that the cursor plane's scaling matches the
	 * underlying planes'.
	 */
10617 
10618 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;
10622 
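	/*
	 * Scale factors in 1/1000 units: destination (CRTC) size divided by the
	 * source size, with src_w/src_h converted from 16.16 fixed point.
	 */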
10623 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10624 			 (new_cursor_state->src_w >> 16);
10625 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10626 			 (new_cursor_state->src_h >> 16);
10627 
10628 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10629 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10630 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10631 			continue;
10632 
10633 		/* Ignore disabled planes */
10634 		if (!new_underlying_state->fb)
10635 			continue;
10636 
10637 		underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10638 				     (new_underlying_state->src_w >> 16);
10639 		underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10640 				     (new_underlying_state->src_h >> 16);
10641 
10642 		if (cursor_scale_w != underlying_scale_w ||
10643 		    cursor_scale_h != underlying_scale_h) {
10644 			drm_dbg_atomic(crtc->dev,
10645 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10646 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10647 			return -EINVAL;
10648 		}
10649 
10650 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10651 		if (new_underlying_state->crtc_x <= 0 &&
10652 		    new_underlying_state->crtc_y <= 0 &&
10653 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10654 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10655 			break;
10656 	}
10657 
10658 	return 0;
10659 }
10660 
10661 #if defined(CONFIG_DRM_AMD_DC_DCN)
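/*
 * If the CRTC is driven by an MST connector, add every CRTC sharing the same
 * MST topology to the atomic state so that DSC bandwidth can be recomputed
 * across the whole topology.
 */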
10662 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10663 {
10664 	struct drm_connector *connector;
10665 	struct drm_connector_state *conn_state;
10666 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
10669 		if (conn_state->crtc != crtc)
10670 			continue;
10671 
10672 		aconnector = to_amdgpu_dm_connector(connector);
10673 		if (!aconnector->port || !aconnector->mst_port)
10674 			aconnector = NULL;
10675 		else
10676 			break;
10677 	}
10678 
10679 	if (!aconnector)
10680 		return 0;
10681 
10682 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10683 }
10684 #endif
10685 
10686 /**
10687  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10688  * @dev: The DRM device
10689  * @state: The atomic state to commit
10690  *
10691  * Validate that the given atomic state is programmable by DC into hardware.
10692  * This involves constructing a &struct dc_state reflecting the new hardware
10693  * state we wish to commit, then querying DC to see if it is programmable. It's
10694  * important not to modify the existing DC state. Otherwise, atomic_check
10695  * may unexpectedly commit hardware changes.
10696  *
10697  * When validating the DC state, it's important that the right locks are
 * acquired. For the full update case, which removes/adds/updates streams on
 * one CRTC while flipping on another CRTC, acquiring the global lock
 * guarantees that any such full update commit will wait for completion of any
 * outstanding flip using DRM's synchronization events.
10702  *
10703  * Note that DM adds the affected connectors for all CRTCs in state, when that
10704  * might not seem necessary. This is because DC stream creation requires the
10705  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10706  * be possible but non-trivial - a possible TODO item.
10707  *
 * Return: 0 on success, or a negative error code if validation failed.
10709  */
10710 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10711 				  struct drm_atomic_state *state)
10712 {
10713 	struct amdgpu_device *adev = drm_to_adev(dev);
10714 	struct dm_atomic_state *dm_state = NULL;
10715 	struct dc *dc = adev->dm.dc;
10716 	struct drm_connector *connector;
10717 	struct drm_connector_state *old_con_state, *new_con_state;
10718 	struct drm_crtc *crtc;
10719 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10720 	struct drm_plane *plane;
10721 	struct drm_plane_state *old_plane_state, *new_plane_state;
10722 	enum dc_status status;
10723 	int ret, i;
10724 	bool lock_and_validation_needed = false;
10725 	struct dm_crtc_state *dm_old_crtc_state;
10726 #if defined(CONFIG_DRM_AMD_DC_DCN)
10727 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10728 	struct drm_dp_mst_topology_state *mst_state;
10729 	struct drm_dp_mst_topology_mgr *mgr;
10730 #endif
10731 
10732 	trace_amdgpu_dm_atomic_check_begin(state);
10733 
10734 	ret = drm_atomic_helper_check_modeset(dev, state);
10735 	if (ret)
10736 		goto fail;
10737 
10738 	/* Check connector changes */
10739 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10740 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10741 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10742 
10743 		/* Skip connectors that are disabled or part of modeset already. */
10744 		if (!old_con_state->crtc && !new_con_state->crtc)
10745 			continue;
10746 
10747 		if (!new_con_state->crtc)
10748 			continue;
10749 
10750 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10751 		if (IS_ERR(new_crtc_state)) {
10752 			ret = PTR_ERR(new_crtc_state);
10753 			goto fail;
10754 		}
10755 
10756 		if (dm_old_con_state->abm_level !=
10757 		    dm_new_con_state->abm_level)
10758 			new_crtc_state->connectors_changed = true;
10759 	}
10760 
10761 #if defined(CONFIG_DRM_AMD_DC_DCN)
10762 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10763 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10764 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10765 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10766 				if (ret)
10767 					goto fail;
10768 			}
10769 		}
10770 	}
10771 #endif
10772 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10773 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10774 
10775 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10776 		    !new_crtc_state->color_mgmt_changed &&
10777 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10778 			dm_old_crtc_state->dsc_force_changed == false)
10779 			continue;
10780 
10781 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10782 		if (ret)
10783 			goto fail;
10784 
10785 		if (!new_crtc_state->enable)
10786 			continue;
10787 
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;
10791 
10792 		ret = drm_atomic_add_affected_planes(state, crtc);
10793 		if (ret)
10794 			goto fail;
10795 
10796 		if (dm_old_crtc_state->dsc_force_changed)
10797 			new_crtc_state->mode_changed = true;
10798 	}
10799 
10800 	/*
10801 	 * Add all primary and overlay planes on the CRTC to the state
10802 	 * whenever a plane is enabled to maintain correct z-ordering
10803 	 * and to enable fast surface updates.
10804 	 */
10805 	drm_for_each_crtc(crtc, dev) {
10806 		bool modified = false;
10807 
10808 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10809 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10810 				continue;
10811 
10812 			if (new_plane_state->crtc == crtc ||
10813 			    old_plane_state->crtc == crtc) {
10814 				modified = true;
10815 				break;
10816 			}
10817 		}
10818 
10819 		if (!modified)
10820 			continue;
10821 
10822 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10823 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10824 				continue;
10825 
10826 			new_plane_state =
10827 				drm_atomic_get_plane_state(state, plane);
10828 
10829 			if (IS_ERR(new_plane_state)) {
10830 				ret = PTR_ERR(new_plane_state);
10831 				goto fail;
10832 			}
10833 		}
10834 	}
10835 
	/* Remove existing planes if they are modified */
10837 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10838 		ret = dm_update_plane_state(dc, state, plane,
10839 					    old_plane_state,
10840 					    new_plane_state,
10841 					    false,
10842 					    &lock_and_validation_needed);
10843 		if (ret)
10844 			goto fail;
10845 	}
10846 
10847 	/* Disable all crtcs which require disable */
10848 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10849 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10850 					   old_crtc_state,
10851 					   new_crtc_state,
10852 					   false,
10853 					   &lock_and_validation_needed);
10854 		if (ret)
10855 			goto fail;
10856 	}
10857 
10858 	/* Enable all crtcs which require enable */
10859 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10860 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10861 					   old_crtc_state,
10862 					   new_crtc_state,
10863 					   true,
10864 					   &lock_and_validation_needed);
10865 		if (ret)
10866 			goto fail;
10867 	}
10868 
10869 	/* Add new/modified planes */
10870 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10871 		ret = dm_update_plane_state(dc, state, plane,
10872 					    old_plane_state,
10873 					    new_plane_state,
10874 					    true,
10875 					    &lock_and_validation_needed);
10876 		if (ret)
10877 			goto fail;
10878 	}
10879 
10880 	/* Run this here since we want to validate the streams we created */
10881 	ret = drm_atomic_helper_check_planes(dev, state);
10882 	if (ret)
10883 		goto fail;
10884 
10885 	/* Check cursor planes scaling */
10886 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10887 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10888 		if (ret)
10889 			goto fail;
10890 	}
10891 
10892 	if (state->legacy_cursor_update) {
10893 		/*
10894 		 * This is a fast cursor update coming from the plane update
10895 		 * helper, check if it can be done asynchronously for better
10896 		 * performance.
10897 		 */
10898 		state->async_update =
10899 			!drm_atomic_helper_async_check(dev, state);
10900 
10901 		/*
10902 		 * Skip the remaining global validation if this is an async
10903 		 * update. Cursor updates can be done without affecting
10904 		 * state or bandwidth calcs and this avoids the performance
10905 		 * penalty of locking the private state object and
10906 		 * allocating a new dc_state.
10907 		 */
10908 		if (state->async_update)
10909 			return 0;
10910 	}
10911 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into the context w/o causing a full reset. Need to
	 * decide how to handle this.
	 */
10917 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10918 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10919 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10920 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10921 
10922 		/* Skip any modesets/resets */
10923 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10924 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10925 			continue;
10926 
		/* Skip anything that is not a scaling or underscan change */
10928 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10929 			continue;
10930 
10931 		lock_and_validation_needed = true;
10932 	}
10933 
10934 #if defined(CONFIG_DRM_AMD_DC_DCN)
10935 	/* set the slot info for each mst_state based on the link encoding format */
10936 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10937 		struct amdgpu_dm_connector *aconnector;
10938 		struct drm_connector *connector;
10939 		struct drm_connector_list_iter iter;
10940 		u8 link_coding_cap;
10941 
		if (!mgr->mst_state)
10943 			continue;
10944 
10945 		drm_connector_list_iter_begin(dev, &iter);
10946 		drm_for_each_connector_iter(connector, &iter) {
10947 			int id = connector->index;
10948 
10949 			if (id == mst_state->mgr->conn_base_id) {
10950 				aconnector = to_amdgpu_dm_connector(connector);
10951 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10952 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
10953 
10954 				break;
10955 			}
10956 		}
		drm_connector_list_iter_end(&iter);
	}
10960 #endif
	/*
10962 	 * Streams and planes are reset when there are changes that affect
10963 	 * bandwidth. Anything that affects bandwidth needs to go through
10964 	 * DC global validation to ensure that the configuration can be applied
10965 	 * to hardware.
10966 	 *
10967 	 * We have to currently stall out here in atomic_check for outstanding
10968 	 * commits to finish in this case because our IRQ handlers reference
10969 	 * DRM state directly - we can end up disabling interrupts too early
10970 	 * if we don't.
10971 	 *
10972 	 * TODO: Remove this stall and drop DM state private objects.
10973 	 */
10974 	if (lock_and_validation_needed) {
10975 		ret = dm_atomic_get_state(state, &dm_state);
10976 		if (ret)
10977 			goto fail;
10978 
10979 		ret = do_aquire_global_lock(dev, state);
10980 		if (ret)
10981 			goto fail;
10982 
10983 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			ret = -EINVAL;
			goto fail;
		}
10986 
10987 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10988 		if (ret)
10989 			goto fail;
10990 #endif
10991 
10992 		/*
10993 		 * Perform validation of MST topology in the state:
10994 		 * We need to perform MST atomic check before calling
10995 		 * dc_validate_global_state(), or there is a chance
10996 		 * to get stuck in an infinite loop and hang eventually.
10997 		 */
10998 		ret = drm_dp_mst_atomic_check(state);
10999 		if (ret)
11000 			goto fail;
11001 		status = dc_validate_global_state(dc, dm_state->context, false);
11002 		if (status != DC_OK) {
11003 			drm_dbg_atomic(dev,
11004 				       "DC global validation failure: %s (%d)",
11005 				       dc_status_to_str(status), status);
11006 			ret = -EINVAL;
11007 			goto fail;
11008 		}
11009 	} else {
11010 		/*
11011 		 * The commit is a fast update. Fast updates shouldn't change
11012 		 * the DC context, affect global validation, and can have their
11013 		 * commit work done in parallel with other commits not touching
11014 		 * the same resource. If we have a new DC context as part of
11015 		 * the DM atomic state from validation we need to free it and
11016 		 * retain the existing one instead.
11017 		 *
11018 		 * Furthermore, since the DM atomic state only contains the DC
11019 		 * context and can safely be annulled, we can free the state
11020 		 * and clear the associated private object now to free
11021 		 * some memory and avoid a possible use-after-free later.
11022 		 */
11023 
11024 		for (i = 0; i < state->num_private_objs; i++) {
11025 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11026 
11027 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11028 				int j = state->num_private_objs-1;
11029 
11030 				dm_atomic_destroy_state(obj,
11031 						state->private_objs[i].state);
11032 
11033 				/* If i is not at the end of the array then the
11034 				 * last element needs to be moved to where i was
11035 				 * before the array can safely be truncated.
11036 				 */
11037 				if (i != j)
11038 					state->private_objs[i] =
11039 						state->private_objs[j];
11040 
11041 				state->private_objs[j].ptr = NULL;
11042 				state->private_objs[j].state = NULL;
11043 				state->private_objs[j].old_state = NULL;
11044 				state->private_objs[j].new_state = NULL;
11045 
11046 				state->num_private_objs = j;
11047 				break;
11048 			}
11049 		}
11050 	}
11051 
11052 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11054 		struct dm_crtc_state *dm_new_crtc_state =
11055 			to_dm_crtc_state(new_crtc_state);
11056 
11057 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11058 							 UPDATE_TYPE_FULL :
11059 							 UPDATE_TYPE_FAST;
11060 	}
11061 
11062 	/* Must be success */
11063 	WARN_ON(ret);
11064 
11065 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11066 
11067 	return ret;
11068 
11069 fail:
11070 	if (ret == -EDEADLK)
11071 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11072 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11073 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11074 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11076 
11077 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11078 
11079 	return ret;
11080 }
11081 
11082 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11083 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11084 {
11085 	uint8_t dpcd_data;
11086 	bool capable = false;
11087 
11088 	if (amdgpu_dm_connector->dc_link &&
11089 		dm_helpers_dp_read_dpcd(
11090 				NULL,
11091 				amdgpu_dm_connector->dc_link,
11092 				DP_DOWN_STREAM_PORT_COUNT,
11093 				&dpcd_data,
11094 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11096 	}
11097 
11098 	return capable;
11099 }
11100 
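/*
 * Send one chunk of a CEA extension block to DMUB for parsing. DMUB replies
 * either with an ACK for the chunk or, once the block is complete, with the
 * parsed AMD VSDB contents (freesync support and refresh rate range).
 */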
11101 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11102 		unsigned int offset,
11103 		unsigned int total_length,
11104 		uint8_t *data,
11105 		unsigned int length,
11106 		struct amdgpu_hdmi_vsdb_info *vsdb)
11107 {
11108 	bool res;
11109 	union dmub_rb_cmd cmd;
11110 	struct dmub_cmd_send_edid_cea *input;
11111 	struct dmub_cmd_edid_cea_output *output;
11112 
11113 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11114 		return false;
11115 
11116 	memset(&cmd, 0, sizeof(cmd));
11117 
11118 	input = &cmd.edid_cea.data.input;
11119 
11120 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11121 	cmd.edid_cea.header.sub_type = 0;
11122 	cmd.edid_cea.header.payload_bytes =
11123 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11124 	input->offset = offset;
11125 	input->length = length;
11126 	input->total_length = total_length;
11127 	memcpy(input->payload, data, length);
11128 
11129 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11130 	if (!res) {
11131 		DRM_ERROR("EDID CEA parser failed\n");
11132 		return false;
11133 	}
11134 
11135 	output = &cmd.edid_cea.data.output;
11136 
11137 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11138 		if (!output->ack.success) {
11139 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11140 					output->ack.offset);
11141 		}
11142 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11143 		if (!output->amd_vsdb.vsdb_found)
11144 			return false;
11145 
11146 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11147 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11148 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11149 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11150 	} else {
11151 		DRM_WARN("Unknown EDID CEA parser results\n");
11152 		return false;
11153 	}
11154 
11155 	return true;
11156 }
11157 
11158 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11159 		uint8_t *edid_ext, int len,
11160 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11161 {
11162 	int i;
11163 
11164 	/* send extension block to DMCU for parsing */
11165 	for (i = 0; i < len; i += 8) {
11166 		bool res;
11167 		int offset;
11168 
		/* send 8 bytes at a time */
11170 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11171 			return false;
11172 
11173 		if (i+8 == len) {
			/* EDID block send completed; expect result */
11175 			int version, min_rate, max_rate;
11176 
11177 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11178 			if (res) {
11179 				/* amd vsdb found */
11180 				vsdb_info->freesync_supported = 1;
11181 				vsdb_info->amd_vsdb_version = version;
11182 				vsdb_info->min_refresh_rate_hz = min_rate;
11183 				vsdb_info->max_refresh_rate_hz = max_rate;
11184 				return true;
11185 			}
11186 			/* not amd vsdb */
11187 			return false;
11188 		}
11189 
		/* check for ack */
11191 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11192 		if (!res)
11193 			return false;
11194 	}
11195 
11196 	return false;
11197 }
11198 
11199 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11200 		uint8_t *edid_ext, int len,
11201 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11202 {
11203 	int i;
11204 
	/* send extension block to DMUB for parsing */
11206 	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
11208 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11209 			return false;
11210 	}
11211 
11212 	return vsdb_info->freesync_supported;
11213 }
11214 
11215 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11216 		uint8_t *edid_ext, int len,
11217 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11218 {
11219 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11220 
11221 	if (adev->dm.dmub_srv)
11222 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11223 	else
11224 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11225 }
11226 
11227 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11228 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11229 {
11230 	uint8_t *edid_ext = NULL;
11231 	int i;
11232 	bool valid_vsdb_found = false;
11233 
11234 	/*----- drm_find_cea_extension() -----*/
11235 	/* No EDID or EDID extensions */
11236 	if (edid == NULL || edid->extensions == 0)
11237 		return -ENODEV;
11238 
11239 	/* Find CEA extension */
11240 	for (i = 0; i < edid->extensions; i++) {
11241 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11242 		if (edid_ext[0] == CEA_EXT)
11243 			break;
11244 	}
11245 
11246 	if (i == edid->extensions)
11247 		return -ENODEV;
11248 
11249 	/*----- cea_db_offsets() -----*/
11250 	if (edid_ext[0] != CEA_EXT)
11251 		return -ENODEV;
11252 
11253 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11254 
11255 	return valid_vsdb_found ? i : -ENODEV;
11256 }
11257 
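/**
 * amdgpu_dm_update_freesync_caps - update a connector's freesync capability
 * @connector: DRM connector to update
 * @edid: EDID to parse, or NULL if the sink was removed
 *
 * For DP/eDP sinks that can ignore the MSA timing parameters, the refresh
 * rate range is taken from the EDID monitor range descriptor; for HDMI sinks
 * it is taken from the AMD VSDB in the CEA extension block. The result is
 * stored on the connector and exposed through the vrr_capable property.
 */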
11258 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11259 					struct edid *edid)
11260 {
11261 	int i = 0;
11262 	struct detailed_timing *timing;
11263 	struct detailed_non_pixel *data;
11264 	struct detailed_data_monitor_range *range;
11265 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11266 			to_amdgpu_dm_connector(connector);
11267 	struct dm_connector_state *dm_con_state = NULL;
11268 	struct dc_sink *sink;
11269 
11270 	struct drm_device *dev = connector->dev;
11271 	struct amdgpu_device *adev = drm_to_adev(dev);
11272 	bool freesync_capable = false;
11273 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11274 
11275 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
11277 		goto update;
11278 	}
11279 
11280 	sink = amdgpu_dm_connector->dc_sink ?
11281 		amdgpu_dm_connector->dc_sink :
11282 		amdgpu_dm_connector->dc_em_sink;
11283 
11284 	if (!edid || !sink) {
11285 		dm_con_state = to_dm_connector_state(connector->state);
11286 
11287 		amdgpu_dm_connector->min_vfreq = 0;
11288 		amdgpu_dm_connector->max_vfreq = 0;
11289 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11290 		connector->display_info.monitor_range.min_vfreq = 0;
11291 		connector->display_info.monitor_range.max_vfreq = 0;
11292 		freesync_capable = false;
11293 
11294 		goto update;
11295 	}
11296 
11297 	dm_con_state = to_dm_connector_state(connector->state);
11298 
11299 	if (!adev->dm.freesync_module)
11300 		goto update;
11303 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11304 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11305 		bool edid_check_required = false;
11306 
11307 		if (edid) {
11308 			edid_check_required = is_dp_capable_without_timing_msa(
11309 						adev->dm.dc,
11310 						amdgpu_dm_connector);
11311 		}
11312 
		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing	= &edid->detailed_timings[i];
11318 				data	= &timing->data.other_data;
11319 				range	= &data->data.range;
11320 				/*
11321 				 * Check if monitor has continuous frequency mode
11322 				 */
11323 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11324 					continue;
11325 				/*
11326 				 * Check for flag range limits only. If flag == 1 then
11327 				 * no additional timing information provided.
11328 				 * Default GTF, GTF Secondary curve and CVT are not
11329 				 * supported
11330 				 */
11331 				if (range->flags != 1)
11332 					continue;
11333 
11334 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11335 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11336 				amdgpu_dm_connector->pixel_clock_mhz =
11337 					range->pixel_clock_mhz * 10;
11338 
11339 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11340 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11341 
11342 				break;
11343 			}
11344 
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
11350 		}
11351 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11352 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11353 		if (i >= 0 && vsdb_info.freesync_supported) {
11354 			timing  = &edid->detailed_timings[i];
11355 			data    = &timing->data.other_data;
11356 
11357 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11358 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11359 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11360 				freesync_capable = true;
11361 
11362 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11363 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11364 		}
11365 	}
11366 
11367 update:
11368 	if (dm_con_state)
11369 		dm_con_state->freesync_capable = freesync_capable;
11370 
11371 	if (connector->vrr_capable_property)
11372 		drm_connector_set_vrr_capable_property(connector,
11373 						       freesync_capable);
11374 }
11375 
11376 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11377 {
11378 	struct amdgpu_device *adev = drm_to_adev(dev);
11379 	struct dc *dc = adev->dm.dc;
11380 	int i;
11381 
11382 	mutex_lock(&adev->dm.dc_lock);
11383 	if (dc->current_state) {
11384 		for (i = 0; i < dc->current_state->stream_count; ++i)
11385 			dc->current_state->streams[i]
11386 				->triggered_crtc_reset.enabled =
11387 				adev->dm.force_timing_sync;
11388 
11389 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11390 		dc_trigger_sync(dc, dc->current_state);
11391 	}
11392 	mutex_unlock(&adev->dm.dc_lock);
11393 }
11394 
11395 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11396 		       uint32_t value, const char *func_name)
11397 {
11398 #ifdef DM_CHECK_ADDR_0
11399 	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
11401 		return;
11402 	}
11403 #endif
11404 	cgs_write_register(ctx->cgs_device, address, value);
11405 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11406 }
11407 
11408 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11409 			  const char *func_name)
11410 {
11411 	uint32_t value;
11412 #ifdef DM_CHECK_ADDR_0
11413 	if (address == 0) {
11414 		DC_ERR("invalid register read; address = 0\n");
11415 		return 0;
11416 	}
11417 #endif
11418 
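	/*
	 * A register read while a DMUB register-write gather is in progress
	 * (and not a burst write) is unexpected; flag it and return 0.
	 */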
11419 	if (ctx->dmub_srv &&
11420 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11421 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11422 		ASSERT(false);
11423 		return 0;
11424 	}
11425 
11426 	value = cgs_read_register(ctx->cgs_device, address);
11427 
11428 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11429 
11430 	return value;
11431 }
11432 
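/*
 * Translate a DMUB async-to-sync status into the caller's return value: for
 * AUX commands, the AUX reply length on success; for SET_CONFIG, 0 on
 * success; -1 otherwise. *operation_result carries the detailed AUX or
 * SET_CONFIG status.
 */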
11433 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11434 	uint8_t status_type, uint32_t *operation_result)
11435 {
11436 	struct amdgpu_device *adev = ctx->driver_context;
11437 	int return_status = -1;
11438 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11439 
11440 	if (is_cmd_aux) {
11441 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11442 			return_status = p_notify->aux_reply.length;
11443 			*operation_result = p_notify->result;
11444 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11445 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11446 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11447 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11448 		} else {
11449 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11450 		}
11451 	} else {
11452 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11453 			return_status = 0;
11454 			*operation_result = p_notify->sc_status;
11455 		} else {
11456 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11457 		}
11458 	}
11459 
11460 	return return_status;
11461 }
11462 
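/*
 * Submit an AUX transfer or SET_CONFIG request to DMUB, wait (up to 10
 * seconds) for the completion notification where needed, and translate the
 * outcome via amdgpu_dm_set_dmub_async_sync_status().
 */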
11463 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11464 	unsigned int link_index, void *cmd_payload, void *operation_result)
11465 {
11466 	struct amdgpu_device *adev = ctx->driver_context;
11467 	int ret = 0;
11468 
11469 	if (is_cmd_aux) {
11470 		dc_process_dmub_aux_transfer_async(ctx->dc,
11471 			link_index, (struct aux_payload *)cmd_payload);
11472 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11473 					(struct set_config_cmd_payload *)cmd_payload,
11474 					adev->dm.dmub_notify)) {
11475 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11476 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11477 					(uint32_t *)operation_result);
11478 	}
11479 
11480 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11481 	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timeout!\n");
11483 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11484 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11485 				(uint32_t *)operation_result);
11486 	}
11487 
11488 	if (is_cmd_aux) {
11489 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11490 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11491 
11492 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11493 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11494 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11495 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11496 				       adev->dm.dmub_notify->aux_reply.length);
11497 			}
11498 		}
11499 	}
11500 
11501 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11502 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11503 			(uint32_t *)operation_result);
11504 }
11505