1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #include "amdgpu_dm_plane.h"
50 #include "amdgpu_dm_crtc.h"
51 #ifdef CONFIG_DRM_AMD_DC_HDCP
52 #include "amdgpu_dm_hdcp.h"
53 #include <drm/display/drm_hdcp_helper.h>
54 #endif
55 #include "amdgpu_pm.h"
56 #include "amdgpu_atombios.h"
57 
58 #include "amd_shared.h"
59 #include "amdgpu_dm_irq.h"
60 #include "dm_helpers.h"
61 #include "amdgpu_dm_mst_types.h"
62 #if defined(CONFIG_DEBUG_FS)
63 #include "amdgpu_dm_debugfs.h"
64 #endif
65 #include "amdgpu_dm_psr.h"
66 
67 #include "ivsrcid/ivsrcid_vislands30.h"
68 
69 #include "i2caux_interface.h"
70 #include <linux/module.h>
71 #include <linux/moduleparam.h>
72 #include <linux/types.h>
73 #include <linux/pm_runtime.h>
74 #include <linux/pci.h>
75 #include <linux/firmware.h>
76 #include <linux/component.h>
77 #include <linux/dmi.h>
78 
79 #include <drm/display/drm_dp_mst_helper.h>
80 #include <drm/display/drm_hdmi_helper.h>
81 #include <drm/drm_atomic.h>
82 #include <drm/drm_atomic_uapi.h>
83 #include <drm/drm_atomic_helper.h>
84 #include <drm/drm_blend.h>
85 #include <drm/drm_fb_helper.h>
86 #include <drm/drm_fourcc.h>
87 #include <drm/drm_edid.h>
88 #include <drm/drm_vblank.h>
89 #include <drm/drm_audio_component.h>
90 #include <drm/drm_gem_atomic_helper.h>
91 
92 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
93 
94 #include "dcn/dcn_1_0_offset.h"
95 #include "dcn/dcn_1_0_sh_mask.h"
96 #include "soc15_hw_ip.h"
97 #include "soc15_common.h"
98 #include "vega10_ip_offset.h"
99 
100 #include "soc15_common.h"
101 
102 #include "gc/gc_11_0_0_offset.h"
103 #include "gc/gc_11_0_0_sh_mask.h"
104 
105 #include "modules/inc/mod_freesync.h"
106 #include "modules/power/power_helpers.h"
107 #include "modules/inc/mod_info_packet.h"
108 
109 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
111 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
113 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
115 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
117 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
119 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
121 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
122 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
123 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
124 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
125 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
126 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
127 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
128 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
129 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
130 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
131 
132 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
133 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
134 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
135 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
136 
137 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
138 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
139 
140 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
141 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
142 
143 /* Number of bytes in PSP header for firmware. */
144 #define PSP_HEADER_BYTES 0x100
145 
146 /* Number of bytes in PSP footer for firmware. */
147 #define PSP_FOOTER_BYTES 0x100
148 
149 /**
150  * DOC: overview
151  *
152  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
153  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
154  * requests into DC requests, and DC responses into DRM responses.
155  *
156  * The root control structure is &struct amdgpu_display_manager.
157  */
158 
159 /* basic init/fini API */
160 static int amdgpu_dm_init(struct amdgpu_device *adev);
161 static void amdgpu_dm_fini(struct amdgpu_device *adev);
162 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
163 
164 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
165 {
166 	switch (link->dpcd_caps.dongle_type) {
167 	case DISPLAY_DONGLE_NONE:
168 		return DRM_MODE_SUBCONNECTOR_Native;
169 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
170 		return DRM_MODE_SUBCONNECTOR_VGA;
171 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
172 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
173 		return DRM_MODE_SUBCONNECTOR_DVID;
174 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
175 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
176 		return DRM_MODE_SUBCONNECTOR_HDMIA;
177 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
178 	default:
179 		return DRM_MODE_SUBCONNECTOR_Unknown;
180 	}
181 }
182 
183 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
184 {
185 	struct dc_link *link = aconnector->dc_link;
186 	struct drm_connector *connector = &aconnector->base;
187 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
188 
189 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
190 		return;
191 
192 	if (aconnector->dc_sink)
193 		subconnector = get_subconnector_type(link);
194 
195 	drm_object_property_set_value(&connector->base,
196 			connector->dev->mode_config.dp_subconnector_property,
197 			subconnector);
198 }
199 
200 /*
201  * initializes drm_device display related structures, based on the information
202  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
203  * drm_encoder, drm_mode_config
204  *
205  * Returns 0 on success
206  */
207 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
208 /* removes and deallocates the drm structures, created by the above function */
209 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
210 
211 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
212 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
213 				    uint32_t link_index,
214 				    struct amdgpu_encoder *amdgpu_encoder);
215 static int amdgpu_dm_encoder_init(struct drm_device *dev,
216 				  struct amdgpu_encoder *aencoder,
217 				  uint32_t link_index);
218 
219 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
220 
221 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
222 
223 static int amdgpu_dm_atomic_check(struct drm_device *dev,
224 				  struct drm_atomic_state *state);
225 
226 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
227 static void handle_hpd_rx_irq(void *param);
228 
229 static bool
230 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
231 				 struct drm_crtc_state *new_crtc_state);
232 /*
233  * dm_vblank_get_counter
234  *
235  * @brief
236  * Get counter for number of vertical blanks
237  *
238  * @param
239  * struct amdgpu_device *adev - [in] desired amdgpu device
240  * int disp_idx - [in] which CRTC to get the counter from
241  *
242  * @return
243  * Counter for vertical blanks
244  */
245 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
246 {
247 	if (crtc >= adev->mode_info.num_crtc)
248 		return 0;
249 	else {
250 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
251 
252 		if (acrtc->dm_irq_params.stream == NULL) {
253 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
254 				  crtc);
255 			return 0;
256 		}
257 
258 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
259 	}
260 }
261 
262 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
263 				  u32 *vbl, u32 *position)
264 {
265 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
266 
267 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
268 		return -EINVAL;
269 	else {
270 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
271 
272 		if (acrtc->dm_irq_params.stream ==  NULL) {
273 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
274 				  crtc);
275 			return 0;
276 		}
277 
278 		/*
279 		 * TODO rework base driver to use values directly.
280 		 * for now parse it back into reg-format
281 		 */
282 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
283 					 &v_blank_start,
284 					 &v_blank_end,
285 					 &h_position,
286 					 &v_position);
287 
288 		*position = v_position | (h_position << 16);
289 		*vbl = v_blank_start | (v_blank_end << 16);
290 	}
291 
292 	return 0;
293 }
294 
295 static bool dm_is_idle(void *handle)
296 {
297 	/* XXX todo */
298 	return true;
299 }
300 
301 static int dm_wait_for_idle(void *handle)
302 {
303 	/* XXX todo */
304 	return 0;
305 }
306 
307 static bool dm_check_soft_reset(void *handle)
308 {
309 	return false;
310 }
311 
312 static int dm_soft_reset(void *handle)
313 {
314 	/* XXX todo */
315 	return 0;
316 }
317 
318 static struct amdgpu_crtc *
319 get_crtc_by_otg_inst(struct amdgpu_device *adev,
320 		     int otg_inst)
321 {
322 	struct drm_device *dev = adev_to_drm(adev);
323 	struct drm_crtc *crtc;
324 	struct amdgpu_crtc *amdgpu_crtc;
325 
326 	if (WARN_ON(otg_inst == -1))
327 		return adev->mode_info.crtcs[0];
328 
329 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
330 		amdgpu_crtc = to_amdgpu_crtc(crtc);
331 
332 		if (amdgpu_crtc->otg_inst == otg_inst)
333 			return amdgpu_crtc;
334 	}
335 
336 	return NULL;
337 }
338 
339 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
340 					      struct dm_crtc_state *new_state)
341 {
342 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
343 		return true;
344 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
345 		return true;
346 	else
347 		return false;
348 }
349 
350 /**
351  * dm_pflip_high_irq() - Handle pageflip interrupt
352  * @interrupt_params: ignored
353  *
354  * Handles the pageflip interrupt by notifying all interested parties
355  * that the pageflip has been completed.
356  */
357 static void dm_pflip_high_irq(void *interrupt_params)
358 {
359 	struct amdgpu_crtc *amdgpu_crtc;
360 	struct common_irq_params *irq_params = interrupt_params;
361 	struct amdgpu_device *adev = irq_params->adev;
362 	unsigned long flags;
363 	struct drm_pending_vblank_event *e;
364 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
365 	bool vrr_active;
366 
367 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
368 
369 	/* IRQ could occur when in initial stage */
370 	/* TODO work and BO cleanup */
371 	if (amdgpu_crtc == NULL) {
372 		DC_LOG_PFLIP("CRTC is null, returning.\n");
373 		return;
374 	}
375 
376 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
377 
378 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
379 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
380 						 amdgpu_crtc->pflip_status,
381 						 AMDGPU_FLIP_SUBMITTED,
382 						 amdgpu_crtc->crtc_id,
383 						 amdgpu_crtc);
384 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
385 		return;
386 	}
387 
388 	/* page flip completed. */
389 	e = amdgpu_crtc->event;
390 	amdgpu_crtc->event = NULL;
391 
392 	WARN_ON(!e);
393 
394 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
395 
396 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
397 	if (!vrr_active ||
398 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
399 				      &v_blank_end, &hpos, &vpos) ||
400 	    (vpos < v_blank_start)) {
401 		/* Update to correct count and vblank timestamp if racing with
402 		 * vblank irq. This also updates to the correct vblank timestamp
403 		 * even in VRR mode, as scanout is past the front-porch atm.
404 		 */
405 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
406 
407 		/* Wake up userspace by sending the pageflip event with proper
408 		 * count and timestamp of vblank of flip completion.
409 		 */
410 		if (e) {
411 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
412 
413 			/* Event sent, so done with vblank for this flip */
414 			drm_crtc_vblank_put(&amdgpu_crtc->base);
415 		}
416 	} else if (e) {
417 		/* VRR active and inside front-porch: vblank count and
418 		 * timestamp for pageflip event will only be up to date after
419 		 * drm_crtc_handle_vblank() has been executed from late vblank
420 		 * irq handler after start of back-porch (vline 0). We queue the
421 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
422 		 * updated timestamp and count, once it runs after us.
423 		 *
424 		 * We need to open-code this instead of using the helper
425 		 * drm_crtc_arm_vblank_event(), as that helper would
426 		 * call drm_crtc_accurate_vblank_count(), which we must
427 		 * not call in VRR mode while we are in front-porch!
428 		 */
429 
430 		/* sequence will be replaced by real count during send-out. */
431 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
432 		e->pipe = amdgpu_crtc->crtc_id;
433 
434 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
435 		e = NULL;
436 	}
437 
438 	/* Keep track of vblank of this flip for flip throttling. We use the
439 	 * cooked hw counter, as that one incremented at start of this vblank
440 	 * of pageflip completion, so last_flip_vblank is the forbidden count
441 	 * for queueing new pageflips if vsync + VRR is enabled.
442 	 */
443 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
444 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
445 
446 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
447 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
448 
449 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
450 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
451 		     vrr_active, (int) !e);
452 }
453 
454 static void dm_vupdate_high_irq(void *interrupt_params)
455 {
456 	struct common_irq_params *irq_params = interrupt_params;
457 	struct amdgpu_device *adev = irq_params->adev;
458 	struct amdgpu_crtc *acrtc;
459 	struct drm_device *drm_dev;
460 	struct drm_vblank_crtc *vblank;
461 	ktime_t frame_duration_ns, previous_timestamp;
462 	unsigned long flags;
463 	int vrr_active;
464 
465 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
466 
467 	if (acrtc) {
468 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
469 		drm_dev = acrtc->base.dev;
470 		vblank = &drm_dev->vblank[acrtc->base.index];
471 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
472 		frame_duration_ns = vblank->time - previous_timestamp;
473 
474 		if (frame_duration_ns > 0) {
475 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
476 						frame_duration_ns,
477 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
478 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
479 		}
480 
481 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
482 			      acrtc->crtc_id,
483 			      vrr_active);
484 
485 		/* Core vblank handling is done here after end of front-porch in
486 		 * vrr mode, as vblank timestamping will give valid results
487 		 * while now done after front-porch. This will also deliver
488 		 * page-flip completion events that have been queued to us
489 		 * if a pageflip happened inside front-porch.
490 		 */
491 		if (vrr_active) {
492 			dm_crtc_handle_vblank(acrtc);
493 
494 			/* BTR processing for pre-DCE12 ASICs */
495 			if (acrtc->dm_irq_params.stream &&
496 			    adev->family < AMDGPU_FAMILY_AI) {
497 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
498 				mod_freesync_handle_v_update(
499 				    adev->dm.freesync_module,
500 				    acrtc->dm_irq_params.stream,
501 				    &acrtc->dm_irq_params.vrr_params);
502 
503 				dc_stream_adjust_vmin_vmax(
504 				    adev->dm.dc,
505 				    acrtc->dm_irq_params.stream,
506 				    &acrtc->dm_irq_params.vrr_params.adjust);
507 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
508 			}
509 		}
510 	}
511 }
512 
513 /**
514  * dm_crtc_high_irq() - Handles CRTC interrupt
515  * @interrupt_params: used for determining the CRTC instance
516  *
517  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
518  * event handler.
519  */
520 static void dm_crtc_high_irq(void *interrupt_params)
521 {
522 	struct common_irq_params *irq_params = interrupt_params;
523 	struct amdgpu_device *adev = irq_params->adev;
524 	struct amdgpu_crtc *acrtc;
525 	unsigned long flags;
526 	int vrr_active;
527 
528 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
529 	if (!acrtc)
530 		return;
531 
532 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
533 
534 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
535 		      vrr_active, acrtc->dm_irq_params.active_planes);
536 
537 	/**
538 	 * Core vblank handling at start of front-porch is only possible
539 	 * in non-vrr mode, as only there vblank timestamping will give
540 	 * valid results while done in front-porch. Otherwise defer it
541 	 * to dm_vupdate_high_irq after end of front-porch.
542 	 */
543 	if (!vrr_active)
544 		dm_crtc_handle_vblank(acrtc);
545 
546 	/**
547 	 * Following stuff must happen at start of vblank, for crc
548 	 * computation and below-the-range btr support in vrr mode.
549 	 */
550 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
551 
552 	/* BTR updates need to happen before VUPDATE on Vega and above. */
553 	if (adev->family < AMDGPU_FAMILY_AI)
554 		return;
555 
556 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
557 
558 	if (acrtc->dm_irq_params.stream &&
559 	    acrtc->dm_irq_params.vrr_params.supported &&
560 	    acrtc->dm_irq_params.freesync_config.state ==
561 		    VRR_STATE_ACTIVE_VARIABLE) {
562 		mod_freesync_handle_v_update(adev->dm.freesync_module,
563 					     acrtc->dm_irq_params.stream,
564 					     &acrtc->dm_irq_params.vrr_params);
565 
566 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
567 					   &acrtc->dm_irq_params.vrr_params.adjust);
568 	}
569 
570 	/*
571 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
572 	 * In that case, pageflip completion interrupts won't fire and pageflip
573 	 * completion events won't get delivered. Prevent this by sending
574 	 * pending pageflip events from here if a flip is still pending.
575 	 *
576 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
577 	 * avoid race conditions between flip programming and completion,
578 	 * which could cause too early flip completion events.
579 	 */
580 	if (adev->family >= AMDGPU_FAMILY_RV &&
581 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
582 	    acrtc->dm_irq_params.active_planes == 0) {
583 		if (acrtc->event) {
584 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
585 			acrtc->event = NULL;
586 			drm_crtc_vblank_put(&acrtc->base);
587 		}
588 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
589 	}
590 
591 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
592 }
593 
594 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
595 /**
596  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
597  * DCN generation ASICs
598  * @interrupt_params: interrupt parameters
599  *
600  * Used to set crc window/read out crc value at vertical line 0 position
601  */
602 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
603 {
604 	struct common_irq_params *irq_params = interrupt_params;
605 	struct amdgpu_device *adev = irq_params->adev;
606 	struct amdgpu_crtc *acrtc;
607 
608 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
609 
610 	if (!acrtc)
611 		return;
612 
613 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
614 }
615 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
616 
617 /**
618  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
619  * @adev: amdgpu_device pointer
620  * @notify: dmub notification structure
621  *
622  * Dmub AUX or SET_CONFIG command completion processing callback
623  * Copies dmub notification to DM which is to be read by AUX command.
624  * issuing thread and also signals the event to wake up the thread.
625  */
626 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
627 					struct dmub_notification *notify)
628 {
629 	if (adev->dm.dmub_notify)
630 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
631 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
632 		complete(&adev->dm.dmub_aux_transfer_done);
633 }
634 
635 /**
636  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
637  * @adev: amdgpu_device pointer
638  * @notify: dmub notification structure
639  *
640  * Dmub Hpd interrupt processing callback. Gets displayindex through the
641  * ink index and calls helper to do the processing.
642  */
643 static void dmub_hpd_callback(struct amdgpu_device *adev,
644 			      struct dmub_notification *notify)
645 {
646 	struct amdgpu_dm_connector *aconnector;
647 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
648 	struct drm_connector *connector;
649 	struct drm_connector_list_iter iter;
650 	struct dc_link *link;
651 	uint8_t link_index = 0;
652 	struct drm_device *dev;
653 
654 	if (adev == NULL)
655 		return;
656 
657 	if (notify == NULL) {
658 		DRM_ERROR("DMUB HPD callback notification was NULL");
659 		return;
660 	}
661 
662 	if (notify->link_index > adev->dm.dc->link_count) {
663 		DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
664 		return;
665 	}
666 
667 	link_index = notify->link_index;
668 	link = adev->dm.dc->links[link_index];
669 	dev = adev->dm.ddev;
670 
671 	drm_connector_list_iter_begin(dev, &iter);
672 	drm_for_each_connector_iter(connector, &iter) {
673 		aconnector = to_amdgpu_dm_connector(connector);
674 		if (link && aconnector->dc_link == link) {
675 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
676 			hpd_aconnector = aconnector;
677 			break;
678 		}
679 	}
680 	drm_connector_list_iter_end(&iter);
681 
682 	if (hpd_aconnector) {
683 		if (notify->type == DMUB_NOTIFICATION_HPD)
684 			handle_hpd_irq_helper(hpd_aconnector);
685 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
686 			handle_hpd_rx_irq(hpd_aconnector);
687 	}
688 }
689 
690 /**
691  * register_dmub_notify_callback - Sets callback for DMUB notify
692  * @adev: amdgpu_device pointer
693  * @type: Type of dmub notification
694  * @callback: Dmub interrupt callback function
695  * @dmub_int_thread_offload: offload indicator
696  *
697  * API to register a dmub callback handler for a dmub notification
698  * Also sets indicator whether callback processing to be offloaded.
699  * to dmub interrupt handling thread
700  * Return: true if successfully registered, false if there is existing registration
701  */
702 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
703 					  enum dmub_notification_type type,
704 					  dmub_notify_interrupt_callback_t callback,
705 					  bool dmub_int_thread_offload)
706 {
707 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
708 		adev->dm.dmub_callback[type] = callback;
709 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
710 	} else
711 		return false;
712 
713 	return true;
714 }
715 
716 static void dm_handle_hpd_work(struct work_struct *work)
717 {
718 	struct dmub_hpd_work *dmub_hpd_wrk;
719 
720 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
721 
722 	if (!dmub_hpd_wrk->dmub_notify) {
723 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
724 		return;
725 	}
726 
727 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
728 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
729 		dmub_hpd_wrk->dmub_notify);
730 	}
731 
732 	kfree(dmub_hpd_wrk->dmub_notify);
733 	kfree(dmub_hpd_wrk);
734 
735 }
736 
737 #define DMUB_TRACE_MAX_READ 64
738 /**
739  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
740  * @interrupt_params: used for determining the Outbox instance
741  *
742  * Handles the Outbox Interrupt
743  * event handler.
744  */
745 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
746 {
747 	struct dmub_notification notify;
748 	struct common_irq_params *irq_params = interrupt_params;
749 	struct amdgpu_device *adev = irq_params->adev;
750 	struct amdgpu_display_manager *dm = &adev->dm;
751 	struct dmcub_trace_buf_entry entry = { 0 };
752 	uint32_t count = 0;
753 	struct dmub_hpd_work *dmub_hpd_wrk;
754 	struct dc_link *plink = NULL;
755 
756 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
757 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
758 
759 		do {
760 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
761 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
762 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
763 				continue;
764 			}
765 			if (!dm->dmub_callback[notify.type]) {
766 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
767 				continue;
768 			}
769 			if (dm->dmub_thread_offload[notify.type] == true) {
770 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
771 				if (!dmub_hpd_wrk) {
772 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
773 					return;
774 				}
775 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
776 				if (!dmub_hpd_wrk->dmub_notify) {
777 					kfree(dmub_hpd_wrk);
778 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
779 					return;
780 				}
781 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
782 				if (dmub_hpd_wrk->dmub_notify)
783 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
784 				dmub_hpd_wrk->adev = adev;
785 				if (notify.type == DMUB_NOTIFICATION_HPD) {
786 					plink = adev->dm.dc->links[notify.link_index];
787 					if (plink) {
788 						plink->hpd_status =
789 							notify.hpd_status == DP_HPD_PLUG;
790 					}
791 				}
792 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
793 			} else {
794 				dm->dmub_callback[notify.type](adev, &notify);
795 			}
796 		} while (notify.pending_notification);
797 	}
798 
799 
800 	do {
801 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
802 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
803 							entry.param0, entry.param1);
804 
805 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
806 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
807 		} else
808 			break;
809 
810 		count++;
811 
812 	} while (count <= DMUB_TRACE_MAX_READ);
813 
814 	if (count > DMUB_TRACE_MAX_READ)
815 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
816 }
817 
818 static int dm_set_clockgating_state(void *handle,
819 		  enum amd_clockgating_state state)
820 {
821 	return 0;
822 }
823 
824 static int dm_set_powergating_state(void *handle,
825 		  enum amd_powergating_state state)
826 {
827 	return 0;
828 }
829 
830 /* Prototypes of private functions */
831 static int dm_early_init(void* handle);
832 
833 /* Allocate memory for FBC compressed data  */
834 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
835 {
836 	struct drm_device *dev = connector->dev;
837 	struct amdgpu_device *adev = drm_to_adev(dev);
838 	struct dm_compressor_info *compressor = &adev->dm.compressor;
839 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
840 	struct drm_display_mode *mode;
841 	unsigned long max_size = 0;
842 
843 	if (adev->dm.dc->fbc_compressor == NULL)
844 		return;
845 
846 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
847 		return;
848 
849 	if (compressor->bo_ptr)
850 		return;
851 
852 
853 	list_for_each_entry(mode, &connector->modes, head) {
854 		if (max_size < mode->htotal * mode->vtotal)
855 			max_size = mode->htotal * mode->vtotal;
856 	}
857 
858 	if (max_size) {
859 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
860 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
861 			    &compressor->gpu_addr, &compressor->cpu_addr);
862 
863 		if (r)
864 			DRM_ERROR("DM: Failed to initialize FBC\n");
865 		else {
866 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
867 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
868 		}
869 
870 	}
871 
872 }
873 
874 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
875 					  int pipe, bool *enabled,
876 					  unsigned char *buf, int max_bytes)
877 {
878 	struct drm_device *dev = dev_get_drvdata(kdev);
879 	struct amdgpu_device *adev = drm_to_adev(dev);
880 	struct drm_connector *connector;
881 	struct drm_connector_list_iter conn_iter;
882 	struct amdgpu_dm_connector *aconnector;
883 	int ret = 0;
884 
885 	*enabled = false;
886 
887 	mutex_lock(&adev->dm.audio_lock);
888 
889 	drm_connector_list_iter_begin(dev, &conn_iter);
890 	drm_for_each_connector_iter(connector, &conn_iter) {
891 		aconnector = to_amdgpu_dm_connector(connector);
892 		if (aconnector->audio_inst != port)
893 			continue;
894 
895 		*enabled = true;
896 		ret = drm_eld_size(connector->eld);
897 		memcpy(buf, connector->eld, min(max_bytes, ret));
898 
899 		break;
900 	}
901 	drm_connector_list_iter_end(&conn_iter);
902 
903 	mutex_unlock(&adev->dm.audio_lock);
904 
905 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
906 
907 	return ret;
908 }
909 
910 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
911 	.get_eld = amdgpu_dm_audio_component_get_eld,
912 };
913 
914 static int amdgpu_dm_audio_component_bind(struct device *kdev,
915 				       struct device *hda_kdev, void *data)
916 {
917 	struct drm_device *dev = dev_get_drvdata(kdev);
918 	struct amdgpu_device *adev = drm_to_adev(dev);
919 	struct drm_audio_component *acomp = data;
920 
921 	acomp->ops = &amdgpu_dm_audio_component_ops;
922 	acomp->dev = kdev;
923 	adev->dm.audio_component = acomp;
924 
925 	return 0;
926 }
927 
928 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
929 					  struct device *hda_kdev, void *data)
930 {
931 	struct drm_device *dev = dev_get_drvdata(kdev);
932 	struct amdgpu_device *adev = drm_to_adev(dev);
933 	struct drm_audio_component *acomp = data;
934 
935 	acomp->ops = NULL;
936 	acomp->dev = NULL;
937 	adev->dm.audio_component = NULL;
938 }
939 
940 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
941 	.bind	= amdgpu_dm_audio_component_bind,
942 	.unbind	= amdgpu_dm_audio_component_unbind,
943 };
944 
945 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
946 {
947 	int i, ret;
948 
949 	if (!amdgpu_audio)
950 		return 0;
951 
952 	adev->mode_info.audio.enabled = true;
953 
954 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
955 
956 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
957 		adev->mode_info.audio.pin[i].channels = -1;
958 		adev->mode_info.audio.pin[i].rate = -1;
959 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
960 		adev->mode_info.audio.pin[i].status_bits = 0;
961 		adev->mode_info.audio.pin[i].category_code = 0;
962 		adev->mode_info.audio.pin[i].connected = false;
963 		adev->mode_info.audio.pin[i].id =
964 			adev->dm.dc->res_pool->audios[i]->inst;
965 		adev->mode_info.audio.pin[i].offset = 0;
966 	}
967 
968 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
969 	if (ret < 0)
970 		return ret;
971 
972 	adev->dm.audio_registered = true;
973 
974 	return 0;
975 }
976 
977 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
978 {
979 	if (!amdgpu_audio)
980 		return;
981 
982 	if (!adev->mode_info.audio.enabled)
983 		return;
984 
985 	if (adev->dm.audio_registered) {
986 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
987 		adev->dm.audio_registered = false;
988 	}
989 
990 	/* TODO: Disable audio? */
991 
992 	adev->mode_info.audio.enabled = false;
993 }
994 
995 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
996 {
997 	struct drm_audio_component *acomp = adev->dm.audio_component;
998 
999 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1000 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1001 
1002 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1003 						 pin, -1);
1004 	}
1005 }
1006 
1007 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1008 {
1009 	const struct dmcub_firmware_header_v1_0 *hdr;
1010 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1011 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1012 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1013 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1014 	struct abm *abm = adev->dm.dc->res_pool->abm;
1015 	struct dmub_srv_hw_params hw_params;
1016 	enum dmub_status status;
1017 	const unsigned char *fw_inst_const, *fw_bss_data;
1018 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1019 	bool has_hw_support;
1020 
1021 	if (!dmub_srv)
1022 		/* DMUB isn't supported on the ASIC. */
1023 		return 0;
1024 
1025 	if (!fb_info) {
1026 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1027 		return -EINVAL;
1028 	}
1029 
1030 	if (!dmub_fw) {
1031 		/* Firmware required for DMUB support. */
1032 		DRM_ERROR("No firmware provided for DMUB.\n");
1033 		return -EINVAL;
1034 	}
1035 
1036 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1037 	if (status != DMUB_STATUS_OK) {
1038 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1039 		return -EINVAL;
1040 	}
1041 
1042 	if (!has_hw_support) {
1043 		DRM_INFO("DMUB unsupported on ASIC\n");
1044 		return 0;
1045 	}
1046 
1047 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1048 	status = dmub_srv_hw_reset(dmub_srv);
1049 	if (status != DMUB_STATUS_OK)
1050 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1051 
1052 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1053 
1054 	fw_inst_const = dmub_fw->data +
1055 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1056 			PSP_HEADER_BYTES;
1057 
1058 	fw_bss_data = dmub_fw->data +
1059 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1060 		      le32_to_cpu(hdr->inst_const_bytes);
1061 
1062 	/* Copy firmware and bios info into FB memory. */
1063 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1064 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1065 
1066 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1067 
1068 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1069 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1070 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1071 	 * will be done by dm_dmub_hw_init
1072 	 */
1073 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1074 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1075 				fw_inst_const_size);
1076 	}
1077 
1078 	if (fw_bss_data_size)
1079 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1080 		       fw_bss_data, fw_bss_data_size);
1081 
1082 	/* Copy firmware bios info into FB memory. */
1083 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1084 	       adev->bios_size);
1085 
1086 	/* Reset regions that need to be reset. */
1087 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1088 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1089 
1090 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1091 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1092 
1093 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1094 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1095 
1096 	/* Initialize hardware. */
1097 	memset(&hw_params, 0, sizeof(hw_params));
1098 	hw_params.fb_base = adev->gmc.fb_start;
1099 	hw_params.fb_offset = adev->gmc.aper_base;
1100 
1101 	/* backdoor load firmware and trigger dmub running */
1102 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1103 		hw_params.load_inst_const = true;
1104 
1105 	if (dmcu)
1106 		hw_params.psp_version = dmcu->psp_version;
1107 
1108 	for (i = 0; i < fb_info->num_fb; ++i)
1109 		hw_params.fb[i] = &fb_info->fb[i];
1110 
1111 	switch (adev->ip_versions[DCE_HWIP][0]) {
1112 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1113 		hw_params.dpia_supported = true;
1114 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1115 		break;
1116 	default:
1117 		break;
1118 	}
1119 
1120 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1121 	if (status != DMUB_STATUS_OK) {
1122 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1123 		return -EINVAL;
1124 	}
1125 
1126 	/* Wait for firmware load to finish. */
1127 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1128 	if (status != DMUB_STATUS_OK)
1129 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1130 
1131 	/* Init DMCU and ABM if available. */
1132 	if (dmcu && abm) {
1133 		dmcu->funcs->dmcu_init(dmcu);
1134 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1135 	}
1136 
1137 	if (!adev->dm.dc->ctx->dmub_srv)
1138 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1139 	if (!adev->dm.dc->ctx->dmub_srv) {
1140 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1141 		return -ENOMEM;
1142 	}
1143 
1144 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1145 		 adev->dm.dmcub_fw_version);
1146 
1147 	return 0;
1148 }
1149 
1150 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1151 {
1152 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1153 	enum dmub_status status;
1154 	bool init;
1155 
1156 	if (!dmub_srv) {
1157 		/* DMUB isn't supported on the ASIC. */
1158 		return;
1159 	}
1160 
1161 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1162 	if (status != DMUB_STATUS_OK)
1163 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1164 
1165 	if (status == DMUB_STATUS_OK && init) {
1166 		/* Wait for firmware load to finish. */
1167 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1168 		if (status != DMUB_STATUS_OK)
1169 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1170 	} else {
1171 		/* Perform the full hardware initialization. */
1172 		dm_dmub_hw_init(adev);
1173 	}
1174 }
1175 
1176 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1177 {
1178 	uint64_t pt_base;
1179 	uint32_t logical_addr_low;
1180 	uint32_t logical_addr_high;
1181 	uint32_t agp_base, agp_bot, agp_top;
1182 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1183 
1184 	memset(pa_config, 0, sizeof(*pa_config));
1185 
1186 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1187 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1188 
1189 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1190 		/*
1191 		 * Raven2 has a HW issue that it is unable to use the vram which
1192 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1193 		 * workaround that increase system aperture high address (add 1)
1194 		 * to get rid of the VM fault and hardware hang.
1195 		 */
1196 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1197 	else
1198 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1199 
1200 	agp_base = 0;
1201 	agp_bot = adev->gmc.agp_start >> 24;
1202 	agp_top = adev->gmc.agp_end >> 24;
1203 
1204 
1205 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1206 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1207 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1208 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1209 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1210 	page_table_base.low_part = lower_32_bits(pt_base);
1211 
1212 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1213 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1214 
1215 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1216 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1217 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1218 
1219 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1220 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1221 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1222 
1223 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1224 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1225 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1226 
1227 	pa_config->is_hvm_enabled = 0;
1228 
1229 }
1230 
1231 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1232 {
1233 	struct hpd_rx_irq_offload_work *offload_work;
1234 	struct amdgpu_dm_connector *aconnector;
1235 	struct dc_link *dc_link;
1236 	struct amdgpu_device *adev;
1237 	enum dc_connection_type new_connection_type = dc_connection_none;
1238 	unsigned long flags;
1239 
1240 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1241 	aconnector = offload_work->offload_wq->aconnector;
1242 
1243 	if (!aconnector) {
1244 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1245 		goto skip;
1246 	}
1247 
1248 	adev = drm_to_adev(aconnector->base.dev);
1249 	dc_link = aconnector->dc_link;
1250 
1251 	mutex_lock(&aconnector->hpd_lock);
1252 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1253 		DRM_ERROR("KMS: Failed to detect connector\n");
1254 	mutex_unlock(&aconnector->hpd_lock);
1255 
1256 	if (new_connection_type == dc_connection_none)
1257 		goto skip;
1258 
1259 	if (amdgpu_in_reset(adev))
1260 		goto skip;
1261 
1262 	mutex_lock(&adev->dm.dc_lock);
1263 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1264 		dc_link_dp_handle_automated_test(dc_link);
1265 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1266 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1267 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1268 		dc_link_dp_handle_link_loss(dc_link);
1269 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1270 		offload_work->offload_wq->is_handling_link_loss = false;
1271 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1272 	}
1273 	mutex_unlock(&adev->dm.dc_lock);
1274 
1275 skip:
1276 	kfree(offload_work);
1277 
1278 }
1279 
1280 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1281 {
1282 	int max_caps = dc->caps.max_links;
1283 	int i = 0;
1284 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1285 
1286 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1287 
1288 	if (!hpd_rx_offload_wq)
1289 		return NULL;
1290 
1291 
1292 	for (i = 0; i < max_caps; i++) {
1293 		hpd_rx_offload_wq[i].wq =
1294 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1295 
1296 		if (hpd_rx_offload_wq[i].wq == NULL) {
1297 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1298 			return NULL;
1299 		}
1300 
1301 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1302 	}
1303 
1304 	return hpd_rx_offload_wq;
1305 }
1306 
1307 struct amdgpu_stutter_quirk {
1308 	u16 chip_vendor;
1309 	u16 chip_device;
1310 	u16 subsys_vendor;
1311 	u16 subsys_device;
1312 	u8 revision;
1313 };
1314 
1315 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1316 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1317 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1318 	{ 0, 0, 0, 0, 0 },
1319 };
1320 
1321 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1322 {
1323 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1324 
1325 	while (p && p->chip_device != 0) {
1326 		if (pdev->vendor == p->chip_vendor &&
1327 		    pdev->device == p->chip_device &&
1328 		    pdev->subsystem_vendor == p->subsys_vendor &&
1329 		    pdev->subsystem_device == p->subsys_device &&
1330 		    pdev->revision == p->revision) {
1331 			return true;
1332 		}
1333 		++p;
1334 	}
1335 	return false;
1336 }
1337 
1338 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1339 	{
1340 		.matches = {
1341 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1342 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1343 		},
1344 	},
1345 	{
1346 		.matches = {
1347 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1348 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1349 		},
1350 	},
1351 	{
1352 		.matches = {
1353 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1354 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1355 		},
1356 	},
1357 	{}
1358 };
1359 
1360 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1361 {
1362 	const struct dmi_system_id *dmi_id;
1363 
1364 	dm->aux_hpd_discon_quirk = false;
1365 
1366 	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1367 	if (dmi_id) {
1368 		dm->aux_hpd_discon_quirk = true;
1369 		DRM_INFO("aux_hpd_discon_quirk attached\n");
1370 	}
1371 }
1372 
1373 static int amdgpu_dm_init(struct amdgpu_device *adev)
1374 {
1375 	struct dc_init_data init_data;
1376 #ifdef CONFIG_DRM_AMD_DC_HDCP
1377 	struct dc_callback_init init_params;
1378 #endif
1379 	int r;
1380 
1381 	adev->dm.ddev = adev_to_drm(adev);
1382 	adev->dm.adev = adev;
1383 
1384 	/* Zero all the fields */
1385 	memset(&init_data, 0, sizeof(init_data));
1386 #ifdef CONFIG_DRM_AMD_DC_HDCP
1387 	memset(&init_params, 0, sizeof(init_params));
1388 #endif
1389 
1390 	mutex_init(&adev->dm.dc_lock);
1391 	mutex_init(&adev->dm.audio_lock);
1392 	spin_lock_init(&adev->dm.vblank_lock);
1393 
1394 	if(amdgpu_dm_irq_init(adev)) {
1395 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1396 		goto error;
1397 	}
1398 
1399 	init_data.asic_id.chip_family = adev->family;
1400 
1401 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1402 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1403 	init_data.asic_id.chip_id = adev->pdev->device;
1404 
1405 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1406 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1407 	init_data.asic_id.atombios_base_address =
1408 		adev->mode_info.atom_context->bios;
1409 
1410 	init_data.driver = adev;
1411 
1412 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1413 
1414 	if (!adev->dm.cgs_device) {
1415 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1416 		goto error;
1417 	}
1418 
1419 	init_data.cgs_device = adev->dm.cgs_device;
1420 
1421 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1422 
1423 	switch (adev->ip_versions[DCE_HWIP][0]) {
1424 	case IP_VERSION(2, 1, 0):
1425 		switch (adev->dm.dmcub_fw_version) {
1426 		case 0: /* development */
1427 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1428 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1429 			init_data.flags.disable_dmcu = false;
1430 			break;
1431 		default:
1432 			init_data.flags.disable_dmcu = true;
1433 		}
1434 		break;
1435 	case IP_VERSION(2, 0, 3):
1436 		init_data.flags.disable_dmcu = true;
1437 		break;
1438 	default:
1439 		break;
1440 	}
1441 
1442 	switch (adev->asic_type) {
1443 	case CHIP_CARRIZO:
1444 	case CHIP_STONEY:
1445 		init_data.flags.gpu_vm_support = true;
1446 		break;
1447 	default:
1448 		switch (adev->ip_versions[DCE_HWIP][0]) {
1449 		case IP_VERSION(1, 0, 0):
1450 		case IP_VERSION(1, 0, 1):
1451 			/* enable S/G on PCO and RV2 */
1452 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1453 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1454 				init_data.flags.gpu_vm_support = true;
1455 			break;
1456 		case IP_VERSION(2, 1, 0):
1457 		case IP_VERSION(3, 0, 1):
1458 		case IP_VERSION(3, 1, 2):
1459 		case IP_VERSION(3, 1, 3):
1460 		case IP_VERSION(3, 1, 5):
1461 		case IP_VERSION(3, 1, 6):
1462 			init_data.flags.gpu_vm_support = true;
1463 			break;
1464 		default:
1465 			break;
1466 		}
1467 		break;
1468 	}
1469 
1470 	if (init_data.flags.gpu_vm_support)
1471 		adev->mode_info.gpu_vm_support = true;
1472 
1473 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1474 		init_data.flags.fbc_support = true;
1475 
1476 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1477 		init_data.flags.multi_mon_pp_mclk_switch = true;
1478 
1479 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1480 		init_data.flags.disable_fractional_pwm = true;
1481 
1482 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1483 		init_data.flags.edp_no_power_sequencing = true;
1484 
1485 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1486 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1487 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1488 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1489 
1490 	init_data.flags.seamless_boot_edp_requested = false;
1491 
1492 	if (check_seamless_boot_capability(adev)) {
1493 		init_data.flags.seamless_boot_edp_requested = true;
1494 		init_data.flags.allow_seamless_boot_optimization = true;
1495 		DRM_INFO("Seamless boot condition check passed\n");
1496 	}
1497 
1498 	init_data.flags.enable_mipi_converter_optimization = true;
1499 
1500 	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1501 	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1502 
1503 	INIT_LIST_HEAD(&adev->dm.da_list);
1504 
1505 	retrieve_dmi_info(&adev->dm);
1506 
1507 	/* Display Core create. */
1508 	adev->dm.dc = dc_create(&init_data);
1509 
1510 	if (adev->dm.dc) {
1511 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1512 	} else {
1513 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1514 		goto error;
1515 	}
1516 
1517 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1518 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1519 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1520 	}
1521 
1522 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1523 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1524 	if (dm_should_disable_stutter(adev->pdev))
1525 		adev->dm.dc->debug.disable_stutter = true;
1526 
1527 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1528 		adev->dm.dc->debug.disable_stutter = true;
1529 
1530 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1531 		adev->dm.dc->debug.disable_dsc = true;
1532 		adev->dm.dc->debug.disable_dsc_edp = true;
1533 	}
1534 
1535 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1536 		adev->dm.dc->debug.disable_clock_gate = true;
1537 
1538 	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1539 		adev->dm.dc->debug.force_subvp_mclk_switch = true;
1540 
1541 	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
1542 
1543 	r = dm_dmub_hw_init(adev);
1544 	if (r) {
1545 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1546 		goto error;
1547 	}
1548 
1549 	dc_hardware_init(adev->dm.dc);
1550 
1551 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1552 	if (!adev->dm.hpd_rx_offload_wq) {
1553 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1554 		goto error;
1555 	}
1556 
1557 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1558 		struct dc_phy_addr_space_config pa_config;
1559 
1560 		mmhub_read_system_context(adev, &pa_config);
1561 
1562 		// Call the DC init_memory func
1563 		dc_setup_system_context(adev->dm.dc, &pa_config);
1564 	}
1565 
1566 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1567 	if (!adev->dm.freesync_module) {
1568 		DRM_ERROR(
1569 		"amdgpu: failed to initialize freesync_module.\n");
1570 	} else
1571 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1572 				adev->dm.freesync_module);
1573 
1574 	amdgpu_dm_init_color_mod();
1575 
1576 	if (adev->dm.dc->caps.max_links > 0) {
1577 		adev->dm.vblank_control_workqueue =
1578 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1579 		if (!adev->dm.vblank_control_workqueue)
1580 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1581 	}
1582 
1583 #ifdef CONFIG_DRM_AMD_DC_HDCP
1584 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1585 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1586 
1587 		if (!adev->dm.hdcp_workqueue)
1588 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1589 		else
1590 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1591 
1592 		dc_init_callbacks(adev->dm.dc, &init_params);
1593 	}
1594 #endif
1595 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1596 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1597 #endif
1598 	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1599 		init_completion(&adev->dm.dmub_aux_transfer_done);
1600 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1601 		if (!adev->dm.dmub_notify) {
1602 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1603 			goto error;
1604 		}
1605 
1606 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1607 		if (!adev->dm.delayed_hpd_wq) {
1608 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1609 			goto error;
1610 		}
1611 
1612 		amdgpu_dm_outbox_init(adev);
1613 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1614 			dmub_aux_setconfig_callback, false)) {
1615 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
1616 			goto error;
1617 		}
1618 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1619 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1620 			goto error;
1621 		}
1622 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1623 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1624 			goto error;
1625 		}
1626 	}
1627 
1628 	if (amdgpu_dm_initialize_drm_device(adev)) {
1629 		DRM_ERROR(
1630 		"amdgpu: failed to initialize sw for display support.\n");
1631 		goto error;
1632 	}
1633 
1634 	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1635 	 * It is expected that DMUB will resend any pending notifications at this point, for
1636 	 * example HPD from DPIA.
1637 	 */
1638 	if (dc_is_dmub_outbox_supported(adev->dm.dc))
1639 		dc_enable_dmub_outbox(adev->dm.dc);
1640 
1641 	/* create fake encoders for MST */
1642 	dm_dp_create_fake_mst_encoders(adev);
1643 
1644 	/* TODO: Add_display_info? */
1645 
1646 	/* TODO use dynamic cursor width */
1647 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1648 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1649 
1650 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1651 		DRM_ERROR(
1652 		"amdgpu: failed to initialize sw for display support.\n");
1653 		goto error;
1654 	}
1655 
1656 
1657 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1658 
1659 	return 0;
1660 error:
1661 	amdgpu_dm_fini(adev);
1662 
1663 	return -EINVAL;
1664 }
1665 
1666 static int amdgpu_dm_early_fini(void *handle)
1667 {
1668 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1669 
1670 	amdgpu_dm_audio_fini(adev);
1671 
1672 	return 0;
1673 }
1674 
1675 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1676 {
1677 	int i;
1678 
1679 	if (adev->dm.vblank_control_workqueue) {
1680 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1681 		adev->dm.vblank_control_workqueue = NULL;
1682 	}
1683 
1684 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1685 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1686 	}
1687 
1688 	amdgpu_dm_destroy_drm_device(&adev->dm);
1689 
1690 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1691 	if (adev->dm.crc_rd_wrk) {
1692 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1693 		kfree(adev->dm.crc_rd_wrk);
1694 		adev->dm.crc_rd_wrk = NULL;
1695 	}
1696 #endif
1697 #ifdef CONFIG_DRM_AMD_DC_HDCP
1698 	if (adev->dm.hdcp_workqueue) {
1699 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1700 		adev->dm.hdcp_workqueue = NULL;
1701 	}
1702 
1703 	if (adev->dm.dc)
1704 		dc_deinit_callbacks(adev->dm.dc);
1705 #endif
1706 
1707 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1708 
1709 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1710 		kfree(adev->dm.dmub_notify);
1711 		adev->dm.dmub_notify = NULL;
1712 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1713 		adev->dm.delayed_hpd_wq = NULL;
1714 	}
1715 
1716 	if (adev->dm.dmub_bo)
1717 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1718 				      &adev->dm.dmub_bo_gpu_addr,
1719 				      &adev->dm.dmub_bo_cpu_addr);
1720 
1721 	if (adev->dm.hpd_rx_offload_wq) {
1722 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1723 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1724 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1725 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1726 			}
1727 		}
1728 
1729 		kfree(adev->dm.hpd_rx_offload_wq);
1730 		adev->dm.hpd_rx_offload_wq = NULL;
1731 	}
1732 
1733 	/* DC Destroy TODO: Replace destroy DAL */
1734 	if (adev->dm.dc)
1735 		dc_destroy(&adev->dm.dc);
1736 	/*
1737 	 * TODO: pageflip, vlank interrupt
1738 	 *
1739 	 * amdgpu_dm_irq_fini(adev);
1740 	 */
1741 
1742 	if (adev->dm.cgs_device) {
1743 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1744 		adev->dm.cgs_device = NULL;
1745 	}
1746 	if (adev->dm.freesync_module) {
1747 		mod_freesync_destroy(adev->dm.freesync_module);
1748 		adev->dm.freesync_module = NULL;
1749 	}
1750 
1751 	mutex_destroy(&adev->dm.audio_lock);
1752 	mutex_destroy(&adev->dm.dc_lock);
1753 
1754 	return;
1755 }
1756 
1757 static int load_dmcu_fw(struct amdgpu_device *adev)
1758 {
1759 	const char *fw_name_dmcu = NULL;
1760 	int r;
1761 	const struct dmcu_firmware_header_v1_0 *hdr;
1762 
1763 	switch(adev->asic_type) {
1764 #if defined(CONFIG_DRM_AMD_DC_SI)
1765 	case CHIP_TAHITI:
1766 	case CHIP_PITCAIRN:
1767 	case CHIP_VERDE:
1768 	case CHIP_OLAND:
1769 #endif
1770 	case CHIP_BONAIRE:
1771 	case CHIP_HAWAII:
1772 	case CHIP_KAVERI:
1773 	case CHIP_KABINI:
1774 	case CHIP_MULLINS:
1775 	case CHIP_TONGA:
1776 	case CHIP_FIJI:
1777 	case CHIP_CARRIZO:
1778 	case CHIP_STONEY:
1779 	case CHIP_POLARIS11:
1780 	case CHIP_POLARIS10:
1781 	case CHIP_POLARIS12:
1782 	case CHIP_VEGAM:
1783 	case CHIP_VEGA10:
1784 	case CHIP_VEGA12:
1785 	case CHIP_VEGA20:
1786 		return 0;
1787 	case CHIP_NAVI12:
1788 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1789 		break;
1790 	case CHIP_RAVEN:
1791 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1792 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1793 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1794 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1795 		else
1796 			return 0;
1797 		break;
1798 	default:
1799 		switch (adev->ip_versions[DCE_HWIP][0]) {
1800 		case IP_VERSION(2, 0, 2):
1801 		case IP_VERSION(2, 0, 3):
1802 		case IP_VERSION(2, 0, 0):
1803 		case IP_VERSION(2, 1, 0):
1804 		case IP_VERSION(3, 0, 0):
1805 		case IP_VERSION(3, 0, 2):
1806 		case IP_VERSION(3, 0, 3):
1807 		case IP_VERSION(3, 0, 1):
1808 		case IP_VERSION(3, 1, 2):
1809 		case IP_VERSION(3, 1, 3):
1810 		case IP_VERSION(3, 1, 4):
1811 		case IP_VERSION(3, 1, 5):
1812 		case IP_VERSION(3, 1, 6):
1813 		case IP_VERSION(3, 2, 0):
1814 		case IP_VERSION(3, 2, 1):
1815 			return 0;
1816 		default:
1817 			break;
1818 		}
1819 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1820 		return -EINVAL;
1821 	}
1822 
1823 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1824 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1825 		return 0;
1826 	}
1827 
1828 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1829 	if (r == -ENOENT) {
1830 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1831 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1832 		adev->dm.fw_dmcu = NULL;
1833 		return 0;
1834 	}
1835 	if (r) {
1836 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1837 			fw_name_dmcu);
1838 		return r;
1839 	}
1840 
1841 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1842 	if (r) {
1843 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1844 			fw_name_dmcu);
1845 		release_firmware(adev->dm.fw_dmcu);
1846 		adev->dm.fw_dmcu = NULL;
1847 		return r;
1848 	}
1849 
1850 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1851 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1852 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1853 	adev->firmware.fw_size +=
1854 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1855 
1856 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1857 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1858 	adev->firmware.fw_size +=
1859 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1860 
1861 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1862 
1863 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1864 
1865 	return 0;
1866 }
1867 
1868 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1869 {
1870 	struct amdgpu_device *adev = ctx;
1871 
1872 	return dm_read_reg(adev->dm.dc->ctx, address);
1873 }
1874 
1875 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1876 				     uint32_t value)
1877 {
1878 	struct amdgpu_device *adev = ctx;
1879 
1880 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1881 }
1882 
1883 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1884 {
1885 	struct dmub_srv_create_params create_params;
1886 	struct dmub_srv_region_params region_params;
1887 	struct dmub_srv_region_info region_info;
1888 	struct dmub_srv_fb_params fb_params;
1889 	struct dmub_srv_fb_info *fb_info;
1890 	struct dmub_srv *dmub_srv;
1891 	const struct dmcub_firmware_header_v1_0 *hdr;
1892 	const char *fw_name_dmub;
1893 	enum dmub_asic dmub_asic;
1894 	enum dmub_status status;
1895 	int r;
1896 
1897 	switch (adev->ip_versions[DCE_HWIP][0]) {
1898 	case IP_VERSION(2, 1, 0):
1899 		dmub_asic = DMUB_ASIC_DCN21;
1900 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1901 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1902 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1903 		break;
1904 	case IP_VERSION(3, 0, 0):
1905 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1906 			dmub_asic = DMUB_ASIC_DCN30;
1907 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1908 		} else {
1909 			dmub_asic = DMUB_ASIC_DCN30;
1910 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1911 		}
1912 		break;
1913 	case IP_VERSION(3, 0, 1):
1914 		dmub_asic = DMUB_ASIC_DCN301;
1915 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1916 		break;
1917 	case IP_VERSION(3, 0, 2):
1918 		dmub_asic = DMUB_ASIC_DCN302;
1919 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1920 		break;
1921 	case IP_VERSION(3, 0, 3):
1922 		dmub_asic = DMUB_ASIC_DCN303;
1923 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1924 		break;
1925 	case IP_VERSION(3, 1, 2):
1926 	case IP_VERSION(3, 1, 3):
1927 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1928 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1929 		break;
1930 	case IP_VERSION(3, 1, 4):
1931 		dmub_asic = DMUB_ASIC_DCN314;
1932 		fw_name_dmub = FIRMWARE_DCN_314_DMUB;
1933 		break;
1934 	case IP_VERSION(3, 1, 5):
1935 		dmub_asic = DMUB_ASIC_DCN315;
1936 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1937 		break;
1938 	case IP_VERSION(3, 1, 6):
1939 		dmub_asic = DMUB_ASIC_DCN316;
1940 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1941 		break;
1942 	case IP_VERSION(3, 2, 0):
1943 		dmub_asic = DMUB_ASIC_DCN32;
1944 		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
1945 		break;
1946 	case IP_VERSION(3, 2, 1):
1947 		dmub_asic = DMUB_ASIC_DCN321;
1948 		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
1949 		break;
1950 	default:
1951 		/* ASIC doesn't support DMUB. */
1952 		return 0;
1953 	}
1954 
1955 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1956 	if (r) {
1957 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1958 		return 0;
1959 	}
1960 
1961 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1962 	if (r) {
1963 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1964 		return 0;
1965 	}
1966 
1967 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1968 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1969 
1970 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1971 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1972 			AMDGPU_UCODE_ID_DMCUB;
1973 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1974 			adev->dm.dmub_fw;
1975 		adev->firmware.fw_size +=
1976 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1977 
1978 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1979 			 adev->dm.dmcub_fw_version);
1980 	}
1981 
1982 
1983 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1984 	dmub_srv = adev->dm.dmub_srv;
1985 
1986 	if (!dmub_srv) {
1987 		DRM_ERROR("Failed to allocate DMUB service!\n");
1988 		return -ENOMEM;
1989 	}
1990 
1991 	memset(&create_params, 0, sizeof(create_params));
1992 	create_params.user_ctx = adev;
1993 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1994 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1995 	create_params.asic = dmub_asic;
1996 
1997 	/* Create the DMUB service. */
1998 	status = dmub_srv_create(dmub_srv, &create_params);
1999 	if (status != DMUB_STATUS_OK) {
2000 		DRM_ERROR("Error creating DMUB service: %d\n", status);
2001 		return -EINVAL;
2002 	}
2003 
2004 	/* Calculate the size of all the regions for the DMUB service. */
2005 	memset(&region_params, 0, sizeof(region_params));
2006 
2007 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2008 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2009 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2010 	region_params.vbios_size = adev->bios_size;
2011 	region_params.fw_bss_data = region_params.bss_data_size ?
2012 		adev->dm.dmub_fw->data +
2013 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2014 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2015 	region_params.fw_inst_const =
2016 		adev->dm.dmub_fw->data +
2017 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2018 		PSP_HEADER_BYTES;
2019 
2020 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2021 					   &region_info);
2022 
2023 	if (status != DMUB_STATUS_OK) {
2024 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2025 		return -EINVAL;
2026 	}
2027 
2028 	/*
2029 	 * Allocate a framebuffer based on the total size of all the regions.
2030 	 * TODO: Move this into GART.
2031 	 */
2032 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2033 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2034 				    &adev->dm.dmub_bo_gpu_addr,
2035 				    &adev->dm.dmub_bo_cpu_addr);
2036 	if (r)
2037 		return r;
2038 
2039 	/* Rebase the regions on the framebuffer address. */
2040 	memset(&fb_params, 0, sizeof(fb_params));
2041 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2042 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2043 	fb_params.region_info = &region_info;
2044 
2045 	adev->dm.dmub_fb_info =
2046 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2047 	fb_info = adev->dm.dmub_fb_info;
2048 
2049 	if (!fb_info) {
2050 		DRM_ERROR(
2051 			"Failed to allocate framebuffer info for DMUB service!\n");
2052 		return -ENOMEM;
2053 	}
2054 
2055 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2056 	if (status != DMUB_STATUS_OK) {
2057 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2058 		return -EINVAL;
2059 	}
2060 
2061 	return 0;
2062 }
2063 
2064 static int dm_sw_init(void *handle)
2065 {
2066 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2067 	int r;
2068 
2069 	r = dm_dmub_sw_init(adev);
2070 	if (r)
2071 		return r;
2072 
2073 	return load_dmcu_fw(adev);
2074 }
2075 
2076 static int dm_sw_fini(void *handle)
2077 {
2078 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2079 
2080 	kfree(adev->dm.dmub_fb_info);
2081 	adev->dm.dmub_fb_info = NULL;
2082 
2083 	if (adev->dm.dmub_srv) {
2084 		dmub_srv_destroy(adev->dm.dmub_srv);
2085 		adev->dm.dmub_srv = NULL;
2086 	}
2087 
2088 	release_firmware(adev->dm.dmub_fw);
2089 	adev->dm.dmub_fw = NULL;
2090 
2091 	release_firmware(adev->dm.fw_dmcu);
2092 	adev->dm.fw_dmcu = NULL;
2093 
2094 	return 0;
2095 }
2096 
2097 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2098 {
2099 	struct amdgpu_dm_connector *aconnector;
2100 	struct drm_connector *connector;
2101 	struct drm_connector_list_iter iter;
2102 	int ret = 0;
2103 
2104 	drm_connector_list_iter_begin(dev, &iter);
2105 	drm_for_each_connector_iter(connector, &iter) {
2106 		aconnector = to_amdgpu_dm_connector(connector);
2107 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2108 		    aconnector->mst_mgr.aux) {
2109 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2110 					 aconnector,
2111 					 aconnector->base.base.id);
2112 
2113 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2114 			if (ret < 0) {
2115 				DRM_ERROR("DM_MST: Failed to start MST\n");
2116 				aconnector->dc_link->type =
2117 					dc_connection_single;
2118 				break;
2119 			}
2120 		}
2121 	}
2122 	drm_connector_list_iter_end(&iter);
2123 
2124 	return ret;
2125 }
2126 
2127 static int dm_late_init(void *handle)
2128 {
2129 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2130 
2131 	struct dmcu_iram_parameters params;
2132 	unsigned int linear_lut[16];
2133 	int i;
2134 	struct dmcu *dmcu = NULL;
2135 
2136 	dmcu = adev->dm.dc->res_pool->dmcu;
2137 
2138 	for (i = 0; i < 16; i++)
2139 		linear_lut[i] = 0xFFFF * i / 15;
2140 
2141 	params.set = 0;
2142 	params.backlight_ramping_override = false;
2143 	params.backlight_ramping_start = 0xCCCC;
2144 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2145 	params.backlight_lut_array_size = 16;
2146 	params.backlight_lut_array = linear_lut;
2147 
2148 	/* Min backlight level after ABM reduction,  Don't allow below 1%
2149 	 * 0xFFFF x 0.01 = 0x28F
2150 	 */
2151 	params.min_abm_backlight = 0x28F;
2152 	/* In the case where abm is implemented on dmcub,
2153 	* dmcu object will be null.
2154 	* ABM 2.4 and up are implemented on dmcub.
2155 	*/
2156 	if (dmcu) {
2157 		if (!dmcu_load_iram(dmcu, params))
2158 			return -EINVAL;
2159 	} else if (adev->dm.dc->ctx->dmub_srv) {
2160 		struct dc_link *edp_links[MAX_NUM_EDP];
2161 		int edp_num;
2162 
2163 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2164 		for (i = 0; i < edp_num; i++) {
2165 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2166 				return -EINVAL;
2167 		}
2168 	}
2169 
2170 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2171 }
2172 
2173 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2174 {
2175 	struct amdgpu_dm_connector *aconnector;
2176 	struct drm_connector *connector;
2177 	struct drm_connector_list_iter iter;
2178 	struct drm_dp_mst_topology_mgr *mgr;
2179 	int ret;
2180 	bool need_hotplug = false;
2181 
2182 	drm_connector_list_iter_begin(dev, &iter);
2183 	drm_for_each_connector_iter(connector, &iter) {
2184 		aconnector = to_amdgpu_dm_connector(connector);
2185 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2186 		    aconnector->mst_port)
2187 			continue;
2188 
2189 		mgr = &aconnector->mst_mgr;
2190 
2191 		if (suspend) {
2192 			drm_dp_mst_topology_mgr_suspend(mgr);
2193 		} else {
2194 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2195 			if (ret < 0) {
2196 				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2197 					aconnector->dc_link);
2198 				need_hotplug = true;
2199 			}
2200 		}
2201 	}
2202 	drm_connector_list_iter_end(&iter);
2203 
2204 	if (need_hotplug)
2205 		drm_kms_helper_hotplug_event(dev);
2206 }
2207 
2208 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2209 {
2210 	int ret = 0;
2211 
2212 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
2213 	 * on window driver dc implementation.
2214 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2215 	 * should be passed to smu during boot up and resume from s3.
2216 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
2217 	 * dcn20_resource_construct
2218 	 * then call pplib functions below to pass the settings to smu:
2219 	 * smu_set_watermarks_for_clock_ranges
2220 	 * smu_set_watermarks_table
2221 	 * navi10_set_watermarks_table
2222 	 * smu_write_watermarks_table
2223 	 *
2224 	 * For Renoir, clock settings of dcn watermark are also fixed values.
2225 	 * dc has implemented different flow for window driver:
2226 	 * dc_hardware_init / dc_set_power_state
2227 	 * dcn10_init_hw
2228 	 * notify_wm_ranges
2229 	 * set_wm_ranges
2230 	 * -- Linux
2231 	 * smu_set_watermarks_for_clock_ranges
2232 	 * renoir_set_watermarks_table
2233 	 * smu_write_watermarks_table
2234 	 *
2235 	 * For Linux,
2236 	 * dc_hardware_init -> amdgpu_dm_init
2237 	 * dc_set_power_state --> dm_resume
2238 	 *
2239 	 * therefore, this function apply to navi10/12/14 but not Renoir
2240 	 * *
2241 	 */
2242 	switch (adev->ip_versions[DCE_HWIP][0]) {
2243 	case IP_VERSION(2, 0, 2):
2244 	case IP_VERSION(2, 0, 0):
2245 		break;
2246 	default:
2247 		return 0;
2248 	}
2249 
2250 	ret = amdgpu_dpm_write_watermarks_table(adev);
2251 	if (ret) {
2252 		DRM_ERROR("Failed to update WMTABLE!\n");
2253 		return ret;
2254 	}
2255 
2256 	return 0;
2257 }
2258 
2259 /**
2260  * dm_hw_init() - Initialize DC device
2261  * @handle: The base driver device containing the amdgpu_dm device.
2262  *
2263  * Initialize the &struct amdgpu_display_manager device. This involves calling
2264  * the initializers of each DM component, then populating the struct with them.
2265  *
2266  * Although the function implies hardware initialization, both hardware and
2267  * software are initialized here. Splitting them out to their relevant init
2268  * hooks is a future TODO item.
2269  *
2270  * Some notable things that are initialized here:
2271  *
2272  * - Display Core, both software and hardware
2273  * - DC modules that we need (freesync and color management)
2274  * - DRM software states
2275  * - Interrupt sources and handlers
2276  * - Vblank support
2277  * - Debug FS entries, if enabled
2278  */
2279 static int dm_hw_init(void *handle)
2280 {
2281 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2282 	/* Create DAL display manager */
2283 	amdgpu_dm_init(adev);
2284 	amdgpu_dm_hpd_init(adev);
2285 
2286 	return 0;
2287 }
2288 
2289 /**
2290  * dm_hw_fini() - Teardown DC device
2291  * @handle: The base driver device containing the amdgpu_dm device.
2292  *
2293  * Teardown components within &struct amdgpu_display_manager that require
2294  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2295  * were loaded. Also flush IRQ workqueues and disable them.
2296  */
2297 static int dm_hw_fini(void *handle)
2298 {
2299 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2300 
2301 	amdgpu_dm_hpd_fini(adev);
2302 
2303 	amdgpu_dm_irq_fini(adev);
2304 	amdgpu_dm_fini(adev);
2305 	return 0;
2306 }
2307 
2308 
2309 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2310 				 struct dc_state *state, bool enable)
2311 {
2312 	enum dc_irq_source irq_source;
2313 	struct amdgpu_crtc *acrtc;
2314 	int rc = -EBUSY;
2315 	int i = 0;
2316 
2317 	for (i = 0; i < state->stream_count; i++) {
2318 		acrtc = get_crtc_by_otg_inst(
2319 				adev, state->stream_status[i].primary_otg_inst);
2320 
2321 		if (acrtc && state->stream_status[i].plane_count != 0) {
2322 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2323 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2324 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2325 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2326 			if (rc)
2327 				DRM_WARN("Failed to %s pflip interrupts\n",
2328 					 enable ? "enable" : "disable");
2329 
2330 			if (enable) {
2331 				rc = dm_enable_vblank(&acrtc->base);
2332 				if (rc)
2333 					DRM_WARN("Failed to enable vblank interrupts\n");
2334 			} else {
2335 				dm_disable_vblank(&acrtc->base);
2336 			}
2337 
2338 		}
2339 	}
2340 
2341 }
2342 
2343 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2344 {
2345 	struct dc_state *context = NULL;
2346 	enum dc_status res = DC_ERROR_UNEXPECTED;
2347 	int i;
2348 	struct dc_stream_state *del_streams[MAX_PIPES];
2349 	int del_streams_count = 0;
2350 
2351 	memset(del_streams, 0, sizeof(del_streams));
2352 
2353 	context = dc_create_state(dc);
2354 	if (context == NULL)
2355 		goto context_alloc_fail;
2356 
2357 	dc_resource_state_copy_construct_current(dc, context);
2358 
2359 	/* First remove from context all streams */
2360 	for (i = 0; i < context->stream_count; i++) {
2361 		struct dc_stream_state *stream = context->streams[i];
2362 
2363 		del_streams[del_streams_count++] = stream;
2364 	}
2365 
2366 	/* Remove all planes for removed streams and then remove the streams */
2367 	for (i = 0; i < del_streams_count; i++) {
2368 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2369 			res = DC_FAIL_DETACH_SURFACES;
2370 			goto fail;
2371 		}
2372 
2373 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2374 		if (res != DC_OK)
2375 			goto fail;
2376 	}
2377 
2378 	res = dc_commit_state(dc, context);
2379 
2380 fail:
2381 	dc_release_state(context);
2382 
2383 context_alloc_fail:
2384 	return res;
2385 }
2386 
2387 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2388 {
2389 	int i;
2390 
2391 	if (dm->hpd_rx_offload_wq) {
2392 		for (i = 0; i < dm->dc->caps.max_links; i++)
2393 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2394 	}
2395 }
2396 
2397 static int dm_suspend(void *handle)
2398 {
2399 	struct amdgpu_device *adev = handle;
2400 	struct amdgpu_display_manager *dm = &adev->dm;
2401 	int ret = 0;
2402 
2403 	if (amdgpu_in_reset(adev)) {
2404 		mutex_lock(&dm->dc_lock);
2405 
2406 		dc_allow_idle_optimizations(adev->dm.dc, false);
2407 
2408 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2409 
2410 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2411 
2412 		amdgpu_dm_commit_zero_streams(dm->dc);
2413 
2414 		amdgpu_dm_irq_suspend(adev);
2415 
2416 		hpd_rx_irq_work_suspend(dm);
2417 
2418 		return ret;
2419 	}
2420 
2421 	WARN_ON(adev->dm.cached_state);
2422 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2423 
2424 	s3_handle_mst(adev_to_drm(adev), true);
2425 
2426 	amdgpu_dm_irq_suspend(adev);
2427 
2428 	hpd_rx_irq_work_suspend(dm);
2429 
2430 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2431 
2432 	return 0;
2433 }
2434 
2435 struct amdgpu_dm_connector *
2436 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2437 					     struct drm_crtc *crtc)
2438 {
2439 	uint32_t i;
2440 	struct drm_connector_state *new_con_state;
2441 	struct drm_connector *connector;
2442 	struct drm_crtc *crtc_from_state;
2443 
2444 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2445 		crtc_from_state = new_con_state->crtc;
2446 
2447 		if (crtc_from_state == crtc)
2448 			return to_amdgpu_dm_connector(connector);
2449 	}
2450 
2451 	return NULL;
2452 }
2453 
2454 static void emulated_link_detect(struct dc_link *link)
2455 {
2456 	struct dc_sink_init_data sink_init_data = { 0 };
2457 	struct display_sink_capability sink_caps = { 0 };
2458 	enum dc_edid_status edid_status;
2459 	struct dc_context *dc_ctx = link->ctx;
2460 	struct dc_sink *sink = NULL;
2461 	struct dc_sink *prev_sink = NULL;
2462 
2463 	link->type = dc_connection_none;
2464 	prev_sink = link->local_sink;
2465 
2466 	if (prev_sink)
2467 		dc_sink_release(prev_sink);
2468 
2469 	switch (link->connector_signal) {
2470 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2471 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2472 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2473 		break;
2474 	}
2475 
2476 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2477 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2478 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2479 		break;
2480 	}
2481 
2482 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2483 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2484 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2485 		break;
2486 	}
2487 
2488 	case SIGNAL_TYPE_LVDS: {
2489 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2490 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2491 		break;
2492 	}
2493 
2494 	case SIGNAL_TYPE_EDP: {
2495 		sink_caps.transaction_type =
2496 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2497 		sink_caps.signal = SIGNAL_TYPE_EDP;
2498 		break;
2499 	}
2500 
2501 	case SIGNAL_TYPE_DISPLAY_PORT: {
2502 		sink_caps.transaction_type =
2503 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2504 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2505 		break;
2506 	}
2507 
2508 	default:
2509 		DC_ERROR("Invalid connector type! signal:%d\n",
2510 			link->connector_signal);
2511 		return;
2512 	}
2513 
2514 	sink_init_data.link = link;
2515 	sink_init_data.sink_signal = sink_caps.signal;
2516 
2517 	sink = dc_sink_create(&sink_init_data);
2518 	if (!sink) {
2519 		DC_ERROR("Failed to create sink!\n");
2520 		return;
2521 	}
2522 
2523 	/* dc_sink_create returns a new reference */
2524 	link->local_sink = sink;
2525 
2526 	edid_status = dm_helpers_read_local_edid(
2527 			link->ctx,
2528 			link,
2529 			sink);
2530 
2531 	if (edid_status != EDID_OK)
2532 		DC_ERROR("Failed to read EDID");
2533 
2534 }
2535 
2536 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2537 				     struct amdgpu_display_manager *dm)
2538 {
2539 	struct {
2540 		struct dc_surface_update surface_updates[MAX_SURFACES];
2541 		struct dc_plane_info plane_infos[MAX_SURFACES];
2542 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2543 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2544 		struct dc_stream_update stream_update;
2545 	} * bundle;
2546 	int k, m;
2547 
2548 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2549 
2550 	if (!bundle) {
2551 		dm_error("Failed to allocate update bundle\n");
2552 		goto cleanup;
2553 	}
2554 
2555 	for (k = 0; k < dc_state->stream_count; k++) {
2556 		bundle->stream_update.stream = dc_state->streams[k];
2557 
2558 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2559 			bundle->surface_updates[m].surface =
2560 				dc_state->stream_status->plane_states[m];
2561 			bundle->surface_updates[m].surface->force_full_update =
2562 				true;
2563 		}
2564 		dc_commit_updates_for_stream(
2565 			dm->dc, bundle->surface_updates,
2566 			dc_state->stream_status->plane_count,
2567 			dc_state->streams[k], &bundle->stream_update, dc_state);
2568 	}
2569 
2570 cleanup:
2571 	kfree(bundle);
2572 
2573 	return;
2574 }
2575 
2576 static int dm_resume(void *handle)
2577 {
2578 	struct amdgpu_device *adev = handle;
2579 	struct drm_device *ddev = adev_to_drm(adev);
2580 	struct amdgpu_display_manager *dm = &adev->dm;
2581 	struct amdgpu_dm_connector *aconnector;
2582 	struct drm_connector *connector;
2583 	struct drm_connector_list_iter iter;
2584 	struct drm_crtc *crtc;
2585 	struct drm_crtc_state *new_crtc_state;
2586 	struct dm_crtc_state *dm_new_crtc_state;
2587 	struct drm_plane *plane;
2588 	struct drm_plane_state *new_plane_state;
2589 	struct dm_plane_state *dm_new_plane_state;
2590 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2591 	enum dc_connection_type new_connection_type = dc_connection_none;
2592 	struct dc_state *dc_state;
2593 	int i, r, j;
2594 
2595 	if (amdgpu_in_reset(adev)) {
2596 		dc_state = dm->cached_dc_state;
2597 
2598 		/*
2599 		 * The dc->current_state is backed up into dm->cached_dc_state
2600 		 * before we commit 0 streams.
2601 		 *
2602 		 * DC will clear link encoder assignments on the real state
2603 		 * but the changes won't propagate over to the copy we made
2604 		 * before the 0 streams commit.
2605 		 *
2606 		 * DC expects that link encoder assignments are *not* valid
2607 		 * when committing a state, so as a workaround we can copy
2608 		 * off of the current state.
2609 		 *
2610 		 * We lose the previous assignments, but we had already
2611 		 * commit 0 streams anyway.
2612 		 */
2613 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2614 
2615 		r = dm_dmub_hw_init(adev);
2616 		if (r)
2617 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2618 
2619 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2620 		dc_resume(dm->dc);
2621 
2622 		amdgpu_dm_irq_resume_early(adev);
2623 
2624 		for (i = 0; i < dc_state->stream_count; i++) {
2625 			dc_state->streams[i]->mode_changed = true;
2626 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2627 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2628 					= 0xffffffff;
2629 			}
2630 		}
2631 
2632 		if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2633 			amdgpu_dm_outbox_init(adev);
2634 			dc_enable_dmub_outbox(adev->dm.dc);
2635 		}
2636 
2637 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2638 
2639 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2640 
2641 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2642 
2643 		dc_release_state(dm->cached_dc_state);
2644 		dm->cached_dc_state = NULL;
2645 
2646 		amdgpu_dm_irq_resume_late(adev);
2647 
2648 		mutex_unlock(&dm->dc_lock);
2649 
2650 		return 0;
2651 	}
2652 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2653 	dc_release_state(dm_state->context);
2654 	dm_state->context = dc_create_state(dm->dc);
2655 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2656 	dc_resource_state_construct(dm->dc, dm_state->context);
2657 
2658 	/* Before powering on DC we need to re-initialize DMUB. */
2659 	dm_dmub_hw_resume(adev);
2660 
2661 	/* Re-enable outbox interrupts for DPIA. */
2662 	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2663 		amdgpu_dm_outbox_init(adev);
2664 		dc_enable_dmub_outbox(adev->dm.dc);
2665 	}
2666 
2667 	/* power on hardware */
2668 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2669 
2670 	/* program HPD filter */
2671 	dc_resume(dm->dc);
2672 
2673 	/*
2674 	 * early enable HPD Rx IRQ, should be done before set mode as short
2675 	 * pulse interrupts are used for MST
2676 	 */
2677 	amdgpu_dm_irq_resume_early(adev);
2678 
2679 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2680 	s3_handle_mst(ddev, false);
2681 
2682 	/* Do detection*/
2683 	drm_connector_list_iter_begin(ddev, &iter);
2684 	drm_for_each_connector_iter(connector, &iter) {
2685 		aconnector = to_amdgpu_dm_connector(connector);
2686 
2687 		/*
2688 		 * this is the case when traversing through already created
2689 		 * MST connectors, should be skipped
2690 		 */
2691 		if (aconnector->dc_link &&
2692 		    aconnector->dc_link->type == dc_connection_mst_branch)
2693 			continue;
2694 
2695 		mutex_lock(&aconnector->hpd_lock);
2696 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2697 			DRM_ERROR("KMS: Failed to detect connector\n");
2698 
2699 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2700 			emulated_link_detect(aconnector->dc_link);
2701 		} else {
2702 			mutex_lock(&dm->dc_lock);
2703 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2704 			mutex_unlock(&dm->dc_lock);
2705 		}
2706 
2707 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2708 			aconnector->fake_enable = false;
2709 
2710 		if (aconnector->dc_sink)
2711 			dc_sink_release(aconnector->dc_sink);
2712 		aconnector->dc_sink = NULL;
2713 		amdgpu_dm_update_connector_after_detect(aconnector);
2714 		mutex_unlock(&aconnector->hpd_lock);
2715 	}
2716 	drm_connector_list_iter_end(&iter);
2717 
2718 	/* Force mode set in atomic commit */
2719 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2720 		new_crtc_state->active_changed = true;
2721 
2722 	/*
2723 	 * atomic_check is expected to create the dc states. We need to release
2724 	 * them here, since they were duplicated as part of the suspend
2725 	 * procedure.
2726 	 */
2727 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2728 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2729 		if (dm_new_crtc_state->stream) {
2730 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2731 			dc_stream_release(dm_new_crtc_state->stream);
2732 			dm_new_crtc_state->stream = NULL;
2733 		}
2734 	}
2735 
2736 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2737 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2738 		if (dm_new_plane_state->dc_state) {
2739 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2740 			dc_plane_state_release(dm_new_plane_state->dc_state);
2741 			dm_new_plane_state->dc_state = NULL;
2742 		}
2743 	}
2744 
2745 	drm_atomic_helper_resume(ddev, dm->cached_state);
2746 
2747 	dm->cached_state = NULL;
2748 
2749 	amdgpu_dm_irq_resume_late(adev);
2750 
2751 	amdgpu_dm_smu_write_watermarks_table(adev);
2752 
2753 	return 0;
2754 }
2755 
2756 /**
2757  * DOC: DM Lifecycle
2758  *
2759  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2760  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2761  * the base driver's device list to be initialized and torn down accordingly.
2762  *
2763  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2764  */
2765 
2766 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2767 	.name = "dm",
2768 	.early_init = dm_early_init,
2769 	.late_init = dm_late_init,
2770 	.sw_init = dm_sw_init,
2771 	.sw_fini = dm_sw_fini,
2772 	.early_fini = amdgpu_dm_early_fini,
2773 	.hw_init = dm_hw_init,
2774 	.hw_fini = dm_hw_fini,
2775 	.suspend = dm_suspend,
2776 	.resume = dm_resume,
2777 	.is_idle = dm_is_idle,
2778 	.wait_for_idle = dm_wait_for_idle,
2779 	.check_soft_reset = dm_check_soft_reset,
2780 	.soft_reset = dm_soft_reset,
2781 	.set_clockgating_state = dm_set_clockgating_state,
2782 	.set_powergating_state = dm_set_powergating_state,
2783 };
2784 
2785 const struct amdgpu_ip_block_version dm_ip_block =
2786 {
2787 	.type = AMD_IP_BLOCK_TYPE_DCE,
2788 	.major = 1,
2789 	.minor = 0,
2790 	.rev = 0,
2791 	.funcs = &amdgpu_dm_funcs,
2792 };
2793 
2794 
2795 /**
2796  * DOC: atomic
2797  *
2798  * *WIP*
2799  */
2800 
2801 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2802 	.fb_create = amdgpu_display_user_framebuffer_create,
2803 	.get_format_info = amd_get_format_info,
2804 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2805 	.atomic_check = amdgpu_dm_atomic_check,
2806 	.atomic_commit = drm_atomic_helper_commit,
2807 };
2808 
2809 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2810 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2811 };
2812 
2813 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2814 {
2815 	u32 max_avg, min_cll, max, min, q, r;
2816 	struct amdgpu_dm_backlight_caps *caps;
2817 	struct amdgpu_display_manager *dm;
2818 	struct drm_connector *conn_base;
2819 	struct amdgpu_device *adev;
2820 	struct dc_link *link = NULL;
2821 	static const u8 pre_computed_values[] = {
2822 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2823 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2824 	int i;
2825 
2826 	if (!aconnector || !aconnector->dc_link)
2827 		return;
2828 
2829 	link = aconnector->dc_link;
2830 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2831 		return;
2832 
2833 	conn_base = &aconnector->base;
2834 	adev = drm_to_adev(conn_base->dev);
2835 	dm = &adev->dm;
2836 	for (i = 0; i < dm->num_of_edps; i++) {
2837 		if (link == dm->backlight_link[i])
2838 			break;
2839 	}
2840 	if (i >= dm->num_of_edps)
2841 		return;
2842 	caps = &dm->backlight_caps[i];
2843 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2844 	caps->aux_support = false;
2845 	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2846 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2847 
2848 	if (caps->ext_caps->bits.oled == 1 /*||
2849 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2850 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2851 		caps->aux_support = true;
2852 
2853 	if (amdgpu_backlight == 0)
2854 		caps->aux_support = false;
2855 	else if (amdgpu_backlight == 1)
2856 		caps->aux_support = true;
2857 
2858 	/* From the specification (CTA-861-G), for calculating the maximum
2859 	 * luminance we need to use:
2860 	 *	Luminance = 50*2**(CV/32)
2861 	 * Where CV is a one-byte value.
2862 	 * For calculating this expression we may need float point precision;
2863 	 * to avoid this complexity level, we take advantage that CV is divided
2864 	 * by a constant. From the Euclids division algorithm, we know that CV
2865 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2866 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2867 	 * need to pre-compute the value of r/32. For pre-computing the values
2868 	 * We just used the following Ruby line:
2869 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2870 	 * The results of the above expressions can be verified at
2871 	 * pre_computed_values.
2872 	 */
2873 	q = max_avg >> 5;
2874 	r = max_avg % 32;
2875 	max = (1 << q) * pre_computed_values[r];
2876 
2877 	// min luminance: maxLum * (CV/255)^2 / 100
2878 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2879 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2880 
2881 	caps->aux_max_input_signal = max;
2882 	caps->aux_min_input_signal = min;
2883 }
2884 
2885 void amdgpu_dm_update_connector_after_detect(
2886 		struct amdgpu_dm_connector *aconnector)
2887 {
2888 	struct drm_connector *connector = &aconnector->base;
2889 	struct drm_device *dev = connector->dev;
2890 	struct dc_sink *sink;
2891 
2892 	/* MST handled by drm_mst framework */
2893 	if (aconnector->mst_mgr.mst_state == true)
2894 		return;
2895 
2896 	sink = aconnector->dc_link->local_sink;
2897 	if (sink)
2898 		dc_sink_retain(sink);
2899 
2900 	/*
2901 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2902 	 * the connector sink is set to either fake or physical sink depends on link status.
2903 	 * Skip if already done during boot.
2904 	 */
2905 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2906 			&& aconnector->dc_em_sink) {
2907 
2908 		/*
2909 		 * For S3 resume with headless use eml_sink to fake stream
2910 		 * because on resume connector->sink is set to NULL
2911 		 */
2912 		mutex_lock(&dev->mode_config.mutex);
2913 
2914 		if (sink) {
2915 			if (aconnector->dc_sink) {
2916 				amdgpu_dm_update_freesync_caps(connector, NULL);
2917 				/*
2918 				 * retain and release below are used to
2919 				 * bump up refcount for sink because the link doesn't point
2920 				 * to it anymore after disconnect, so on next crtc to connector
2921 				 * reshuffle by UMD we will get into unwanted dc_sink release
2922 				 */
2923 				dc_sink_release(aconnector->dc_sink);
2924 			}
2925 			aconnector->dc_sink = sink;
2926 			dc_sink_retain(aconnector->dc_sink);
2927 			amdgpu_dm_update_freesync_caps(connector,
2928 					aconnector->edid);
2929 		} else {
2930 			amdgpu_dm_update_freesync_caps(connector, NULL);
2931 			if (!aconnector->dc_sink) {
2932 				aconnector->dc_sink = aconnector->dc_em_sink;
2933 				dc_sink_retain(aconnector->dc_sink);
2934 			}
2935 		}
2936 
2937 		mutex_unlock(&dev->mode_config.mutex);
2938 
2939 		if (sink)
2940 			dc_sink_release(sink);
2941 		return;
2942 	}
2943 
2944 	/*
2945 	 * TODO: temporary guard to look for proper fix
2946 	 * if this sink is MST sink, we should not do anything
2947 	 */
2948 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2949 		dc_sink_release(sink);
2950 		return;
2951 	}
2952 
2953 	if (aconnector->dc_sink == sink) {
2954 		/*
2955 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2956 		 * Do nothing!!
2957 		 */
2958 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2959 				aconnector->connector_id);
2960 		if (sink)
2961 			dc_sink_release(sink);
2962 		return;
2963 	}
2964 
2965 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2966 		aconnector->connector_id, aconnector->dc_sink, sink);
2967 
2968 	mutex_lock(&dev->mode_config.mutex);
2969 
2970 	/*
2971 	 * 1. Update status of the drm connector
2972 	 * 2. Send an event and let userspace tell us what to do
2973 	 */
2974 	if (sink) {
2975 		/*
2976 		 * TODO: check if we still need the S3 mode update workaround.
2977 		 * If yes, put it here.
2978 		 */
2979 		if (aconnector->dc_sink) {
2980 			amdgpu_dm_update_freesync_caps(connector, NULL);
2981 			dc_sink_release(aconnector->dc_sink);
2982 		}
2983 
2984 		aconnector->dc_sink = sink;
2985 		dc_sink_retain(aconnector->dc_sink);
2986 		if (sink->dc_edid.length == 0) {
2987 			aconnector->edid = NULL;
2988 			if (aconnector->dc_link->aux_mode) {
2989 				drm_dp_cec_unset_edid(
2990 					&aconnector->dm_dp_aux.aux);
2991 			}
2992 		} else {
2993 			aconnector->edid =
2994 				(struct edid *)sink->dc_edid.raw_edid;
2995 
2996 			if (aconnector->dc_link->aux_mode)
2997 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2998 						    aconnector->edid);
2999 		}
3000 
3001 		drm_connector_update_edid_property(connector, aconnector->edid);
3002 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3003 		update_connector_ext_caps(aconnector);
3004 	} else {
3005 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3006 		amdgpu_dm_update_freesync_caps(connector, NULL);
3007 		drm_connector_update_edid_property(connector, NULL);
3008 		aconnector->num_modes = 0;
3009 		dc_sink_release(aconnector->dc_sink);
3010 		aconnector->dc_sink = NULL;
3011 		aconnector->edid = NULL;
3012 #ifdef CONFIG_DRM_AMD_DC_HDCP
3013 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3014 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3015 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3016 #endif
3017 	}
3018 
3019 	mutex_unlock(&dev->mode_config.mutex);
3020 
3021 	update_subconnector_property(aconnector);
3022 
3023 	if (sink)
3024 		dc_sink_release(sink);
3025 }
3026 
3027 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3028 {
3029 	struct drm_connector *connector = &aconnector->base;
3030 	struct drm_device *dev = connector->dev;
3031 	enum dc_connection_type new_connection_type = dc_connection_none;
3032 	struct amdgpu_device *adev = drm_to_adev(dev);
3033 #ifdef CONFIG_DRM_AMD_DC_HDCP
3034 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3035 #endif
3036 	bool ret = false;
3037 
3038 	if (adev->dm.disable_hpd_irq)
3039 		return;
3040 
3041 	/*
3042 	 * In case of failure or MST no need to update connector status or notify the OS
3043 	 * since (for MST case) MST does this in its own context.
3044 	 */
3045 	mutex_lock(&aconnector->hpd_lock);
3046 
3047 #ifdef CONFIG_DRM_AMD_DC_HDCP
3048 	if (adev->dm.hdcp_workqueue) {
3049 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3050 		dm_con_state->update_hdcp = true;
3051 	}
3052 #endif
3053 	if (aconnector->fake_enable)
3054 		aconnector->fake_enable = false;
3055 
3056 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3057 		DRM_ERROR("KMS: Failed to detect connector\n");
3058 
3059 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3060 		emulated_link_detect(aconnector->dc_link);
3061 
3062 		drm_modeset_lock_all(dev);
3063 		dm_restore_drm_connector_state(dev, connector);
3064 		drm_modeset_unlock_all(dev);
3065 
3066 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3067 			drm_kms_helper_connector_hotplug_event(connector);
3068 	} else {
3069 		mutex_lock(&adev->dm.dc_lock);
3070 		ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3071 		mutex_unlock(&adev->dm.dc_lock);
3072 		if (ret) {
3073 			amdgpu_dm_update_connector_after_detect(aconnector);
3074 
3075 			drm_modeset_lock_all(dev);
3076 			dm_restore_drm_connector_state(dev, connector);
3077 			drm_modeset_unlock_all(dev);
3078 
3079 			if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3080 				drm_kms_helper_connector_hotplug_event(connector);
3081 		}
3082 	}
3083 	mutex_unlock(&aconnector->hpd_lock);
3084 
3085 }
3086 
3087 static void handle_hpd_irq(void *param)
3088 {
3089 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3090 
3091 	handle_hpd_irq_helper(aconnector);
3092 
3093 }
3094 
3095 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3096 {
3097 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3098 	uint8_t dret;
3099 	bool new_irq_handled = false;
3100 	int dpcd_addr;
3101 	int dpcd_bytes_to_read;
3102 
3103 	const int max_process_count = 30;
3104 	int process_count = 0;
3105 
3106 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3107 
3108 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3109 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3110 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3111 		dpcd_addr = DP_SINK_COUNT;
3112 	} else {
3113 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3114 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3115 		dpcd_addr = DP_SINK_COUNT_ESI;
3116 	}
3117 
3118 	dret = drm_dp_dpcd_read(
3119 		&aconnector->dm_dp_aux.aux,
3120 		dpcd_addr,
3121 		esi,
3122 		dpcd_bytes_to_read);
3123 
3124 	while (dret == dpcd_bytes_to_read &&
3125 		process_count < max_process_count) {
3126 		uint8_t retry;
3127 		dret = 0;
3128 
3129 		process_count++;
3130 
3131 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3132 		/* handle HPD short pulse irq */
3133 		if (aconnector->mst_mgr.mst_state)
3134 			drm_dp_mst_hpd_irq(
3135 				&aconnector->mst_mgr,
3136 				esi,
3137 				&new_irq_handled);
3138 
3139 		if (new_irq_handled) {
3140 			/* ACK at DPCD to notify down stream */
3141 			const int ack_dpcd_bytes_to_write =
3142 				dpcd_bytes_to_read - 1;
3143 
3144 			for (retry = 0; retry < 3; retry++) {
3145 				uint8_t wret;
3146 
3147 				wret = drm_dp_dpcd_write(
3148 					&aconnector->dm_dp_aux.aux,
3149 					dpcd_addr + 1,
3150 					&esi[1],
3151 					ack_dpcd_bytes_to_write);
3152 				if (wret == ack_dpcd_bytes_to_write)
3153 					break;
3154 			}
3155 
3156 			/* check if there is new irq to be handled */
3157 			dret = drm_dp_dpcd_read(
3158 				&aconnector->dm_dp_aux.aux,
3159 				dpcd_addr,
3160 				esi,
3161 				dpcd_bytes_to_read);
3162 
3163 			new_irq_handled = false;
3164 		} else {
3165 			break;
3166 		}
3167 	}
3168 
3169 	if (process_count == max_process_count)
3170 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3171 }
3172 
3173 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3174 							union hpd_irq_data hpd_irq_data)
3175 {
3176 	struct hpd_rx_irq_offload_work *offload_work =
3177 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3178 
3179 	if (!offload_work) {
3180 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3181 		return;
3182 	}
3183 
3184 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3185 	offload_work->data = hpd_irq_data;
3186 	offload_work->offload_wq = offload_wq;
3187 
3188 	queue_work(offload_wq->wq, &offload_work->work);
3189 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3190 }
3191 
3192 static void handle_hpd_rx_irq(void *param)
3193 {
3194 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3195 	struct drm_connector *connector = &aconnector->base;
3196 	struct drm_device *dev = connector->dev;
3197 	struct dc_link *dc_link = aconnector->dc_link;
3198 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3199 	bool result = false;
3200 	enum dc_connection_type new_connection_type = dc_connection_none;
3201 	struct amdgpu_device *adev = drm_to_adev(dev);
3202 	union hpd_irq_data hpd_irq_data;
3203 	bool link_loss = false;
3204 	bool has_left_work = false;
3205 	int idx = aconnector->base.index;
3206 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3207 
3208 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3209 
3210 	if (adev->dm.disable_hpd_irq)
3211 		return;
3212 
3213 	/*
3214 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3215 	 * conflict, after implement i2c helper, this mutex should be
3216 	 * retired.
3217 	 */
3218 	mutex_lock(&aconnector->hpd_lock);
3219 
3220 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3221 						&link_loss, true, &has_left_work);
3222 
3223 	if (!has_left_work)
3224 		goto out;
3225 
3226 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3227 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3228 		goto out;
3229 	}
3230 
3231 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3232 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3233 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3234 			dm_handle_mst_sideband_msg(aconnector);
3235 			goto out;
3236 		}
3237 
3238 		if (link_loss) {
3239 			bool skip = false;
3240 
3241 			spin_lock(&offload_wq->offload_lock);
3242 			skip = offload_wq->is_handling_link_loss;
3243 
3244 			if (!skip)
3245 				offload_wq->is_handling_link_loss = true;
3246 
3247 			spin_unlock(&offload_wq->offload_lock);
3248 
3249 			if (!skip)
3250 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3251 
3252 			goto out;
3253 		}
3254 	}
3255 
3256 out:
3257 	if (result && !is_mst_root_connector) {
3258 		/* Downstream Port status changed. */
3259 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3260 			DRM_ERROR("KMS: Failed to detect connector\n");
3261 
3262 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3263 			emulated_link_detect(dc_link);
3264 
3265 			if (aconnector->fake_enable)
3266 				aconnector->fake_enable = false;
3267 
3268 			amdgpu_dm_update_connector_after_detect(aconnector);
3269 
3270 
3271 			drm_modeset_lock_all(dev);
3272 			dm_restore_drm_connector_state(dev, connector);
3273 			drm_modeset_unlock_all(dev);
3274 
3275 			drm_kms_helper_connector_hotplug_event(connector);
3276 		} else {
3277 			bool ret = false;
3278 
3279 			mutex_lock(&adev->dm.dc_lock);
3280 			ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3281 			mutex_unlock(&adev->dm.dc_lock);
3282 
3283 			if (ret) {
3284 				if (aconnector->fake_enable)
3285 					aconnector->fake_enable = false;
3286 
3287 				amdgpu_dm_update_connector_after_detect(aconnector);
3288 
3289 				drm_modeset_lock_all(dev);
3290 				dm_restore_drm_connector_state(dev, connector);
3291 				drm_modeset_unlock_all(dev);
3292 
3293 				drm_kms_helper_connector_hotplug_event(connector);
3294 			}
3295 		}
3296 	}
3297 #ifdef CONFIG_DRM_AMD_DC_HDCP
3298 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3299 		if (adev->dm.hdcp_workqueue)
3300 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3301 	}
3302 #endif
3303 
3304 	if (dc_link->type != dc_connection_mst_branch)
3305 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3306 
3307 	mutex_unlock(&aconnector->hpd_lock);
3308 }
3309 
3310 static void register_hpd_handlers(struct amdgpu_device *adev)
3311 {
3312 	struct drm_device *dev = adev_to_drm(adev);
3313 	struct drm_connector *connector;
3314 	struct amdgpu_dm_connector *aconnector;
3315 	const struct dc_link *dc_link;
3316 	struct dc_interrupt_params int_params = {0};
3317 
3318 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3319 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3320 
3321 	list_for_each_entry(connector,
3322 			&dev->mode_config.connector_list, head)	{
3323 
3324 		aconnector = to_amdgpu_dm_connector(connector);
3325 		dc_link = aconnector->dc_link;
3326 
3327 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3328 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3329 			int_params.irq_source = dc_link->irq_source_hpd;
3330 
3331 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3332 					handle_hpd_irq,
3333 					(void *) aconnector);
3334 		}
3335 
3336 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3337 
3338 			/* Also register for DP short pulse (hpd_rx). */
3339 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3340 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
3341 
3342 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3343 					handle_hpd_rx_irq,
3344 					(void *) aconnector);
3345 
3346 			if (adev->dm.hpd_rx_offload_wq)
3347 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3348 					aconnector;
3349 		}
3350 	}
3351 }
3352 
3353 #if defined(CONFIG_DRM_AMD_DC_SI)
3354 /* Register IRQ sources and initialize IRQ callbacks */
3355 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3356 {
3357 	struct dc *dc = adev->dm.dc;
3358 	struct common_irq_params *c_irq_params;
3359 	struct dc_interrupt_params int_params = {0};
3360 	int r;
3361 	int i;
3362 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3363 
3364 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3365 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3366 
3367 	/*
3368 	 * Actions of amdgpu_irq_add_id():
3369 	 * 1. Register a set() function with base driver.
3370 	 *    Base driver will call set() function to enable/disable an
3371 	 *    interrupt in DC hardware.
3372 	 * 2. Register amdgpu_dm_irq_handler().
3373 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3374 	 *    coming from DC hardware.
3375 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3376 	 *    for acknowledging and handling. */
3377 
3378 	/* Use VBLANK interrupt */
3379 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3380 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
3381 		if (r) {
3382 			DRM_ERROR("Failed to add crtc irq id!\n");
3383 			return r;
3384 		}
3385 
3386 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3387 		int_params.irq_source =
3388 			dc_interrupt_to_irq_source(dc, i+1 , 0);
3389 
3390 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3391 
3392 		c_irq_params->adev = adev;
3393 		c_irq_params->irq_src = int_params.irq_source;
3394 
3395 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3396 				dm_crtc_high_irq, c_irq_params);
3397 	}
3398 
3399 	/* Use GRPH_PFLIP interrupt */
3400 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3401 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3402 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3403 		if (r) {
3404 			DRM_ERROR("Failed to add page flip irq id!\n");
3405 			return r;
3406 		}
3407 
3408 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3409 		int_params.irq_source =
3410 			dc_interrupt_to_irq_source(dc, i, 0);
3411 
3412 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3413 
3414 		c_irq_params->adev = adev;
3415 		c_irq_params->irq_src = int_params.irq_source;
3416 
3417 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3418 				dm_pflip_high_irq, c_irq_params);
3419 
3420 	}
3421 
3422 	/* HPD */
3423 	r = amdgpu_irq_add_id(adev, client_id,
3424 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3425 	if (r) {
3426 		DRM_ERROR("Failed to add hpd irq id!\n");
3427 		return r;
3428 	}
3429 
3430 	register_hpd_handlers(adev);
3431 
3432 	return 0;
3433 }
3434 #endif
3435 
3436 /* Register IRQ sources and initialize IRQ callbacks */
3437 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3438 {
3439 	struct dc *dc = adev->dm.dc;
3440 	struct common_irq_params *c_irq_params;
3441 	struct dc_interrupt_params int_params = {0};
3442 	int r;
3443 	int i;
3444 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3445 
3446 	if (adev->family >= AMDGPU_FAMILY_AI)
3447 		client_id = SOC15_IH_CLIENTID_DCE;
3448 
3449 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3450 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3451 
3452 	/*
3453 	 * Actions of amdgpu_irq_add_id():
3454 	 * 1. Register a set() function with base driver.
3455 	 *    Base driver will call set() function to enable/disable an
3456 	 *    interrupt in DC hardware.
3457 	 * 2. Register amdgpu_dm_irq_handler().
3458 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3459 	 *    coming from DC hardware.
3460 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3461 	 *    for acknowledging and handling. */
3462 
3463 	/* Use VBLANK interrupt */
3464 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3465 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3466 		if (r) {
3467 			DRM_ERROR("Failed to add crtc irq id!\n");
3468 			return r;
3469 		}
3470 
3471 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3472 		int_params.irq_source =
3473 			dc_interrupt_to_irq_source(dc, i, 0);
3474 
3475 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3476 
3477 		c_irq_params->adev = adev;
3478 		c_irq_params->irq_src = int_params.irq_source;
3479 
3480 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3481 				dm_crtc_high_irq, c_irq_params);
3482 	}
3483 
3484 	/* Use VUPDATE interrupt */
3485 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3486 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3487 		if (r) {
3488 			DRM_ERROR("Failed to add vupdate irq id!\n");
3489 			return r;
3490 		}
3491 
3492 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3493 		int_params.irq_source =
3494 			dc_interrupt_to_irq_source(dc, i, 0);
3495 
3496 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3497 
3498 		c_irq_params->adev = adev;
3499 		c_irq_params->irq_src = int_params.irq_source;
3500 
3501 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3502 				dm_vupdate_high_irq, c_irq_params);
3503 	}
3504 
3505 	/* Use GRPH_PFLIP interrupt */
3506 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3507 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3508 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3509 		if (r) {
3510 			DRM_ERROR("Failed to add page flip irq id!\n");
3511 			return r;
3512 		}
3513 
3514 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3515 		int_params.irq_source =
3516 			dc_interrupt_to_irq_source(dc, i, 0);
3517 
3518 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3519 
3520 		c_irq_params->adev = adev;
3521 		c_irq_params->irq_src = int_params.irq_source;
3522 
3523 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3524 				dm_pflip_high_irq, c_irq_params);
3525 
3526 	}
3527 
3528 	/* HPD */
3529 	r = amdgpu_irq_add_id(adev, client_id,
3530 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3531 	if (r) {
3532 		DRM_ERROR("Failed to add hpd irq id!\n");
3533 		return r;
3534 	}
3535 
3536 	register_hpd_handlers(adev);
3537 
3538 	return 0;
3539 }
3540 
3541 /* Register IRQ sources and initialize IRQ callbacks */
3542 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3543 {
3544 	struct dc *dc = adev->dm.dc;
3545 	struct common_irq_params *c_irq_params;
3546 	struct dc_interrupt_params int_params = {0};
3547 	int r;
3548 	int i;
3549 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3550 	static const unsigned int vrtl_int_srcid[] = {
3551 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3552 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3553 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3554 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3555 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3556 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3557 	};
3558 #endif
3559 
3560 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3561 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3562 
3563 	/*
3564 	 * Actions of amdgpu_irq_add_id():
3565 	 * 1. Register a set() function with base driver.
3566 	 *    Base driver will call set() function to enable/disable an
3567 	 *    interrupt in DC hardware.
3568 	 * 2. Register amdgpu_dm_irq_handler().
3569 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3570 	 *    coming from DC hardware.
3571 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3572 	 *    for acknowledging and handling.
3573 	 */
3574 
3575 	/* Use VSTARTUP interrupt */
3576 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3577 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3578 			i++) {
3579 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3580 
3581 		if (r) {
3582 			DRM_ERROR("Failed to add crtc irq id!\n");
3583 			return r;
3584 		}
3585 
3586 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3587 		int_params.irq_source =
3588 			dc_interrupt_to_irq_source(dc, i, 0);
3589 
3590 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3591 
3592 		c_irq_params->adev = adev;
3593 		c_irq_params->irq_src = int_params.irq_source;
3594 
3595 		amdgpu_dm_irq_register_interrupt(
3596 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3597 	}
3598 
3599 	/* Use otg vertical line interrupt */
3600 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3601 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3602 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3603 				vrtl_int_srcid[i], &adev->vline0_irq);
3604 
3605 		if (r) {
3606 			DRM_ERROR("Failed to add vline0 irq id!\n");
3607 			return r;
3608 		}
3609 
3610 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3611 		int_params.irq_source =
3612 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3613 
3614 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3615 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3616 			break;
3617 		}
3618 
3619 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3620 					- DC_IRQ_SOURCE_DC1_VLINE0];
3621 
3622 		c_irq_params->adev = adev;
3623 		c_irq_params->irq_src = int_params.irq_source;
3624 
3625 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3626 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3627 	}
3628 #endif
3629 
3630 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3631 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3632 	 * to trigger at end of each vblank, regardless of state of the lock,
3633 	 * matching DCE behaviour.
3634 	 */
3635 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3636 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3637 	     i++) {
3638 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3639 
3640 		if (r) {
3641 			DRM_ERROR("Failed to add vupdate irq id!\n");
3642 			return r;
3643 		}
3644 
3645 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3646 		int_params.irq_source =
3647 			dc_interrupt_to_irq_source(dc, i, 0);
3648 
3649 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3650 
3651 		c_irq_params->adev = adev;
3652 		c_irq_params->irq_src = int_params.irq_source;
3653 
3654 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3655 				dm_vupdate_high_irq, c_irq_params);
3656 	}
3657 
3658 	/* Use GRPH_PFLIP interrupt */
3659 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3660 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3661 			i++) {
3662 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3663 		if (r) {
3664 			DRM_ERROR("Failed to add page flip irq id!\n");
3665 			return r;
3666 		}
3667 
3668 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3669 		int_params.irq_source =
3670 			dc_interrupt_to_irq_source(dc, i, 0);
3671 
3672 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3673 
3674 		c_irq_params->adev = adev;
3675 		c_irq_params->irq_src = int_params.irq_source;
3676 
3677 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3678 				dm_pflip_high_irq, c_irq_params);
3679 
3680 	}
3681 
3682 	/* HPD */
3683 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3684 			&adev->hpd_irq);
3685 	if (r) {
3686 		DRM_ERROR("Failed to add hpd irq id!\n");
3687 		return r;
3688 	}
3689 
3690 	register_hpd_handlers(adev);
3691 
3692 	return 0;
3693 }
3694 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3695 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3696 {
3697 	struct dc *dc = adev->dm.dc;
3698 	struct common_irq_params *c_irq_params;
3699 	struct dc_interrupt_params int_params = {0};
3700 	int r, i;
3701 
3702 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3703 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3704 
3705 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3706 			&adev->dmub_outbox_irq);
3707 	if (r) {
3708 		DRM_ERROR("Failed to add outbox irq id!\n");
3709 		return r;
3710 	}
3711 
3712 	if (dc->ctx->dmub_srv) {
3713 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3714 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3715 		int_params.irq_source =
3716 		dc_interrupt_to_irq_source(dc, i, 0);
3717 
3718 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3719 
3720 		c_irq_params->adev = adev;
3721 		c_irq_params->irq_src = int_params.irq_source;
3722 
3723 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3724 				dm_dmub_outbox1_low_irq, c_irq_params);
3725 	}
3726 
3727 	return 0;
3728 }
3729 
3730 /*
3731  * Acquires the lock for the atomic state object and returns
3732  * the new atomic state.
3733  *
3734  * This should only be called during atomic check.
3735  */
3736 int dm_atomic_get_state(struct drm_atomic_state *state,
3737 			struct dm_atomic_state **dm_state)
3738 {
3739 	struct drm_device *dev = state->dev;
3740 	struct amdgpu_device *adev = drm_to_adev(dev);
3741 	struct amdgpu_display_manager *dm = &adev->dm;
3742 	struct drm_private_state *priv_state;
3743 
3744 	if (*dm_state)
3745 		return 0;
3746 
3747 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3748 	if (IS_ERR(priv_state))
3749 		return PTR_ERR(priv_state);
3750 
3751 	*dm_state = to_dm_atomic_state(priv_state);
3752 
3753 	return 0;
3754 }
3755 
3756 static struct dm_atomic_state *
3757 dm_atomic_get_new_state(struct drm_atomic_state *state)
3758 {
3759 	struct drm_device *dev = state->dev;
3760 	struct amdgpu_device *adev = drm_to_adev(dev);
3761 	struct amdgpu_display_manager *dm = &adev->dm;
3762 	struct drm_private_obj *obj;
3763 	struct drm_private_state *new_obj_state;
3764 	int i;
3765 
3766 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3767 		if (obj->funcs == dm->atomic_obj.funcs)
3768 			return to_dm_atomic_state(new_obj_state);
3769 	}
3770 
3771 	return NULL;
3772 }
3773 
3774 static struct drm_private_state *
3775 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3776 {
3777 	struct dm_atomic_state *old_state, *new_state;
3778 
3779 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3780 	if (!new_state)
3781 		return NULL;
3782 
3783 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3784 
3785 	old_state = to_dm_atomic_state(obj->state);
3786 
3787 	if (old_state && old_state->context)
3788 		new_state->context = dc_copy_state(old_state->context);
3789 
3790 	if (!new_state->context) {
3791 		kfree(new_state);
3792 		return NULL;
3793 	}
3794 
3795 	return &new_state->base;
3796 }
3797 
3798 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3799 				    struct drm_private_state *state)
3800 {
3801 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3802 
3803 	if (dm_state && dm_state->context)
3804 		dc_release_state(dm_state->context);
3805 
3806 	kfree(dm_state);
3807 }
3808 
3809 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3810 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3811 	.atomic_destroy_state = dm_atomic_destroy_state,
3812 };
3813 
3814 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3815 {
3816 	struct dm_atomic_state *state;
3817 	int r;
3818 
3819 	adev->mode_info.mode_config_initialized = true;
3820 
3821 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3822 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3823 
3824 	adev_to_drm(adev)->mode_config.max_width = 16384;
3825 	adev_to_drm(adev)->mode_config.max_height = 16384;
3826 
3827 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3828 	/* disable prefer shadow for now due to hibernation issues */
3829 	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3830 	/* indicates support for immediate flip */
3831 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3832 
3833 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3834 
3835 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3836 	if (!state)
3837 		return -ENOMEM;
3838 
3839 	state->context = dc_create_state(adev->dm.dc);
3840 	if (!state->context) {
3841 		kfree(state);
3842 		return -ENOMEM;
3843 	}
3844 
3845 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3846 
3847 	drm_atomic_private_obj_init(adev_to_drm(adev),
3848 				    &adev->dm.atomic_obj,
3849 				    &state->base,
3850 				    &dm_atomic_state_funcs);
3851 
3852 	r = amdgpu_display_modeset_create_props(adev);
3853 	if (r) {
3854 		dc_release_state(state->context);
3855 		kfree(state);
3856 		return r;
3857 	}
3858 
3859 	r = amdgpu_dm_audio_init(adev);
3860 	if (r) {
3861 		dc_release_state(state->context);
3862 		kfree(state);
3863 		return r;
3864 	}
3865 
3866 	return 0;
3867 }
3868 
3869 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3870 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3871 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3872 
3873 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3874 					    int bl_idx)
3875 {
3876 #if defined(CONFIG_ACPI)
3877 	struct amdgpu_dm_backlight_caps caps;
3878 
3879 	memset(&caps, 0, sizeof(caps));
3880 
3881 	if (dm->backlight_caps[bl_idx].caps_valid)
3882 		return;
3883 
3884 	amdgpu_acpi_get_backlight_caps(&caps);
3885 	if (caps.caps_valid) {
3886 		dm->backlight_caps[bl_idx].caps_valid = true;
3887 		if (caps.aux_support)
3888 			return;
3889 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3890 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3891 	} else {
3892 		dm->backlight_caps[bl_idx].min_input_signal =
3893 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3894 		dm->backlight_caps[bl_idx].max_input_signal =
3895 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3896 	}
3897 #else
3898 	if (dm->backlight_caps[bl_idx].aux_support)
3899 		return;
3900 
3901 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3902 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3903 #endif
3904 }
3905 
3906 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3907 				unsigned *min, unsigned *max)
3908 {
3909 	if (!caps)
3910 		return 0;
3911 
3912 	if (caps->aux_support) {
3913 		// Firmware limits are in nits, DC API wants millinits.
3914 		*max = 1000 * caps->aux_max_input_signal;
3915 		*min = 1000 * caps->aux_min_input_signal;
3916 	} else {
3917 		// Firmware limits are 8-bit, PWM control is 16-bit.
3918 		*max = 0x101 * caps->max_input_signal;
3919 		*min = 0x101 * caps->min_input_signal;
3920 	}
3921 	return 1;
3922 }
3923 
3924 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3925 					uint32_t brightness)
3926 {
3927 	unsigned min, max;
3928 
3929 	if (!get_brightness_range(caps, &min, &max))
3930 		return brightness;
3931 
3932 	// Rescale 0..255 to min..max
3933 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3934 				       AMDGPU_MAX_BL_LEVEL);
3935 }
3936 
3937 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3938 				      uint32_t brightness)
3939 {
3940 	unsigned min, max;
3941 
3942 	if (!get_brightness_range(caps, &min, &max))
3943 		return brightness;
3944 
3945 	if (brightness < min)
3946 		return 0;
3947 	// Rescale min..max to 0..255
3948 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3949 				 max - min);
3950 }
3951 
3952 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3953 					 int bl_idx,
3954 					 u32 user_brightness)
3955 {
3956 	struct amdgpu_dm_backlight_caps caps;
3957 	struct dc_link *link;
3958 	u32 brightness;
3959 	bool rc;
3960 
3961 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3962 	caps = dm->backlight_caps[bl_idx];
3963 
3964 	dm->brightness[bl_idx] = user_brightness;
3965 	/* update scratch register */
3966 	if (bl_idx == 0)
3967 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3968 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3969 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3970 
3971 	/* Change brightness based on AUX property */
3972 	if (caps.aux_support) {
3973 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3974 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3975 		if (!rc)
3976 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3977 	} else {
3978 		rc = dc_link_set_backlight_level(link, brightness, 0);
3979 		if (!rc)
3980 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3981 	}
3982 
3983 	if (rc)
3984 		dm->actual_brightness[bl_idx] = user_brightness;
3985 }
3986 
3987 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3988 {
3989 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3990 	int i;
3991 
3992 	for (i = 0; i < dm->num_of_edps; i++) {
3993 		if (bd == dm->backlight_dev[i])
3994 			break;
3995 	}
3996 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3997 		i = 0;
3998 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3999 
4000 	return 0;
4001 }
4002 
4003 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4004 					 int bl_idx)
4005 {
4006 	struct amdgpu_dm_backlight_caps caps;
4007 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4008 
4009 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4010 	caps = dm->backlight_caps[bl_idx];
4011 
4012 	if (caps.aux_support) {
4013 		u32 avg, peak;
4014 		bool rc;
4015 
4016 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4017 		if (!rc)
4018 			return dm->brightness[bl_idx];
4019 		return convert_brightness_to_user(&caps, avg);
4020 	} else {
4021 		int ret = dc_link_get_backlight_level(link);
4022 
4023 		if (ret == DC_ERROR_UNEXPECTED)
4024 			return dm->brightness[bl_idx];
4025 		return convert_brightness_to_user(&caps, ret);
4026 	}
4027 }
4028 
4029 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4030 {
4031 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4032 	int i;
4033 
4034 	for (i = 0; i < dm->num_of_edps; i++) {
4035 		if (bd == dm->backlight_dev[i])
4036 			break;
4037 	}
4038 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4039 		i = 0;
4040 	return amdgpu_dm_backlight_get_level(dm, i);
4041 }
4042 
4043 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4044 	.options = BL_CORE_SUSPENDRESUME,
4045 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4046 	.update_status	= amdgpu_dm_backlight_update_status,
4047 };
4048 
4049 static void
4050 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4051 {
4052 	char bl_name[16];
4053 	struct backlight_properties props = { 0 };
4054 
4055 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4056 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4057 
4058 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4059 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4060 	props.type = BACKLIGHT_RAW;
4061 
4062 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4063 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4064 
4065 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4066 								       adev_to_drm(dm->adev)->dev,
4067 								       dm,
4068 								       &amdgpu_dm_backlight_ops,
4069 								       &props);
4070 
4071 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4072 		DRM_ERROR("DM: Backlight registration failed!\n");
4073 	else
4074 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4075 }
4076 
4077 static int initialize_plane(struct amdgpu_display_manager *dm,
4078 			    struct amdgpu_mode_info *mode_info, int plane_id,
4079 			    enum drm_plane_type plane_type,
4080 			    const struct dc_plane_cap *plane_cap)
4081 {
4082 	struct drm_plane *plane;
4083 	unsigned long possible_crtcs;
4084 	int ret = 0;
4085 
4086 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4087 	if (!plane) {
4088 		DRM_ERROR("KMS: Failed to allocate plane\n");
4089 		return -ENOMEM;
4090 	}
4091 	plane->type = plane_type;
4092 
4093 	/*
4094 	 * HACK: IGT tests expect that the primary plane for a CRTC
4095 	 * can only have one possible CRTC. Only expose support for
4096 	 * any CRTC if they're not going to be used as a primary plane
4097 	 * for a CRTC - like overlay or underlay planes.
4098 	 */
4099 	possible_crtcs = 1 << plane_id;
4100 	if (plane_id >= dm->dc->caps.max_streams)
4101 		possible_crtcs = 0xff;
4102 
4103 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4104 
4105 	if (ret) {
4106 		DRM_ERROR("KMS: Failed to initialize plane\n");
4107 		kfree(plane);
4108 		return ret;
4109 	}
4110 
4111 	if (mode_info)
4112 		mode_info->planes[plane_id] = plane;
4113 
4114 	return ret;
4115 }
4116 
4117 
4118 static void register_backlight_device(struct amdgpu_display_manager *dm,
4119 				      struct dc_link *link)
4120 {
4121 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4122 	    link->type != dc_connection_none) {
4123 		/*
4124 		 * Event if registration failed, we should continue with
4125 		 * DM initialization because not having a backlight control
4126 		 * is better then a black screen.
4127 		 */
4128 		if (!dm->backlight_dev[dm->num_of_edps])
4129 			amdgpu_dm_register_backlight_device(dm);
4130 
4131 		if (dm->backlight_dev[dm->num_of_edps]) {
4132 			dm->backlight_link[dm->num_of_edps] = link;
4133 			dm->num_of_edps++;
4134 		}
4135 	}
4136 }
4137 
4138 
4139 /*
4140  * In this architecture, the association
4141  * connector -> encoder -> crtc
4142  * id not really requried. The crtc and connector will hold the
4143  * display_index as an abstraction to use with DAL component
4144  *
4145  * Returns 0 on success
4146  */
4147 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4148 {
4149 	struct amdgpu_display_manager *dm = &adev->dm;
4150 	int32_t i;
4151 	struct amdgpu_dm_connector *aconnector = NULL;
4152 	struct amdgpu_encoder *aencoder = NULL;
4153 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4154 	uint32_t link_cnt;
4155 	int32_t primary_planes;
4156 	enum dc_connection_type new_connection_type = dc_connection_none;
4157 	const struct dc_plane_cap *plane;
4158 	bool psr_feature_enabled = false;
4159 
4160 	dm->display_indexes_num = dm->dc->caps.max_streams;
4161 	/* Update the actual used number of crtc */
4162 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4163 
4164 	link_cnt = dm->dc->caps.max_links;
4165 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4166 		DRM_ERROR("DM: Failed to initialize mode config\n");
4167 		return -EINVAL;
4168 	}
4169 
4170 	/* There is one primary plane per CRTC */
4171 	primary_planes = dm->dc->caps.max_streams;
4172 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4173 
4174 	/*
4175 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4176 	 * Order is reversed to match iteration order in atomic check.
4177 	 */
4178 	for (i = (primary_planes - 1); i >= 0; i--) {
4179 		plane = &dm->dc->caps.planes[i];
4180 
4181 		if (initialize_plane(dm, mode_info, i,
4182 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4183 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4184 			goto fail;
4185 		}
4186 	}
4187 
4188 	/*
4189 	 * Initialize overlay planes, index starting after primary planes.
4190 	 * These planes have a higher DRM index than the primary planes since
4191 	 * they should be considered as having a higher z-order.
4192 	 * Order is reversed to match iteration order in atomic check.
4193 	 *
4194 	 * Only support DCN for now, and only expose one so we don't encourage
4195 	 * userspace to use up all the pipes.
4196 	 */
4197 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4198 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4199 
4200 		/* Do not create overlay if MPO disabled */
4201 		if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
4202 			break;
4203 
4204 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4205 			continue;
4206 
4207 		if (!plane->blends_with_above || !plane->blends_with_below)
4208 			continue;
4209 
4210 		if (!plane->pixel_format_support.argb8888)
4211 			continue;
4212 
4213 		if (initialize_plane(dm, NULL, primary_planes + i,
4214 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4215 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4216 			goto fail;
4217 		}
4218 
4219 		/* Only create one overlay plane. */
4220 		break;
4221 	}
4222 
4223 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4224 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4225 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4226 			goto fail;
4227 		}
4228 
4229 	/* Use Outbox interrupt */
4230 	switch (adev->ip_versions[DCE_HWIP][0]) {
4231 	case IP_VERSION(3, 0, 0):
4232 	case IP_VERSION(3, 1, 2):
4233 	case IP_VERSION(3, 1, 3):
4234 	case IP_VERSION(3, 1, 4):
4235 	case IP_VERSION(3, 1, 5):
4236 	case IP_VERSION(3, 1, 6):
4237 	case IP_VERSION(3, 2, 0):
4238 	case IP_VERSION(3, 2, 1):
4239 	case IP_VERSION(2, 1, 0):
4240 		if (register_outbox_irq_handlers(dm->adev)) {
4241 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4242 			goto fail;
4243 		}
4244 		break;
4245 	default:
4246 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4247 			      adev->ip_versions[DCE_HWIP][0]);
4248 	}
4249 
4250 	/* Determine whether to enable PSR support by default. */
4251 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4252 		switch (adev->ip_versions[DCE_HWIP][0]) {
4253 		case IP_VERSION(3, 1, 2):
4254 		case IP_VERSION(3, 1, 3):
4255 		case IP_VERSION(3, 1, 4):
4256 		case IP_VERSION(3, 1, 5):
4257 		case IP_VERSION(3, 1, 6):
4258 		case IP_VERSION(3, 2, 0):
4259 		case IP_VERSION(3, 2, 1):
4260 			psr_feature_enabled = true;
4261 			break;
4262 		default:
4263 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4264 			break;
4265 		}
4266 	}
4267 
4268 	/* loops over all connectors on the board */
4269 	for (i = 0; i < link_cnt; i++) {
4270 		struct dc_link *link = NULL;
4271 
4272 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4273 			DRM_ERROR(
4274 				"KMS: Cannot support more than %d display indexes\n",
4275 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4276 			continue;
4277 		}
4278 
4279 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4280 		if (!aconnector)
4281 			goto fail;
4282 
4283 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4284 		if (!aencoder)
4285 			goto fail;
4286 
4287 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4288 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4289 			goto fail;
4290 		}
4291 
4292 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4293 			DRM_ERROR("KMS: Failed to initialize connector\n");
4294 			goto fail;
4295 		}
4296 
4297 		link = dc_get_link_at_index(dm->dc, i);
4298 
4299 		if (!dc_link_detect_sink(link, &new_connection_type))
4300 			DRM_ERROR("KMS: Failed to detect connector\n");
4301 
4302 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4303 			emulated_link_detect(link);
4304 			amdgpu_dm_update_connector_after_detect(aconnector);
4305 		} else {
4306 			bool ret = false;
4307 
4308 			mutex_lock(&dm->dc_lock);
4309 			ret = dc_link_detect(link, DETECT_REASON_BOOT);
4310 			mutex_unlock(&dm->dc_lock);
4311 
4312 			if (ret) {
4313 				amdgpu_dm_update_connector_after_detect(aconnector);
4314 				register_backlight_device(dm, link);
4315 
4316 				if (dm->num_of_edps)
4317 					update_connector_ext_caps(aconnector);
4318 
4319 				if (psr_feature_enabled)
4320 					amdgpu_dm_set_psr_caps(link);
4321 
4322 				/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4323 				 * PSR is also supported.
4324 				 */
4325 				if (link->psr_settings.psr_feature_enabled)
4326 					adev_to_drm(adev)->vblank_disable_immediate = false;
4327 			}
4328 		}
4329 	}
4330 
4331 	/* Software is initialized. Now we can register interrupt handlers. */
4332 	switch (adev->asic_type) {
4333 #if defined(CONFIG_DRM_AMD_DC_SI)
4334 	case CHIP_TAHITI:
4335 	case CHIP_PITCAIRN:
4336 	case CHIP_VERDE:
4337 	case CHIP_OLAND:
4338 		if (dce60_register_irq_handlers(dm->adev)) {
4339 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4340 			goto fail;
4341 		}
4342 		break;
4343 #endif
4344 	case CHIP_BONAIRE:
4345 	case CHIP_HAWAII:
4346 	case CHIP_KAVERI:
4347 	case CHIP_KABINI:
4348 	case CHIP_MULLINS:
4349 	case CHIP_TONGA:
4350 	case CHIP_FIJI:
4351 	case CHIP_CARRIZO:
4352 	case CHIP_STONEY:
4353 	case CHIP_POLARIS11:
4354 	case CHIP_POLARIS10:
4355 	case CHIP_POLARIS12:
4356 	case CHIP_VEGAM:
4357 	case CHIP_VEGA10:
4358 	case CHIP_VEGA12:
4359 	case CHIP_VEGA20:
4360 		if (dce110_register_irq_handlers(dm->adev)) {
4361 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4362 			goto fail;
4363 		}
4364 		break;
4365 	default:
4366 		switch (adev->ip_versions[DCE_HWIP][0]) {
4367 		case IP_VERSION(1, 0, 0):
4368 		case IP_VERSION(1, 0, 1):
4369 		case IP_VERSION(2, 0, 2):
4370 		case IP_VERSION(2, 0, 3):
4371 		case IP_VERSION(2, 0, 0):
4372 		case IP_VERSION(2, 1, 0):
4373 		case IP_VERSION(3, 0, 0):
4374 		case IP_VERSION(3, 0, 2):
4375 		case IP_VERSION(3, 0, 3):
4376 		case IP_VERSION(3, 0, 1):
4377 		case IP_VERSION(3, 1, 2):
4378 		case IP_VERSION(3, 1, 3):
4379 		case IP_VERSION(3, 1, 4):
4380 		case IP_VERSION(3, 1, 5):
4381 		case IP_VERSION(3, 1, 6):
4382 		case IP_VERSION(3, 2, 0):
4383 		case IP_VERSION(3, 2, 1):
4384 			if (dcn10_register_irq_handlers(dm->adev)) {
4385 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4386 				goto fail;
4387 			}
4388 			break;
4389 		default:
4390 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4391 					adev->ip_versions[DCE_HWIP][0]);
4392 			goto fail;
4393 		}
4394 		break;
4395 	}
4396 
4397 	return 0;
4398 fail:
4399 	kfree(aencoder);
4400 	kfree(aconnector);
4401 
4402 	return -EINVAL;
4403 }
4404 
4405 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4406 {
4407 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4408 	return;
4409 }
4410 
4411 /******************************************************************************
4412  * amdgpu_display_funcs functions
4413  *****************************************************************************/
4414 
4415 /*
4416  * dm_bandwidth_update - program display watermarks
4417  *
4418  * @adev: amdgpu_device pointer
4419  *
4420  * Calculate and program the display watermarks and line buffer allocation.
4421  */
4422 static void dm_bandwidth_update(struct amdgpu_device *adev)
4423 {
4424 	/* TODO: implement later */
4425 }
4426 
4427 static const struct amdgpu_display_funcs dm_display_funcs = {
4428 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4429 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4430 	.backlight_set_level = NULL, /* never called for DC */
4431 	.backlight_get_level = NULL, /* never called for DC */
4432 	.hpd_sense = NULL,/* called unconditionally */
4433 	.hpd_set_polarity = NULL, /* called unconditionally */
4434 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4435 	.page_flip_get_scanoutpos =
4436 		dm_crtc_get_scanoutpos,/* called unconditionally */
4437 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4438 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4439 };
4440 
4441 #if defined(CONFIG_DEBUG_KERNEL_DC)
4442 
4443 static ssize_t s3_debug_store(struct device *device,
4444 			      struct device_attribute *attr,
4445 			      const char *buf,
4446 			      size_t count)
4447 {
4448 	int ret;
4449 	int s3_state;
4450 	struct drm_device *drm_dev = dev_get_drvdata(device);
4451 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4452 
4453 	ret = kstrtoint(buf, 0, &s3_state);
4454 
4455 	if (ret == 0) {
4456 		if (s3_state) {
4457 			dm_resume(adev);
4458 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4459 		} else
4460 			dm_suspend(adev);
4461 	}
4462 
4463 	return ret == 0 ? count : 0;
4464 }
4465 
4466 DEVICE_ATTR_WO(s3_debug);
4467 
4468 #endif
4469 
4470 static int dm_early_init(void *handle)
4471 {
4472 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4473 
4474 	switch (adev->asic_type) {
4475 #if defined(CONFIG_DRM_AMD_DC_SI)
4476 	case CHIP_TAHITI:
4477 	case CHIP_PITCAIRN:
4478 	case CHIP_VERDE:
4479 		adev->mode_info.num_crtc = 6;
4480 		adev->mode_info.num_hpd = 6;
4481 		adev->mode_info.num_dig = 6;
4482 		break;
4483 	case CHIP_OLAND:
4484 		adev->mode_info.num_crtc = 2;
4485 		adev->mode_info.num_hpd = 2;
4486 		adev->mode_info.num_dig = 2;
4487 		break;
4488 #endif
4489 	case CHIP_BONAIRE:
4490 	case CHIP_HAWAII:
4491 		adev->mode_info.num_crtc = 6;
4492 		adev->mode_info.num_hpd = 6;
4493 		adev->mode_info.num_dig = 6;
4494 		break;
4495 	case CHIP_KAVERI:
4496 		adev->mode_info.num_crtc = 4;
4497 		adev->mode_info.num_hpd = 6;
4498 		adev->mode_info.num_dig = 7;
4499 		break;
4500 	case CHIP_KABINI:
4501 	case CHIP_MULLINS:
4502 		adev->mode_info.num_crtc = 2;
4503 		adev->mode_info.num_hpd = 6;
4504 		adev->mode_info.num_dig = 6;
4505 		break;
4506 	case CHIP_FIJI:
4507 	case CHIP_TONGA:
4508 		adev->mode_info.num_crtc = 6;
4509 		adev->mode_info.num_hpd = 6;
4510 		adev->mode_info.num_dig = 7;
4511 		break;
4512 	case CHIP_CARRIZO:
4513 		adev->mode_info.num_crtc = 3;
4514 		adev->mode_info.num_hpd = 6;
4515 		adev->mode_info.num_dig = 9;
4516 		break;
4517 	case CHIP_STONEY:
4518 		adev->mode_info.num_crtc = 2;
4519 		adev->mode_info.num_hpd = 6;
4520 		adev->mode_info.num_dig = 9;
4521 		break;
4522 	case CHIP_POLARIS11:
4523 	case CHIP_POLARIS12:
4524 		adev->mode_info.num_crtc = 5;
4525 		adev->mode_info.num_hpd = 5;
4526 		adev->mode_info.num_dig = 5;
4527 		break;
4528 	case CHIP_POLARIS10:
4529 	case CHIP_VEGAM:
4530 		adev->mode_info.num_crtc = 6;
4531 		adev->mode_info.num_hpd = 6;
4532 		adev->mode_info.num_dig = 6;
4533 		break;
4534 	case CHIP_VEGA10:
4535 	case CHIP_VEGA12:
4536 	case CHIP_VEGA20:
4537 		adev->mode_info.num_crtc = 6;
4538 		adev->mode_info.num_hpd = 6;
4539 		adev->mode_info.num_dig = 6;
4540 		break;
4541 	default:
4542 
4543 		switch (adev->ip_versions[DCE_HWIP][0]) {
4544 		case IP_VERSION(2, 0, 2):
4545 		case IP_VERSION(3, 0, 0):
4546 			adev->mode_info.num_crtc = 6;
4547 			adev->mode_info.num_hpd = 6;
4548 			adev->mode_info.num_dig = 6;
4549 			break;
4550 		case IP_VERSION(2, 0, 0):
4551 		case IP_VERSION(3, 0, 2):
4552 			adev->mode_info.num_crtc = 5;
4553 			adev->mode_info.num_hpd = 5;
4554 			adev->mode_info.num_dig = 5;
4555 			break;
4556 		case IP_VERSION(2, 0, 3):
4557 		case IP_VERSION(3, 0, 3):
4558 			adev->mode_info.num_crtc = 2;
4559 			adev->mode_info.num_hpd = 2;
4560 			adev->mode_info.num_dig = 2;
4561 			break;
4562 		case IP_VERSION(1, 0, 0):
4563 		case IP_VERSION(1, 0, 1):
4564 		case IP_VERSION(3, 0, 1):
4565 		case IP_VERSION(2, 1, 0):
4566 		case IP_VERSION(3, 1, 2):
4567 		case IP_VERSION(3, 1, 3):
4568 		case IP_VERSION(3, 1, 4):
4569 		case IP_VERSION(3, 1, 5):
4570 		case IP_VERSION(3, 1, 6):
4571 		case IP_VERSION(3, 2, 0):
4572 		case IP_VERSION(3, 2, 1):
4573 			adev->mode_info.num_crtc = 4;
4574 			adev->mode_info.num_hpd = 4;
4575 			adev->mode_info.num_dig = 4;
4576 			break;
4577 		default:
4578 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4579 					adev->ip_versions[DCE_HWIP][0]);
4580 			return -EINVAL;
4581 		}
4582 		break;
4583 	}
4584 
4585 	amdgpu_dm_set_irq_funcs(adev);
4586 
4587 	if (adev->mode_info.funcs == NULL)
4588 		adev->mode_info.funcs = &dm_display_funcs;
4589 
4590 	/*
4591 	 * Note: Do NOT change adev->audio_endpt_rreg and
4592 	 * adev->audio_endpt_wreg because they are initialised in
4593 	 * amdgpu_device_init()
4594 	 */
4595 #if defined(CONFIG_DEBUG_KERNEL_DC)
4596 	device_create_file(
4597 		adev_to_drm(adev)->dev,
4598 		&dev_attr_s3_debug);
4599 #endif
4600 
4601 	return 0;
4602 }
4603 
4604 static bool modereset_required(struct drm_crtc_state *crtc_state)
4605 {
4606 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4607 }
4608 
4609 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4610 {
4611 	drm_encoder_cleanup(encoder);
4612 	kfree(encoder);
4613 }
4614 
4615 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4616 	.destroy = amdgpu_dm_encoder_destroy,
4617 };
4618 
4619 static int
4620 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4621 			    const enum surface_pixel_format format,
4622 			    enum dc_color_space *color_space)
4623 {
4624 	bool full_range;
4625 
4626 	*color_space = COLOR_SPACE_SRGB;
4627 
4628 	/* DRM color properties only affect non-RGB formats. */
4629 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4630 		return 0;
4631 
4632 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4633 
4634 	switch (plane_state->color_encoding) {
4635 	case DRM_COLOR_YCBCR_BT601:
4636 		if (full_range)
4637 			*color_space = COLOR_SPACE_YCBCR601;
4638 		else
4639 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4640 		break;
4641 
4642 	case DRM_COLOR_YCBCR_BT709:
4643 		if (full_range)
4644 			*color_space = COLOR_SPACE_YCBCR709;
4645 		else
4646 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4647 		break;
4648 
4649 	case DRM_COLOR_YCBCR_BT2020:
4650 		if (full_range)
4651 			*color_space = COLOR_SPACE_2020_YCBCR;
4652 		else
4653 			return -EINVAL;
4654 		break;
4655 
4656 	default:
4657 		return -EINVAL;
4658 	}
4659 
4660 	return 0;
4661 }
4662 
4663 static int
4664 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4665 			    const struct drm_plane_state *plane_state,
4666 			    const uint64_t tiling_flags,
4667 			    struct dc_plane_info *plane_info,
4668 			    struct dc_plane_address *address,
4669 			    bool tmz_surface,
4670 			    bool force_disable_dcc)
4671 {
4672 	const struct drm_framebuffer *fb = plane_state->fb;
4673 	const struct amdgpu_framebuffer *afb =
4674 		to_amdgpu_framebuffer(plane_state->fb);
4675 	int ret;
4676 
4677 	memset(plane_info, 0, sizeof(*plane_info));
4678 
4679 	switch (fb->format->format) {
4680 	case DRM_FORMAT_C8:
4681 		plane_info->format =
4682 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4683 		break;
4684 	case DRM_FORMAT_RGB565:
4685 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4686 		break;
4687 	case DRM_FORMAT_XRGB8888:
4688 	case DRM_FORMAT_ARGB8888:
4689 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4690 		break;
4691 	case DRM_FORMAT_XRGB2101010:
4692 	case DRM_FORMAT_ARGB2101010:
4693 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4694 		break;
4695 	case DRM_FORMAT_XBGR2101010:
4696 	case DRM_FORMAT_ABGR2101010:
4697 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4698 		break;
4699 	case DRM_FORMAT_XBGR8888:
4700 	case DRM_FORMAT_ABGR8888:
4701 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4702 		break;
4703 	case DRM_FORMAT_NV21:
4704 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4705 		break;
4706 	case DRM_FORMAT_NV12:
4707 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4708 		break;
4709 	case DRM_FORMAT_P010:
4710 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4711 		break;
4712 	case DRM_FORMAT_XRGB16161616F:
4713 	case DRM_FORMAT_ARGB16161616F:
4714 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4715 		break;
4716 	case DRM_FORMAT_XBGR16161616F:
4717 	case DRM_FORMAT_ABGR16161616F:
4718 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4719 		break;
4720 	case DRM_FORMAT_XRGB16161616:
4721 	case DRM_FORMAT_ARGB16161616:
4722 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4723 		break;
4724 	case DRM_FORMAT_XBGR16161616:
4725 	case DRM_FORMAT_ABGR16161616:
4726 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4727 		break;
4728 	default:
4729 		DRM_ERROR(
4730 			"Unsupported screen format %p4cc\n",
4731 			&fb->format->format);
4732 		return -EINVAL;
4733 	}
4734 
4735 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4736 	case DRM_MODE_ROTATE_0:
4737 		plane_info->rotation = ROTATION_ANGLE_0;
4738 		break;
4739 	case DRM_MODE_ROTATE_90:
4740 		plane_info->rotation = ROTATION_ANGLE_90;
4741 		break;
4742 	case DRM_MODE_ROTATE_180:
4743 		plane_info->rotation = ROTATION_ANGLE_180;
4744 		break;
4745 	case DRM_MODE_ROTATE_270:
4746 		plane_info->rotation = ROTATION_ANGLE_270;
4747 		break;
4748 	default:
4749 		plane_info->rotation = ROTATION_ANGLE_0;
4750 		break;
4751 	}
4752 
4753 
4754 	plane_info->visible = true;
4755 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4756 
4757 	plane_info->layer_index = 0;
4758 
4759 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4760 					  &plane_info->color_space);
4761 	if (ret)
4762 		return ret;
4763 
4764 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4765 					   plane_info->rotation, tiling_flags,
4766 					   &plane_info->tiling_info,
4767 					   &plane_info->plane_size,
4768 					   &plane_info->dcc, address,
4769 					   tmz_surface, force_disable_dcc);
4770 	if (ret)
4771 		return ret;
4772 
4773 	fill_blending_from_plane_state(
4774 		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
4775 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4776 
4777 	return 0;
4778 }
4779 
4780 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4781 				    struct dc_plane_state *dc_plane_state,
4782 				    struct drm_plane_state *plane_state,
4783 				    struct drm_crtc_state *crtc_state)
4784 {
4785 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4786 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4787 	struct dc_scaling_info scaling_info;
4788 	struct dc_plane_info plane_info;
4789 	int ret;
4790 	bool force_disable_dcc = false;
4791 
4792 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
4793 	if (ret)
4794 		return ret;
4795 
4796 	dc_plane_state->src_rect = scaling_info.src_rect;
4797 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4798 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4799 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4800 
4801 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4802 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4803 					  afb->tiling_flags,
4804 					  &plane_info,
4805 					  &dc_plane_state->address,
4806 					  afb->tmz_surface,
4807 					  force_disable_dcc);
4808 	if (ret)
4809 		return ret;
4810 
4811 	dc_plane_state->format = plane_info.format;
4812 	dc_plane_state->color_space = plane_info.color_space;
4813 	dc_plane_state->format = plane_info.format;
4814 	dc_plane_state->plane_size = plane_info.plane_size;
4815 	dc_plane_state->rotation = plane_info.rotation;
4816 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4817 	dc_plane_state->stereo_format = plane_info.stereo_format;
4818 	dc_plane_state->tiling_info = plane_info.tiling_info;
4819 	dc_plane_state->visible = plane_info.visible;
4820 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4821 	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
4822 	dc_plane_state->global_alpha = plane_info.global_alpha;
4823 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4824 	dc_plane_state->dcc = plane_info.dcc;
4825 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4826 	dc_plane_state->flip_int_enabled = true;
4827 
4828 	/*
4829 	 * Always set input transfer function, since plane state is refreshed
4830 	 * every time.
4831 	 */
4832 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4833 	if (ret)
4834 		return ret;
4835 
4836 	return 0;
4837 }
4838 
4839 /**
4840  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
4841  *
4842  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
4843  *         remote fb
4844  * @old_plane_state: Old state of @plane
4845  * @new_plane_state: New state of @plane
4846  * @crtc_state: New state of CRTC connected to the @plane
4847  * @flip_addrs: DC flip tracking struct, which also tracts dirty rects
4848  *
4849  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
4850  * (referred to as "damage clips" in DRM nomenclature) that require updating on
4851  * the eDP remote buffer. The responsibility of specifying the dirty regions is
4852  * amdgpu_dm's.
4853  *
4854  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
4855  * plane with regions that require flushing to the eDP remote buffer. In
4856  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
4857  * implicitly provide damage clips without any client support via the plane
4858  * bounds.
4859  *
4860  * Today, amdgpu_dm only supports the MPO and cursor usecase.
4861  *
4862  * TODO: Also enable for FB_DAMAGE_CLIPS
4863  */
4864 static void fill_dc_dirty_rects(struct drm_plane *plane,
4865 				struct drm_plane_state *old_plane_state,
4866 				struct drm_plane_state *new_plane_state,
4867 				struct drm_crtc_state *crtc_state,
4868 				struct dc_flip_addrs *flip_addrs)
4869 {
4870 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4871 	struct rect *dirty_rects = flip_addrs->dirty_rects;
4872 	uint32_t num_clips;
4873 	bool bb_changed;
4874 	bool fb_changed;
4875 	uint32_t i = 0;
4876 
4877 	flip_addrs->dirty_rect_count = 0;
4878 
4879 	/*
4880 	 * Cursor plane has it's own dirty rect update interface. See
4881 	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
4882 	 */
4883 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
4884 		return;
4885 
4886 	/*
4887 	 * Today, we only consider MPO use-case for PSR SU. If MPO not
4888 	 * requested, and there is a plane update, do FFU.
4889 	 */
4890 	if (!dm_crtc_state->mpo_requested) {
4891 		dirty_rects[0].x = 0;
4892 		dirty_rects[0].y = 0;
4893 		dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
4894 		dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
4895 		flip_addrs->dirty_rect_count = 1;
4896 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
4897 				 new_plane_state->plane->base.id,
4898 				 dm_crtc_state->base.mode.crtc_hdisplay,
4899 				 dm_crtc_state->base.mode.crtc_vdisplay);
4900 		return;
4901 	}
4902 
4903 	/*
4904 	 * MPO is requested. Add entire plane bounding box to dirty rects if
4905 	 * flipped to or damaged.
4906 	 *
4907 	 * If plane is moved or resized, also add old bounding box to dirty
4908 	 * rects.
4909 	 */
4910 	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
4911 	fb_changed = old_plane_state->fb->base.id !=
4912 		     new_plane_state->fb->base.id;
4913 	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
4914 		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
4915 		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
4916 		      old_plane_state->crtc_h != new_plane_state->crtc_h);
4917 
4918 	DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
4919 			 new_plane_state->plane->base.id,
4920 			 bb_changed, fb_changed, num_clips);
4921 
4922 	if (num_clips || fb_changed || bb_changed) {
4923 		dirty_rects[i].x = new_plane_state->crtc_x;
4924 		dirty_rects[i].y = new_plane_state->crtc_y;
4925 		dirty_rects[i].width = new_plane_state->crtc_w;
4926 		dirty_rects[i].height = new_plane_state->crtc_h;
4927 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
4928 				 new_plane_state->plane->base.id,
4929 				 dirty_rects[i].x, dirty_rects[i].y,
4930 				 dirty_rects[i].width, dirty_rects[i].height);
4931 		i += 1;
4932 	}
4933 
4934 	/* Add old plane bounding-box if plane is moved or resized */
4935 	if (bb_changed) {
4936 		dirty_rects[i].x = old_plane_state->crtc_x;
4937 		dirty_rects[i].y = old_plane_state->crtc_y;
4938 		dirty_rects[i].width = old_plane_state->crtc_w;
4939 		dirty_rects[i].height = old_plane_state->crtc_h;
4940 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
4941 				old_plane_state->plane->base.id,
4942 				dirty_rects[i].x, dirty_rects[i].y,
4943 				dirty_rects[i].width, dirty_rects[i].height);
4944 		i += 1;
4945 	}
4946 
4947 	flip_addrs->dirty_rect_count = i;
4948 }
4949 
4950 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4951 					   const struct dm_connector_state *dm_state,
4952 					   struct dc_stream_state *stream)
4953 {
4954 	enum amdgpu_rmx_type rmx_type;
4955 
4956 	struct rect src = { 0 }; /* viewport in composition space*/
4957 	struct rect dst = { 0 }; /* stream addressable area */
4958 
4959 	/* no mode. nothing to be done */
4960 	if (!mode)
4961 		return;
4962 
4963 	/* Full screen scaling by default */
4964 	src.width = mode->hdisplay;
4965 	src.height = mode->vdisplay;
4966 	dst.width = stream->timing.h_addressable;
4967 	dst.height = stream->timing.v_addressable;
4968 
4969 	if (dm_state) {
4970 		rmx_type = dm_state->scaling;
4971 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4972 			if (src.width * dst.height <
4973 					src.height * dst.width) {
4974 				/* height needs less upscaling/more downscaling */
4975 				dst.width = src.width *
4976 						dst.height / src.height;
4977 			} else {
4978 				/* width needs less upscaling/more downscaling */
4979 				dst.height = src.height *
4980 						dst.width / src.width;
4981 			}
4982 		} else if (rmx_type == RMX_CENTER) {
4983 			dst = src;
4984 		}
4985 
4986 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4987 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4988 
4989 		if (dm_state->underscan_enable) {
4990 			dst.x += dm_state->underscan_hborder / 2;
4991 			dst.y += dm_state->underscan_vborder / 2;
4992 			dst.width -= dm_state->underscan_hborder;
4993 			dst.height -= dm_state->underscan_vborder;
4994 		}
4995 	}
4996 
4997 	stream->src = src;
4998 	stream->dst = dst;
4999 
5000 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5001 		      dst.x, dst.y, dst.width, dst.height);
5002 
5003 }
5004 
5005 static enum dc_color_depth
5006 convert_color_depth_from_display_info(const struct drm_connector *connector,
5007 				      bool is_y420, int requested_bpc)
5008 {
5009 	uint8_t bpc;
5010 
5011 	if (is_y420) {
5012 		bpc = 8;
5013 
5014 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5015 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5016 			bpc = 16;
5017 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5018 			bpc = 12;
5019 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5020 			bpc = 10;
5021 	} else {
5022 		bpc = (uint8_t)connector->display_info.bpc;
5023 		/* Assume 8 bpc by default if no bpc is specified. */
5024 		bpc = bpc ? bpc : 8;
5025 	}
5026 
5027 	if (requested_bpc > 0) {
5028 		/*
5029 		 * Cap display bpc based on the user requested value.
5030 		 *
5031 		 * The value for state->max_bpc may not correctly updated
5032 		 * depending on when the connector gets added to the state
5033 		 * or if this was called outside of atomic check, so it
5034 		 * can't be used directly.
5035 		 */
5036 		bpc = min_t(u8, bpc, requested_bpc);
5037 
5038 		/* Round down to the nearest even number. */
5039 		bpc = bpc - (bpc & 1);
5040 	}
5041 
5042 	switch (bpc) {
5043 	case 0:
5044 		/*
5045 		 * Temporary Work around, DRM doesn't parse color depth for
5046 		 * EDID revision before 1.4
5047 		 * TODO: Fix edid parsing
5048 		 */
5049 		return COLOR_DEPTH_888;
5050 	case 6:
5051 		return COLOR_DEPTH_666;
5052 	case 8:
5053 		return COLOR_DEPTH_888;
5054 	case 10:
5055 		return COLOR_DEPTH_101010;
5056 	case 12:
5057 		return COLOR_DEPTH_121212;
5058 	case 14:
5059 		return COLOR_DEPTH_141414;
5060 	case 16:
5061 		return COLOR_DEPTH_161616;
5062 	default:
5063 		return COLOR_DEPTH_UNDEFINED;
5064 	}
5065 }
5066 
5067 static enum dc_aspect_ratio
5068 get_aspect_ratio(const struct drm_display_mode *mode_in)
5069 {
5070 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5071 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5072 }
5073 
5074 static enum dc_color_space
5075 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5076 {
5077 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5078 
5079 	switch (dc_crtc_timing->pixel_encoding)	{
5080 	case PIXEL_ENCODING_YCBCR422:
5081 	case PIXEL_ENCODING_YCBCR444:
5082 	case PIXEL_ENCODING_YCBCR420:
5083 	{
5084 		/*
5085 		 * 27030khz is the separation point between HDTV and SDTV
5086 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5087 		 * respectively
5088 		 */
5089 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5090 			if (dc_crtc_timing->flags.Y_ONLY)
5091 				color_space =
5092 					COLOR_SPACE_YCBCR709_LIMITED;
5093 			else
5094 				color_space = COLOR_SPACE_YCBCR709;
5095 		} else {
5096 			if (dc_crtc_timing->flags.Y_ONLY)
5097 				color_space =
5098 					COLOR_SPACE_YCBCR601_LIMITED;
5099 			else
5100 				color_space = COLOR_SPACE_YCBCR601;
5101 		}
5102 
5103 	}
5104 	break;
5105 	case PIXEL_ENCODING_RGB:
5106 		color_space = COLOR_SPACE_SRGB;
5107 		break;
5108 
5109 	default:
5110 		WARN_ON(1);
5111 		break;
5112 	}
5113 
5114 	return color_space;
5115 }
5116 
5117 static bool adjust_colour_depth_from_display_info(
5118 	struct dc_crtc_timing *timing_out,
5119 	const struct drm_display_info *info)
5120 {
5121 	enum dc_color_depth depth = timing_out->display_color_depth;
5122 	int normalized_clk;
5123 	do {
5124 		normalized_clk = timing_out->pix_clk_100hz / 10;
5125 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5126 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5127 			normalized_clk /= 2;
5128 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5129 		switch (depth) {
5130 		case COLOR_DEPTH_888:
5131 			break;
5132 		case COLOR_DEPTH_101010:
5133 			normalized_clk = (normalized_clk * 30) / 24;
5134 			break;
5135 		case COLOR_DEPTH_121212:
5136 			normalized_clk = (normalized_clk * 36) / 24;
5137 			break;
5138 		case COLOR_DEPTH_161616:
5139 			normalized_clk = (normalized_clk * 48) / 24;
5140 			break;
5141 		default:
5142 			/* The above depths are the only ones valid for HDMI. */
5143 			return false;
5144 		}
5145 		if (normalized_clk <= info->max_tmds_clock) {
5146 			timing_out->display_color_depth = depth;
5147 			return true;
5148 		}
5149 	} while (--depth > COLOR_DEPTH_666);
5150 	return false;
5151 }
5152 
5153 static void fill_stream_properties_from_drm_display_mode(
5154 	struct dc_stream_state *stream,
5155 	const struct drm_display_mode *mode_in,
5156 	const struct drm_connector *connector,
5157 	const struct drm_connector_state *connector_state,
5158 	const struct dc_stream_state *old_stream,
5159 	int requested_bpc)
5160 {
5161 	struct dc_crtc_timing *timing_out = &stream->timing;
5162 	const struct drm_display_info *info = &connector->display_info;
5163 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5164 	struct hdmi_vendor_infoframe hv_frame;
5165 	struct hdmi_avi_infoframe avi_frame;
5166 
5167 	memset(&hv_frame, 0, sizeof(hv_frame));
5168 	memset(&avi_frame, 0, sizeof(avi_frame));
5169 
5170 	timing_out->h_border_left = 0;
5171 	timing_out->h_border_right = 0;
5172 	timing_out->v_border_top = 0;
5173 	timing_out->v_border_bottom = 0;
5174 	/* TODO: un-hardcode */
5175 	if (drm_mode_is_420_only(info, mode_in)
5176 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5177 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5178 	else if (drm_mode_is_420_also(info, mode_in)
5179 			&& aconnector->force_yuv420_output)
5180 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5181 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5182 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5183 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5184 	else
5185 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5186 
5187 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5188 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5189 		connector,
5190 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5191 		requested_bpc);
5192 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5193 	timing_out->hdmi_vic = 0;
5194 
5195 	if (old_stream) {
5196 		timing_out->vic = old_stream->timing.vic;
5197 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5198 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5199 	} else {
5200 		timing_out->vic = drm_match_cea_mode(mode_in);
5201 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5202 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5203 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5204 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5205 	}
5206 
5207 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5208 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5209 		timing_out->vic = avi_frame.video_code;
5210 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5211 		timing_out->hdmi_vic = hv_frame.vic;
5212 	}
5213 
5214 	if (is_freesync_video_mode(mode_in, aconnector)) {
5215 		timing_out->h_addressable = mode_in->hdisplay;
5216 		timing_out->h_total = mode_in->htotal;
5217 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5218 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5219 		timing_out->v_total = mode_in->vtotal;
5220 		timing_out->v_addressable = mode_in->vdisplay;
5221 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5222 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5223 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5224 	} else {
5225 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5226 		timing_out->h_total = mode_in->crtc_htotal;
5227 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5228 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5229 		timing_out->v_total = mode_in->crtc_vtotal;
5230 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5231 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5232 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5233 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5234 	}
5235 
5236 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5237 
5238 	stream->output_color_space = get_output_color_space(timing_out);
5239 
5240 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5241 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5242 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5243 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5244 		    drm_mode_is_420_also(info, mode_in) &&
5245 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5246 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5247 			adjust_colour_depth_from_display_info(timing_out, info);
5248 		}
5249 	}
5250 }
5251 
5252 static void fill_audio_info(struct audio_info *audio_info,
5253 			    const struct drm_connector *drm_connector,
5254 			    const struct dc_sink *dc_sink)
5255 {
5256 	int i = 0;
5257 	int cea_revision = 0;
5258 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5259 
5260 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5261 	audio_info->product_id = edid_caps->product_id;
5262 
5263 	cea_revision = drm_connector->display_info.cea_rev;
5264 
5265 	strscpy(audio_info->display_name,
5266 		edid_caps->display_name,
5267 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5268 
5269 	if (cea_revision >= 3) {
5270 		audio_info->mode_count = edid_caps->audio_mode_count;
5271 
5272 		for (i = 0; i < audio_info->mode_count; ++i) {
5273 			audio_info->modes[i].format_code =
5274 					(enum audio_format_code)
5275 					(edid_caps->audio_modes[i].format_code);
5276 			audio_info->modes[i].channel_count =
5277 					edid_caps->audio_modes[i].channel_count;
5278 			audio_info->modes[i].sample_rates.all =
5279 					edid_caps->audio_modes[i].sample_rate;
5280 			audio_info->modes[i].sample_size =
5281 					edid_caps->audio_modes[i].sample_size;
5282 		}
5283 	}
5284 
5285 	audio_info->flags.all = edid_caps->speaker_flags;
5286 
5287 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5288 	if (drm_connector->latency_present[0]) {
5289 		audio_info->video_latency = drm_connector->video_latency[0];
5290 		audio_info->audio_latency = drm_connector->audio_latency[0];
5291 	}
5292 
5293 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5294 
5295 }
5296 
5297 static void
5298 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5299 				      struct drm_display_mode *dst_mode)
5300 {
5301 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5302 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5303 	dst_mode->crtc_clock = src_mode->crtc_clock;
5304 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5305 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5306 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5307 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5308 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5309 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5310 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5311 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5312 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5313 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5314 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5315 }
5316 
5317 static void
5318 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5319 					const struct drm_display_mode *native_mode,
5320 					bool scale_enabled)
5321 {
5322 	if (scale_enabled) {
5323 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5324 	} else if (native_mode->clock == drm_mode->clock &&
5325 			native_mode->htotal == drm_mode->htotal &&
5326 			native_mode->vtotal == drm_mode->vtotal) {
5327 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5328 	} else {
5329 		/* no scaling nor amdgpu inserted, no need to patch */
5330 	}
5331 }
5332 
5333 static struct dc_sink *
5334 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5335 {
5336 	struct dc_sink_init_data sink_init_data = { 0 };
5337 	struct dc_sink *sink = NULL;
5338 	sink_init_data.link = aconnector->dc_link;
5339 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5340 
5341 	sink = dc_sink_create(&sink_init_data);
5342 	if (!sink) {
5343 		DRM_ERROR("Failed to create sink!\n");
5344 		return NULL;
5345 	}
5346 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5347 
5348 	return sink;
5349 }
5350 
5351 static void set_multisync_trigger_params(
5352 		struct dc_stream_state *stream)
5353 {
5354 	struct dc_stream_state *master = NULL;
5355 
5356 	if (stream->triggered_crtc_reset.enabled) {
5357 		master = stream->triggered_crtc_reset.event_source;
5358 		stream->triggered_crtc_reset.event =
5359 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5360 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5361 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5362 	}
5363 }
5364 
5365 static void set_master_stream(struct dc_stream_state *stream_set[],
5366 			      int stream_count)
5367 {
5368 	int j, highest_rfr = 0, master_stream = 0;
5369 
5370 	for (j = 0;  j < stream_count; j++) {
5371 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5372 			int refresh_rate = 0;
5373 
5374 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5375 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5376 			if (refresh_rate > highest_rfr) {
5377 				highest_rfr = refresh_rate;
5378 				master_stream = j;
5379 			}
5380 		}
5381 	}
5382 	for (j = 0;  j < stream_count; j++) {
5383 		if (stream_set[j])
5384 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5385 	}
5386 }
5387 
5388 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5389 {
5390 	int i = 0;
5391 	struct dc_stream_state *stream;
5392 
5393 	if (context->stream_count < 2)
5394 		return;
5395 	for (i = 0; i < context->stream_count ; i++) {
5396 		if (!context->streams[i])
5397 			continue;
5398 		/*
5399 		 * TODO: add a function to read AMD VSDB bits and set
5400 		 * crtc_sync_master.multi_sync_enabled flag
5401 		 * For now it's set to false
5402 		 */
5403 	}
5404 
5405 	set_master_stream(context->streams, context->stream_count);
5406 
5407 	for (i = 0; i < context->stream_count ; i++) {
5408 		stream = context->streams[i];
5409 
5410 		if (!stream)
5411 			continue;
5412 
5413 		set_multisync_trigger_params(stream);
5414 	}
5415 }
5416 
5417 /**
5418  * DOC: FreeSync Video
5419  *
5420  * When a userspace application wants to play a video, the content follows a
5421  * standard format definition that usually specifies the FPS for that format.
5422  * The below list illustrates some video format and the expected FPS,
5423  * respectively:
5424  *
5425  * - TV/NTSC (23.976 FPS)
5426  * - Cinema (24 FPS)
5427  * - TV/PAL (25 FPS)
5428  * - TV/NTSC (29.97 FPS)
5429  * - TV/NTSC (30 FPS)
5430  * - Cinema HFR (48 FPS)
5431  * - TV/PAL (50 FPS)
5432  * - Commonly used (60 FPS)
5433  * - Multiples of 24 (48,72,96 FPS)
5434  *
5435  * The list of standards video format is not huge and can be added to the
5436  * connector modeset list beforehand. With that, userspace can leverage
5437  * FreeSync to extends the front porch in order to attain the target refresh
5438  * rate. Such a switch will happen seamlessly, without screen blanking or
5439  * reprogramming of the output in any other way. If the userspace requests a
5440  * modesetting change compatible with FreeSync modes that only differ in the
5441  * refresh rate, DC will skip the full update and avoid blink during the
5442  * transition. For example, the video player can change the modesetting from
5443  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5444  * causing any display blink. This same concept can be applied to a mode
5445  * setting change.
5446  */
5447 static struct drm_display_mode *
5448 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5449 		bool use_probed_modes)
5450 {
5451 	struct drm_display_mode *m, *m_pref = NULL;
5452 	u16 current_refresh, highest_refresh;
5453 	struct list_head *list_head = use_probed_modes ?
5454 		&aconnector->base.probed_modes :
5455 		&aconnector->base.modes;
5456 
5457 	if (aconnector->freesync_vid_base.clock != 0)
5458 		return &aconnector->freesync_vid_base;
5459 
5460 	/* Find the preferred mode */
5461 	list_for_each_entry (m, list_head, head) {
5462 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5463 			m_pref = m;
5464 			break;
5465 		}
5466 	}
5467 
5468 	if (!m_pref) {
5469 		/* Probably an EDID with no preferred mode. Fallback to first entry */
5470 		m_pref = list_first_entry_or_null(
5471 				&aconnector->base.modes, struct drm_display_mode, head);
5472 		if (!m_pref) {
5473 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5474 			return NULL;
5475 		}
5476 	}
5477 
5478 	highest_refresh = drm_mode_vrefresh(m_pref);
5479 
5480 	/*
5481 	 * Find the mode with highest refresh rate with same resolution.
5482 	 * For some monitors, preferred mode is not the mode with highest
5483 	 * supported refresh rate.
5484 	 */
5485 	list_for_each_entry (m, list_head, head) {
5486 		current_refresh  = drm_mode_vrefresh(m);
5487 
5488 		if (m->hdisplay == m_pref->hdisplay &&
5489 		    m->vdisplay == m_pref->vdisplay &&
5490 		    highest_refresh < current_refresh) {
5491 			highest_refresh = current_refresh;
5492 			m_pref = m;
5493 		}
5494 	}
5495 
5496 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
5497 	return m_pref;
5498 }
5499 
5500 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5501 		struct amdgpu_dm_connector *aconnector)
5502 {
5503 	struct drm_display_mode *high_mode;
5504 	int timing_diff;
5505 
5506 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5507 	if (!high_mode || !mode)
5508 		return false;
5509 
5510 	timing_diff = high_mode->vtotal - mode->vtotal;
5511 
5512 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5513 	    high_mode->hdisplay != mode->hdisplay ||
5514 	    high_mode->vdisplay != mode->vdisplay ||
5515 	    high_mode->hsync_start != mode->hsync_start ||
5516 	    high_mode->hsync_end != mode->hsync_end ||
5517 	    high_mode->htotal != mode->htotal ||
5518 	    high_mode->hskew != mode->hskew ||
5519 	    high_mode->vscan != mode->vscan ||
5520 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5521 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5522 		return false;
5523 	else
5524 		return true;
5525 }
5526 
5527 #if defined(CONFIG_DRM_AMD_DC_DCN)
5528 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5529 			    struct dc_sink *sink, struct dc_stream_state *stream,
5530 			    struct dsc_dec_dpcd_caps *dsc_caps)
5531 {
5532 	stream->timing.flags.DSC = 0;
5533 	dsc_caps->is_dsc_supported = false;
5534 
5535 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
5536 	    sink->sink_signal == SIGNAL_TYPE_EDP)) {
5537 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
5538 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
5539 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5540 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5541 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5542 				dsc_caps);
5543 	}
5544 }
5545 
5546 
5547 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
5548 				    struct dc_sink *sink, struct dc_stream_state *stream,
5549 				    struct dsc_dec_dpcd_caps *dsc_caps,
5550 				    uint32_t max_dsc_target_bpp_limit_override)
5551 {
5552 	const struct dc_link_settings *verified_link_cap = NULL;
5553 	uint32_t link_bw_in_kbps;
5554 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
5555 	struct dc *dc = sink->ctx->dc;
5556 	struct dc_dsc_bw_range bw_range = {0};
5557 	struct dc_dsc_config dsc_cfg = {0};
5558 
5559 	verified_link_cap = dc_link_get_link_cap(stream->link);
5560 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
5561 	edp_min_bpp_x16 = 8 * 16;
5562 	edp_max_bpp_x16 = 8 * 16;
5563 
5564 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
5565 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
5566 
5567 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
5568 		edp_min_bpp_x16 = edp_max_bpp_x16;
5569 
5570 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
5571 				dc->debug.dsc_min_slice_height_override,
5572 				edp_min_bpp_x16, edp_max_bpp_x16,
5573 				dsc_caps,
5574 				&stream->timing,
5575 				&bw_range)) {
5576 
5577 		if (bw_range.max_kbps < link_bw_in_kbps) {
5578 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5579 					dsc_caps,
5580 					dc->debug.dsc_min_slice_height_override,
5581 					max_dsc_target_bpp_limit_override,
5582 					0,
5583 					&stream->timing,
5584 					&dsc_cfg)) {
5585 				stream->timing.dsc_cfg = dsc_cfg;
5586 				stream->timing.flags.DSC = 1;
5587 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
5588 			}
5589 			return;
5590 		}
5591 	}
5592 
5593 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5594 				dsc_caps,
5595 				dc->debug.dsc_min_slice_height_override,
5596 				max_dsc_target_bpp_limit_override,
5597 				link_bw_in_kbps,
5598 				&stream->timing,
5599 				&dsc_cfg)) {
5600 		stream->timing.dsc_cfg = dsc_cfg;
5601 		stream->timing.flags.DSC = 1;
5602 	}
5603 }
5604 
5605 
5606 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5607 					struct dc_sink *sink, struct dc_stream_state *stream,
5608 					struct dsc_dec_dpcd_caps *dsc_caps)
5609 {
5610 	struct drm_connector *drm_connector = &aconnector->base;
5611 	uint32_t link_bandwidth_kbps;
5612 	uint32_t max_dsc_target_bpp_limit_override = 0;
5613 	struct dc *dc = sink->ctx->dc;
5614 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
5615 	uint32_t dsc_max_supported_bw_in_kbps;
5616 
5617 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5618 							dc_link_get_link_cap(aconnector->dc_link));
5619 	if (stream->link && stream->link->local_sink)
5620 		max_dsc_target_bpp_limit_override =
5621 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
5622 
5623 	/* Set DSC policy according to dsc_clock_en */
5624 	dc_dsc_policy_set_enable_dsc_when_not_needed(
5625 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5626 
5627 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
5628 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
5629 
5630 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
5631 
5632 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5633 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
5634 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5635 						dsc_caps,
5636 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5637 						max_dsc_target_bpp_limit_override,
5638 						link_bandwidth_kbps,
5639 						&stream->timing,
5640 						&stream->timing.dsc_cfg)) {
5641 				stream->timing.flags.DSC = 1;
5642 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5643 			}
5644 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
5645 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
5646 			max_supported_bw_in_kbps = link_bandwidth_kbps;
5647 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
5648 
5649 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
5650 					max_supported_bw_in_kbps > 0 &&
5651 					dsc_max_supported_bw_in_kbps > 0)
5652 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5653 						dsc_caps,
5654 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5655 						max_dsc_target_bpp_limit_override,
5656 						dsc_max_supported_bw_in_kbps,
5657 						&stream->timing,
5658 						&stream->timing.dsc_cfg)) {
5659 					stream->timing.flags.DSC = 1;
5660 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
5661 									 __func__, drm_connector->name);
5662 				}
5663 		}
5664 	}
5665 
5666 	/* Overwrite the stream flag if DSC is enabled through debugfs */
5667 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5668 		stream->timing.flags.DSC = 1;
5669 
5670 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5671 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5672 
5673 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5674 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5675 
5676 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5677 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5678 }
5679 #endif /* CONFIG_DRM_AMD_DC_DCN */
5680 
5681 static struct dc_stream_state *
5682 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5683 		       const struct drm_display_mode *drm_mode,
5684 		       const struct dm_connector_state *dm_state,
5685 		       const struct dc_stream_state *old_stream,
5686 		       int requested_bpc)
5687 {
5688 	struct drm_display_mode *preferred_mode = NULL;
5689 	struct drm_connector *drm_connector;
5690 	const struct drm_connector_state *con_state =
5691 		dm_state ? &dm_state->base : NULL;
5692 	struct dc_stream_state *stream = NULL;
5693 	struct drm_display_mode mode = *drm_mode;
5694 	struct drm_display_mode saved_mode;
5695 	struct drm_display_mode *freesync_mode = NULL;
5696 	bool native_mode_found = false;
5697 	bool recalculate_timing = false;
5698 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5699 	int mode_refresh;
5700 	int preferred_refresh = 0;
5701 #if defined(CONFIG_DRM_AMD_DC_DCN)
5702 	struct dsc_dec_dpcd_caps dsc_caps;
5703 #endif
5704 
5705 	struct dc_sink *sink = NULL;
5706 
5707 	memset(&saved_mode, 0, sizeof(saved_mode));
5708 
5709 	if (aconnector == NULL) {
5710 		DRM_ERROR("aconnector is NULL!\n");
5711 		return stream;
5712 	}
5713 
5714 	drm_connector = &aconnector->base;
5715 
5716 	if (!aconnector->dc_sink) {
5717 		sink = create_fake_sink(aconnector);
5718 		if (!sink)
5719 			return stream;
5720 	} else {
5721 		sink = aconnector->dc_sink;
5722 		dc_sink_retain(sink);
5723 	}
5724 
5725 	stream = dc_create_stream_for_sink(sink);
5726 
5727 	if (stream == NULL) {
5728 		DRM_ERROR("Failed to create stream for sink!\n");
5729 		goto finish;
5730 	}
5731 
5732 	stream->dm_stream_context = aconnector;
5733 
5734 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5735 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5736 
5737 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5738 		/* Search for preferred mode */
5739 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5740 			native_mode_found = true;
5741 			break;
5742 		}
5743 	}
5744 	if (!native_mode_found)
5745 		preferred_mode = list_first_entry_or_null(
5746 				&aconnector->base.modes,
5747 				struct drm_display_mode,
5748 				head);
5749 
5750 	mode_refresh = drm_mode_vrefresh(&mode);
5751 
5752 	if (preferred_mode == NULL) {
5753 		/*
5754 		 * This may not be an error, the use case is when we have no
5755 		 * usermode calls to reset and set mode upon hotplug. In this
5756 		 * case, we call set mode ourselves to restore the previous mode
5757 		 * and the modelist may not be filled in in time.
5758 		 */
5759 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5760 	} else {
5761 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
5762 		if (recalculate_timing) {
5763 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5764 			drm_mode_copy(&saved_mode, &mode);
5765 			drm_mode_copy(&mode, freesync_mode);
5766 		} else {
5767 			decide_crtc_timing_for_drm_display_mode(
5768 					&mode, preferred_mode, scale);
5769 
5770 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
5771 		}
5772 	}
5773 
5774 	if (recalculate_timing)
5775 		drm_mode_set_crtcinfo(&saved_mode, 0);
5776 	else if (!dm_state)
5777 		drm_mode_set_crtcinfo(&mode, 0);
5778 
5779 	/*
5780 	* If scaling is enabled and refresh rate didn't change
5781 	* we copy the vic and polarities of the old timings
5782 	*/
5783 	if (!scale || mode_refresh != preferred_refresh)
5784 		fill_stream_properties_from_drm_display_mode(
5785 			stream, &mode, &aconnector->base, con_state, NULL,
5786 			requested_bpc);
5787 	else
5788 		fill_stream_properties_from_drm_display_mode(
5789 			stream, &mode, &aconnector->base, con_state, old_stream,
5790 			requested_bpc);
5791 
5792 #if defined(CONFIG_DRM_AMD_DC_DCN)
5793 	/* SST DSC determination policy */
5794 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5795 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5796 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5797 #endif
5798 
5799 	update_stream_scaling_settings(&mode, dm_state, stream);
5800 
5801 	fill_audio_info(
5802 		&stream->audio_info,
5803 		drm_connector,
5804 		sink);
5805 
5806 	update_stream_signal(stream, sink);
5807 
5808 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5809 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5810 
5811 	if (stream->link->psr_settings.psr_feature_enabled) {
5812 		//
5813 		// should decide stream support vsc sdp colorimetry capability
5814 		// before building vsc info packet
5815 		//
5816 		stream->use_vsc_sdp_for_colorimetry = false;
5817 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5818 			stream->use_vsc_sdp_for_colorimetry =
5819 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5820 		} else {
5821 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5822 				stream->use_vsc_sdp_for_colorimetry = true;
5823 		}
5824 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
5825 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
5826 
5827 	}
5828 finish:
5829 	dc_sink_release(sink);
5830 
5831 	return stream;
5832 }
5833 
5834 static enum drm_connector_status
5835 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5836 {
5837 	bool connected;
5838 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5839 
5840 	/*
5841 	 * Notes:
5842 	 * 1. This interface is NOT called in context of HPD irq.
5843 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5844 	 * makes it a bad place for *any* MST-related activity.
5845 	 */
5846 
5847 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5848 	    !aconnector->fake_enable)
5849 		connected = (aconnector->dc_sink != NULL);
5850 	else
5851 		connected = (aconnector->base.force == DRM_FORCE_ON ||
5852 				aconnector->base.force == DRM_FORCE_ON_DIGITAL);
5853 
5854 	update_subconnector_property(aconnector);
5855 
5856 	return (connected ? connector_status_connected :
5857 			connector_status_disconnected);
5858 }
5859 
5860 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5861 					    struct drm_connector_state *connector_state,
5862 					    struct drm_property *property,
5863 					    uint64_t val)
5864 {
5865 	struct drm_device *dev = connector->dev;
5866 	struct amdgpu_device *adev = drm_to_adev(dev);
5867 	struct dm_connector_state *dm_old_state =
5868 		to_dm_connector_state(connector->state);
5869 	struct dm_connector_state *dm_new_state =
5870 		to_dm_connector_state(connector_state);
5871 
5872 	int ret = -EINVAL;
5873 
5874 	if (property == dev->mode_config.scaling_mode_property) {
5875 		enum amdgpu_rmx_type rmx_type;
5876 
5877 		switch (val) {
5878 		case DRM_MODE_SCALE_CENTER:
5879 			rmx_type = RMX_CENTER;
5880 			break;
5881 		case DRM_MODE_SCALE_ASPECT:
5882 			rmx_type = RMX_ASPECT;
5883 			break;
5884 		case DRM_MODE_SCALE_FULLSCREEN:
5885 			rmx_type = RMX_FULL;
5886 			break;
5887 		case DRM_MODE_SCALE_NONE:
5888 		default:
5889 			rmx_type = RMX_OFF;
5890 			break;
5891 		}
5892 
5893 		if (dm_old_state->scaling == rmx_type)
5894 			return 0;
5895 
5896 		dm_new_state->scaling = rmx_type;
5897 		ret = 0;
5898 	} else if (property == adev->mode_info.underscan_hborder_property) {
5899 		dm_new_state->underscan_hborder = val;
5900 		ret = 0;
5901 	} else if (property == adev->mode_info.underscan_vborder_property) {
5902 		dm_new_state->underscan_vborder = val;
5903 		ret = 0;
5904 	} else if (property == adev->mode_info.underscan_property) {
5905 		dm_new_state->underscan_enable = val;
5906 		ret = 0;
5907 	} else if (property == adev->mode_info.abm_level_property) {
5908 		dm_new_state->abm_level = val;
5909 		ret = 0;
5910 	}
5911 
5912 	return ret;
5913 }
5914 
5915 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5916 					    const struct drm_connector_state *state,
5917 					    struct drm_property *property,
5918 					    uint64_t *val)
5919 {
5920 	struct drm_device *dev = connector->dev;
5921 	struct amdgpu_device *adev = drm_to_adev(dev);
5922 	struct dm_connector_state *dm_state =
5923 		to_dm_connector_state(state);
5924 	int ret = -EINVAL;
5925 
5926 	if (property == dev->mode_config.scaling_mode_property) {
5927 		switch (dm_state->scaling) {
5928 		case RMX_CENTER:
5929 			*val = DRM_MODE_SCALE_CENTER;
5930 			break;
5931 		case RMX_ASPECT:
5932 			*val = DRM_MODE_SCALE_ASPECT;
5933 			break;
5934 		case RMX_FULL:
5935 			*val = DRM_MODE_SCALE_FULLSCREEN;
5936 			break;
5937 		case RMX_OFF:
5938 		default:
5939 			*val = DRM_MODE_SCALE_NONE;
5940 			break;
5941 		}
5942 		ret = 0;
5943 	} else if (property == adev->mode_info.underscan_hborder_property) {
5944 		*val = dm_state->underscan_hborder;
5945 		ret = 0;
5946 	} else if (property == adev->mode_info.underscan_vborder_property) {
5947 		*val = dm_state->underscan_vborder;
5948 		ret = 0;
5949 	} else if (property == adev->mode_info.underscan_property) {
5950 		*val = dm_state->underscan_enable;
5951 		ret = 0;
5952 	} else if (property == adev->mode_info.abm_level_property) {
5953 		*val = dm_state->abm_level;
5954 		ret = 0;
5955 	}
5956 
5957 	return ret;
5958 }
5959 
5960 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5961 {
5962 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5963 
5964 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5965 }
5966 
5967 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5968 {
5969 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5970 	const struct dc_link *link = aconnector->dc_link;
5971 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5972 	struct amdgpu_display_manager *dm = &adev->dm;
5973 	int i;
5974 
5975 	/*
5976 	 * Call only if mst_mgr was initialized before since it's not done
5977 	 * for all connector types.
5978 	 */
5979 	if (aconnector->mst_mgr.dev)
5980 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5981 
5982 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5983 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5984 	for (i = 0; i < dm->num_of_edps; i++) {
5985 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
5986 			backlight_device_unregister(dm->backlight_dev[i]);
5987 			dm->backlight_dev[i] = NULL;
5988 		}
5989 	}
5990 #endif
5991 
5992 	if (aconnector->dc_em_sink)
5993 		dc_sink_release(aconnector->dc_em_sink);
5994 	aconnector->dc_em_sink = NULL;
5995 	if (aconnector->dc_sink)
5996 		dc_sink_release(aconnector->dc_sink);
5997 	aconnector->dc_sink = NULL;
5998 
5999 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6000 	drm_connector_unregister(connector);
6001 	drm_connector_cleanup(connector);
6002 	if (aconnector->i2c) {
6003 		i2c_del_adapter(&aconnector->i2c->base);
6004 		kfree(aconnector->i2c);
6005 	}
6006 	kfree(aconnector->dm_dp_aux.aux.name);
6007 
6008 	kfree(connector);
6009 }
6010 
6011 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6012 {
6013 	struct dm_connector_state *state =
6014 		to_dm_connector_state(connector->state);
6015 
6016 	if (connector->state)
6017 		__drm_atomic_helper_connector_destroy_state(connector->state);
6018 
6019 	kfree(state);
6020 
6021 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6022 
6023 	if (state) {
6024 		state->scaling = RMX_OFF;
6025 		state->underscan_enable = false;
6026 		state->underscan_hborder = 0;
6027 		state->underscan_vborder = 0;
6028 		state->base.max_requested_bpc = 8;
6029 		state->vcpi_slots = 0;
6030 		state->pbn = 0;
6031 
6032 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6033 			state->abm_level = amdgpu_dm_abm_level;
6034 
6035 		__drm_atomic_helper_connector_reset(connector, &state->base);
6036 	}
6037 }
6038 
6039 struct drm_connector_state *
6040 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6041 {
6042 	struct dm_connector_state *state =
6043 		to_dm_connector_state(connector->state);
6044 
6045 	struct dm_connector_state *new_state =
6046 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6047 
6048 	if (!new_state)
6049 		return NULL;
6050 
6051 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6052 
6053 	new_state->freesync_capable = state->freesync_capable;
6054 	new_state->abm_level = state->abm_level;
6055 	new_state->scaling = state->scaling;
6056 	new_state->underscan_enable = state->underscan_enable;
6057 	new_state->underscan_hborder = state->underscan_hborder;
6058 	new_state->underscan_vborder = state->underscan_vborder;
6059 	new_state->vcpi_slots = state->vcpi_slots;
6060 	new_state->pbn = state->pbn;
6061 	return &new_state->base;
6062 }
6063 
6064 static int
6065 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6066 {
6067 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6068 		to_amdgpu_dm_connector(connector);
6069 	int r;
6070 
6071 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6072 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6073 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6074 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6075 		if (r)
6076 			return r;
6077 	}
6078 
6079 #if defined(CONFIG_DEBUG_FS)
6080 	connector_debugfs_init(amdgpu_dm_connector);
6081 #endif
6082 
6083 	return 0;
6084 }
6085 
6086 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6087 	.reset = amdgpu_dm_connector_funcs_reset,
6088 	.detect = amdgpu_dm_connector_detect,
6089 	.fill_modes = drm_helper_probe_single_connector_modes,
6090 	.destroy = amdgpu_dm_connector_destroy,
6091 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6092 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6093 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6094 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6095 	.late_register = amdgpu_dm_connector_late_register,
6096 	.early_unregister = amdgpu_dm_connector_unregister
6097 };
6098 
6099 static int get_modes(struct drm_connector *connector)
6100 {
6101 	return amdgpu_dm_connector_get_modes(connector);
6102 }
6103 
6104 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6105 {
6106 	struct dc_sink_init_data init_params = {
6107 			.link = aconnector->dc_link,
6108 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6109 	};
6110 	struct edid *edid;
6111 
6112 	if (!aconnector->base.edid_blob_ptr) {
6113 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6114 				aconnector->base.name);
6115 
6116 		aconnector->base.force = DRM_FORCE_OFF;
6117 		aconnector->base.override_edid = false;
6118 		return;
6119 	}
6120 
6121 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6122 
6123 	aconnector->edid = edid;
6124 
6125 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6126 		aconnector->dc_link,
6127 		(uint8_t *)edid,
6128 		(edid->extensions + 1) * EDID_LENGTH,
6129 		&init_params);
6130 
6131 	if (aconnector->base.force == DRM_FORCE_ON) {
6132 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6133 		aconnector->dc_link->local_sink :
6134 		aconnector->dc_em_sink;
6135 		dc_sink_retain(aconnector->dc_sink);
6136 	}
6137 }
6138 
6139 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6140 {
6141 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6142 
6143 	/*
6144 	 * In case of headless boot with force on for DP managed connector
6145 	 * Those settings have to be != 0 to get initial modeset
6146 	 */
6147 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6148 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6149 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6150 	}
6151 
6152 
6153 	aconnector->base.override_edid = true;
6154 	create_eml_sink(aconnector);
6155 }
6156 
6157 struct dc_stream_state *
6158 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6159 				const struct drm_display_mode *drm_mode,
6160 				const struct dm_connector_state *dm_state,
6161 				const struct dc_stream_state *old_stream)
6162 {
6163 	struct drm_connector *connector = &aconnector->base;
6164 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6165 	struct dc_stream_state *stream;
6166 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6167 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6168 	enum dc_status dc_result = DC_OK;
6169 
6170 	do {
6171 		stream = create_stream_for_sink(aconnector, drm_mode,
6172 						dm_state, old_stream,
6173 						requested_bpc);
6174 		if (stream == NULL) {
6175 			DRM_ERROR("Failed to create stream for sink!\n");
6176 			break;
6177 		}
6178 
6179 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6180 		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
6181 			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
6182 
6183 		if (dc_result != DC_OK) {
6184 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6185 				      drm_mode->hdisplay,
6186 				      drm_mode->vdisplay,
6187 				      drm_mode->clock,
6188 				      dc_result,
6189 				      dc_status_to_str(dc_result));
6190 
6191 			dc_stream_release(stream);
6192 			stream = NULL;
6193 			requested_bpc -= 2; /* lower bpc to retry validation */
6194 		}
6195 
6196 	} while (stream == NULL && requested_bpc >= 6);
6197 
6198 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6199 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6200 
6201 		aconnector->force_yuv420_output = true;
6202 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6203 						dm_state, old_stream);
6204 		aconnector->force_yuv420_output = false;
6205 	}
6206 
6207 	return stream;
6208 }
6209 
6210 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6211 				   struct drm_display_mode *mode)
6212 {
6213 	int result = MODE_ERROR;
6214 	struct dc_sink *dc_sink;
6215 	/* TODO: Unhardcode stream count */
6216 	struct dc_stream_state *stream;
6217 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6218 
6219 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6220 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6221 		return result;
6222 
6223 	/*
6224 	 * Only run this the first time mode_valid is called to initilialize
6225 	 * EDID mgmt
6226 	 */
6227 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6228 		!aconnector->dc_em_sink)
6229 		handle_edid_mgmt(aconnector);
6230 
6231 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6232 
6233 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6234 				aconnector->base.force != DRM_FORCE_ON) {
6235 		DRM_ERROR("dc_sink is NULL!\n");
6236 		goto fail;
6237 	}
6238 
6239 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6240 	if (stream) {
6241 		dc_stream_release(stream);
6242 		result = MODE_OK;
6243 	}
6244 
6245 fail:
6246 	/* TODO: error handling*/
6247 	return result;
6248 }
6249 
6250 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6251 				struct dc_info_packet *out)
6252 {
6253 	struct hdmi_drm_infoframe frame;
6254 	unsigned char buf[30]; /* 26 + 4 */
6255 	ssize_t len;
6256 	int ret, i;
6257 
6258 	memset(out, 0, sizeof(*out));
6259 
6260 	if (!state->hdr_output_metadata)
6261 		return 0;
6262 
6263 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6264 	if (ret)
6265 		return ret;
6266 
6267 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6268 	if (len < 0)
6269 		return (int)len;
6270 
6271 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6272 	if (len != 30)
6273 		return -EINVAL;
6274 
6275 	/* Prepare the infopacket for DC. */
6276 	switch (state->connector->connector_type) {
6277 	case DRM_MODE_CONNECTOR_HDMIA:
6278 		out->hb0 = 0x87; /* type */
6279 		out->hb1 = 0x01; /* version */
6280 		out->hb2 = 0x1A; /* length */
6281 		out->sb[0] = buf[3]; /* checksum */
6282 		i = 1;
6283 		break;
6284 
6285 	case DRM_MODE_CONNECTOR_DisplayPort:
6286 	case DRM_MODE_CONNECTOR_eDP:
6287 		out->hb0 = 0x00; /* sdp id, zero */
6288 		out->hb1 = 0x87; /* type */
6289 		out->hb2 = 0x1D; /* payload len - 1 */
6290 		out->hb3 = (0x13 << 2); /* sdp version */
6291 		out->sb[0] = 0x01; /* version */
6292 		out->sb[1] = 0x1A; /* length */
6293 		i = 2;
6294 		break;
6295 
6296 	default:
6297 		return -EINVAL;
6298 	}
6299 
6300 	memcpy(&out->sb[i], &buf[4], 26);
6301 	out->valid = true;
6302 
6303 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6304 		       sizeof(out->sb), false);
6305 
6306 	return 0;
6307 }
6308 
6309 static int
6310 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6311 				 struct drm_atomic_state *state)
6312 {
6313 	struct drm_connector_state *new_con_state =
6314 		drm_atomic_get_new_connector_state(state, conn);
6315 	struct drm_connector_state *old_con_state =
6316 		drm_atomic_get_old_connector_state(state, conn);
6317 	struct drm_crtc *crtc = new_con_state->crtc;
6318 	struct drm_crtc_state *new_crtc_state;
6319 	int ret;
6320 
6321 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6322 
6323 	if (!crtc)
6324 		return 0;
6325 
6326 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6327 		struct dc_info_packet hdr_infopacket;
6328 
6329 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6330 		if (ret)
6331 			return ret;
6332 
6333 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6334 		if (IS_ERR(new_crtc_state))
6335 			return PTR_ERR(new_crtc_state);
6336 
6337 		/*
6338 		 * DC considers the stream backends changed if the
6339 		 * static metadata changes. Forcing the modeset also
6340 		 * gives a simple way for userspace to switch from
6341 		 * 8bpc to 10bpc when setting the metadata to enter
6342 		 * or exit HDR.
6343 		 *
6344 		 * Changing the static metadata after it's been
6345 		 * set is permissible, however. So only force a
6346 		 * modeset if we're entering or exiting HDR.
6347 		 */
6348 		new_crtc_state->mode_changed =
6349 			!old_con_state->hdr_output_metadata ||
6350 			!new_con_state->hdr_output_metadata;
6351 	}
6352 
6353 	return 0;
6354 }
6355 
6356 static const struct drm_connector_helper_funcs
6357 amdgpu_dm_connector_helper_funcs = {
6358 	/*
6359 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6360 	 * modes will be filtered by drm_mode_validate_size(), and those modes
6361 	 * are missing after user start lightdm. So we need to renew modes list.
6362 	 * in get_modes call back, not just return the modes count
6363 	 */
6364 	.get_modes = get_modes,
6365 	.mode_valid = amdgpu_dm_connector_mode_valid,
6366 	.atomic_check = amdgpu_dm_connector_atomic_check,
6367 };
6368 
6369 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6370 {
6371 
6372 }
6373 
6374 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6375 {
6376 	switch (display_color_depth) {
6377 	case COLOR_DEPTH_666:
6378 		return 6;
6379 	case COLOR_DEPTH_888:
6380 		return 8;
6381 	case COLOR_DEPTH_101010:
6382 		return 10;
6383 	case COLOR_DEPTH_121212:
6384 		return 12;
6385 	case COLOR_DEPTH_141414:
6386 		return 14;
6387 	case COLOR_DEPTH_161616:
6388 		return 16;
6389 	default:
6390 		break;
6391 	}
6392 	return 0;
6393 }
6394 
6395 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6396 					  struct drm_crtc_state *crtc_state,
6397 					  struct drm_connector_state *conn_state)
6398 {
6399 	struct drm_atomic_state *state = crtc_state->state;
6400 	struct drm_connector *connector = conn_state->connector;
6401 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6402 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6403 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6404 	struct drm_dp_mst_topology_mgr *mst_mgr;
6405 	struct drm_dp_mst_port *mst_port;
6406 	enum dc_color_depth color_depth;
6407 	int clock, bpp = 0;
6408 	bool is_y420 = false;
6409 
6410 	if (!aconnector->port || !aconnector->dc_sink)
6411 		return 0;
6412 
6413 	mst_port = aconnector->port;
6414 	mst_mgr = &aconnector->mst_port->mst_mgr;
6415 
6416 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6417 		return 0;
6418 
6419 	if (!state->duplicated) {
6420 		int max_bpc = conn_state->max_requested_bpc;
6421 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6422 			  aconnector->force_yuv420_output;
6423 		color_depth = convert_color_depth_from_display_info(connector,
6424 								    is_y420,
6425 								    max_bpc);
6426 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6427 		clock = adjusted_mode->clock;
6428 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6429 	}
6430 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6431 									   mst_mgr,
6432 									   mst_port,
6433 									   dm_new_connector_state->pbn,
6434 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6435 	if (dm_new_connector_state->vcpi_slots < 0) {
6436 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6437 		return dm_new_connector_state->vcpi_slots;
6438 	}
6439 	return 0;
6440 }
6441 
6442 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6443 	.disable = dm_encoder_helper_disable,
6444 	.atomic_check = dm_encoder_helper_atomic_check
6445 };
6446 
6447 #if defined(CONFIG_DRM_AMD_DC_DCN)
6448 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6449 					    struct dc_state *dc_state,
6450 					    struct dsc_mst_fairness_vars *vars)
6451 {
6452 	struct dc_stream_state *stream = NULL;
6453 	struct drm_connector *connector;
6454 	struct drm_connector_state *new_con_state;
6455 	struct amdgpu_dm_connector *aconnector;
6456 	struct dm_connector_state *dm_conn_state;
6457 	int i, j;
6458 	int vcpi, pbn_div, pbn, slot_num = 0;
6459 
6460 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6461 
6462 		aconnector = to_amdgpu_dm_connector(connector);
6463 
6464 		if (!aconnector->port)
6465 			continue;
6466 
6467 		if (!new_con_state || !new_con_state->crtc)
6468 			continue;
6469 
6470 		dm_conn_state = to_dm_connector_state(new_con_state);
6471 
6472 		for (j = 0; j < dc_state->stream_count; j++) {
6473 			stream = dc_state->streams[j];
6474 			if (!stream)
6475 				continue;
6476 
6477 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6478 				break;
6479 
6480 			stream = NULL;
6481 		}
6482 
6483 		if (!stream)
6484 			continue;
6485 
6486 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6487 		/* pbn is calculated by compute_mst_dsc_configs_for_state*/
6488 		for (j = 0; j < dc_state->stream_count; j++) {
6489 			if (vars[j].aconnector == aconnector) {
6490 				pbn = vars[j].pbn;
6491 				break;
6492 			}
6493 		}
6494 
6495 		if (j == dc_state->stream_count)
6496 			continue;
6497 
6498 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
6499 
6500 		if (stream->timing.flags.DSC != 1) {
6501 			dm_conn_state->pbn = pbn;
6502 			dm_conn_state->vcpi_slots = slot_num;
6503 
6504 			drm_dp_mst_atomic_enable_dsc(state,
6505 						     aconnector->port,
6506 						     dm_conn_state->pbn,
6507 						     0,
6508 						     false);
6509 			continue;
6510 		}
6511 
6512 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6513 						    aconnector->port,
6514 						    pbn, pbn_div,
6515 						    true);
6516 		if (vcpi < 0)
6517 			return vcpi;
6518 
6519 		dm_conn_state->pbn = pbn;
6520 		dm_conn_state->vcpi_slots = vcpi;
6521 	}
6522 	return 0;
6523 }
6524 #endif
6525 
6526 static int to_drm_connector_type(enum signal_type st)
6527 {
6528 	switch (st) {
6529 	case SIGNAL_TYPE_HDMI_TYPE_A:
6530 		return DRM_MODE_CONNECTOR_HDMIA;
6531 	case SIGNAL_TYPE_EDP:
6532 		return DRM_MODE_CONNECTOR_eDP;
6533 	case SIGNAL_TYPE_LVDS:
6534 		return DRM_MODE_CONNECTOR_LVDS;
6535 	case SIGNAL_TYPE_RGB:
6536 		return DRM_MODE_CONNECTOR_VGA;
6537 	case SIGNAL_TYPE_DISPLAY_PORT:
6538 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6539 		return DRM_MODE_CONNECTOR_DisplayPort;
6540 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6541 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6542 		return DRM_MODE_CONNECTOR_DVID;
6543 	case SIGNAL_TYPE_VIRTUAL:
6544 		return DRM_MODE_CONNECTOR_VIRTUAL;
6545 
6546 	default:
6547 		return DRM_MODE_CONNECTOR_Unknown;
6548 	}
6549 }
6550 
6551 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6552 {
6553 	struct drm_encoder *encoder;
6554 
6555 	/* There is only one encoder per connector */
6556 	drm_connector_for_each_possible_encoder(connector, encoder)
6557 		return encoder;
6558 
6559 	return NULL;
6560 }
6561 
6562 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6563 {
6564 	struct drm_encoder *encoder;
6565 	struct amdgpu_encoder *amdgpu_encoder;
6566 
6567 	encoder = amdgpu_dm_connector_to_encoder(connector);
6568 
6569 	if (encoder == NULL)
6570 		return;
6571 
6572 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6573 
6574 	amdgpu_encoder->native_mode.clock = 0;
6575 
6576 	if (!list_empty(&connector->probed_modes)) {
6577 		struct drm_display_mode *preferred_mode = NULL;
6578 
6579 		list_for_each_entry(preferred_mode,
6580 				    &connector->probed_modes,
6581 				    head) {
6582 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6583 				amdgpu_encoder->native_mode = *preferred_mode;
6584 
6585 			break;
6586 		}
6587 
6588 	}
6589 }
6590 
6591 static struct drm_display_mode *
6592 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6593 			     char *name,
6594 			     int hdisplay, int vdisplay)
6595 {
6596 	struct drm_device *dev = encoder->dev;
6597 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6598 	struct drm_display_mode *mode = NULL;
6599 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6600 
6601 	mode = drm_mode_duplicate(dev, native_mode);
6602 
6603 	if (mode == NULL)
6604 		return NULL;
6605 
6606 	mode->hdisplay = hdisplay;
6607 	mode->vdisplay = vdisplay;
6608 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6609 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6610 
6611 	return mode;
6612 
6613 }
6614 
6615 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6616 						 struct drm_connector *connector)
6617 {
6618 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6619 	struct drm_display_mode *mode = NULL;
6620 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6621 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6622 				to_amdgpu_dm_connector(connector);
6623 	int i;
6624 	int n;
6625 	struct mode_size {
6626 		char name[DRM_DISPLAY_MODE_LEN];
6627 		int w;
6628 		int h;
6629 	} common_modes[] = {
6630 		{  "640x480",  640,  480},
6631 		{  "800x600",  800,  600},
6632 		{ "1024x768", 1024,  768},
6633 		{ "1280x720", 1280,  720},
6634 		{ "1280x800", 1280,  800},
6635 		{"1280x1024", 1280, 1024},
6636 		{ "1440x900", 1440,  900},
6637 		{"1680x1050", 1680, 1050},
6638 		{"1600x1200", 1600, 1200},
6639 		{"1920x1080", 1920, 1080},
6640 		{"1920x1200", 1920, 1200}
6641 	};
6642 
6643 	n = ARRAY_SIZE(common_modes);
6644 
6645 	for (i = 0; i < n; i++) {
6646 		struct drm_display_mode *curmode = NULL;
6647 		bool mode_existed = false;
6648 
6649 		if (common_modes[i].w > native_mode->hdisplay ||
6650 		    common_modes[i].h > native_mode->vdisplay ||
6651 		   (common_modes[i].w == native_mode->hdisplay &&
6652 		    common_modes[i].h == native_mode->vdisplay))
6653 			continue;
6654 
6655 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6656 			if (common_modes[i].w == curmode->hdisplay &&
6657 			    common_modes[i].h == curmode->vdisplay) {
6658 				mode_existed = true;
6659 				break;
6660 			}
6661 		}
6662 
6663 		if (mode_existed)
6664 			continue;
6665 
6666 		mode = amdgpu_dm_create_common_mode(encoder,
6667 				common_modes[i].name, common_modes[i].w,
6668 				common_modes[i].h);
6669 		if (!mode)
6670 			continue;
6671 
6672 		drm_mode_probed_add(connector, mode);
6673 		amdgpu_dm_connector->num_modes++;
6674 	}
6675 }
6676 
6677 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
6678 {
6679 	struct drm_encoder *encoder;
6680 	struct amdgpu_encoder *amdgpu_encoder;
6681 	const struct drm_display_mode *native_mode;
6682 
6683 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
6684 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
6685 		return;
6686 
6687 	encoder = amdgpu_dm_connector_to_encoder(connector);
6688 	if (!encoder)
6689 		return;
6690 
6691 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6692 
6693 	native_mode = &amdgpu_encoder->native_mode;
6694 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
6695 		return;
6696 
6697 	drm_connector_set_panel_orientation_with_quirk(connector,
6698 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
6699 						       native_mode->hdisplay,
6700 						       native_mode->vdisplay);
6701 }
6702 
6703 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6704 					      struct edid *edid)
6705 {
6706 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6707 			to_amdgpu_dm_connector(connector);
6708 
6709 	if (edid) {
6710 		/* empty probed_modes */
6711 		INIT_LIST_HEAD(&connector->probed_modes);
6712 		amdgpu_dm_connector->num_modes =
6713 				drm_add_edid_modes(connector, edid);
6714 
6715 		/* sorting the probed modes before calling function
6716 		 * amdgpu_dm_get_native_mode() since EDID can have
6717 		 * more than one preferred mode. The modes that are
6718 		 * later in the probed mode list could be of higher
6719 		 * and preferred resolution. For example, 3840x2160
6720 		 * resolution in base EDID preferred timing and 4096x2160
6721 		 * preferred resolution in DID extension block later.
6722 		 */
6723 		drm_mode_sort(&connector->probed_modes);
6724 		amdgpu_dm_get_native_mode(connector);
6725 
6726 		/* Freesync capabilities are reset by calling
6727 		 * drm_add_edid_modes() and need to be
6728 		 * restored here.
6729 		 */
6730 		amdgpu_dm_update_freesync_caps(connector, edid);
6731 
6732 		amdgpu_set_panel_orientation(connector);
6733 	} else {
6734 		amdgpu_dm_connector->num_modes = 0;
6735 	}
6736 }
6737 
6738 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
6739 			      struct drm_display_mode *mode)
6740 {
6741 	struct drm_display_mode *m;
6742 
6743 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
6744 		if (drm_mode_equal(m, mode))
6745 			return true;
6746 	}
6747 
6748 	return false;
6749 }
6750 
6751 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
6752 {
6753 	const struct drm_display_mode *m;
6754 	struct drm_display_mode *new_mode;
6755 	uint i;
6756 	uint32_t new_modes_count = 0;
6757 
6758 	/* Standard FPS values
6759 	 *
6760 	 * 23.976       - TV/NTSC
6761 	 * 24 	        - Cinema
6762 	 * 25 	        - TV/PAL
6763 	 * 29.97        - TV/NTSC
6764 	 * 30 	        - TV/NTSC
6765 	 * 48 	        - Cinema HFR
6766 	 * 50 	        - TV/PAL
6767 	 * 60 	        - Commonly used
6768 	 * 48,72,96,120 - Multiples of 24
6769 	 */
6770 	static const uint32_t common_rates[] = {
6771 		23976, 24000, 25000, 29970, 30000,
6772 		48000, 50000, 60000, 72000, 96000, 120000
6773 	};
6774 
6775 	/*
6776 	 * Find mode with highest refresh rate with the same resolution
6777 	 * as the preferred mode. Some monitors report a preferred mode
6778 	 * with lower resolution than the highest refresh rate supported.
6779 	 */
6780 
6781 	m = get_highest_refresh_rate_mode(aconnector, true);
6782 	if (!m)
6783 		return 0;
6784 
6785 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
6786 		uint64_t target_vtotal, target_vtotal_diff;
6787 		uint64_t num, den;
6788 
6789 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
6790 			continue;
6791 
6792 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
6793 		    common_rates[i] > aconnector->max_vfreq * 1000)
6794 			continue;
6795 
6796 		num = (unsigned long long)m->clock * 1000 * 1000;
6797 		den = common_rates[i] * (unsigned long long)m->htotal;
6798 		target_vtotal = div_u64(num, den);
6799 		target_vtotal_diff = target_vtotal - m->vtotal;
6800 
6801 		/* Check for illegal modes */
6802 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
6803 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
6804 		    m->vtotal + target_vtotal_diff < m->vsync_end)
6805 			continue;
6806 
6807 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
6808 		if (!new_mode)
6809 			goto out;
6810 
6811 		new_mode->vtotal += (u16)target_vtotal_diff;
6812 		new_mode->vsync_start += (u16)target_vtotal_diff;
6813 		new_mode->vsync_end += (u16)target_vtotal_diff;
6814 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6815 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
6816 
6817 		if (!is_duplicate_mode(aconnector, new_mode)) {
6818 			drm_mode_probed_add(&aconnector->base, new_mode);
6819 			new_modes_count += 1;
6820 		} else
6821 			drm_mode_destroy(aconnector->base.dev, new_mode);
6822 	}
6823  out:
6824 	return new_modes_count;
6825 }
6826 
6827 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
6828 						   struct edid *edid)
6829 {
6830 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6831 		to_amdgpu_dm_connector(connector);
6832 
6833 	if (!edid)
6834 		return;
6835 
6836 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
6837 		amdgpu_dm_connector->num_modes +=
6838 			add_fs_modes(amdgpu_dm_connector);
6839 }
6840 
6841 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6842 {
6843 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6844 			to_amdgpu_dm_connector(connector);
6845 	struct drm_encoder *encoder;
6846 	struct edid *edid = amdgpu_dm_connector->edid;
6847 
6848 	encoder = amdgpu_dm_connector_to_encoder(connector);
6849 
6850 	if (!drm_edid_is_valid(edid)) {
6851 		amdgpu_dm_connector->num_modes =
6852 				drm_add_modes_noedid(connector, 640, 480);
6853 	} else {
6854 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6855 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6856 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
6857 	}
6858 	amdgpu_dm_fbc_init(connector);
6859 
6860 	return amdgpu_dm_connector->num_modes;
6861 }
6862 
6863 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6864 				     struct amdgpu_dm_connector *aconnector,
6865 				     int connector_type,
6866 				     struct dc_link *link,
6867 				     int link_index)
6868 {
6869 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6870 
6871 	/*
6872 	 * Some of the properties below require access to state, like bpc.
6873 	 * Allocate some default initial connector state with our reset helper.
6874 	 */
6875 	if (aconnector->base.funcs->reset)
6876 		aconnector->base.funcs->reset(&aconnector->base);
6877 
6878 	aconnector->connector_id = link_index;
6879 	aconnector->dc_link = link;
6880 	aconnector->base.interlace_allowed = false;
6881 	aconnector->base.doublescan_allowed = false;
6882 	aconnector->base.stereo_allowed = false;
6883 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6884 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6885 	aconnector->audio_inst = -1;
6886 	mutex_init(&aconnector->hpd_lock);
6887 
6888 	/*
6889 	 * configure support HPD hot plug connector_>polled default value is 0
6890 	 * which means HPD hot plug not supported
6891 	 */
6892 	switch (connector_type) {
6893 	case DRM_MODE_CONNECTOR_HDMIA:
6894 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6895 		aconnector->base.ycbcr_420_allowed =
6896 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6897 		break;
6898 	case DRM_MODE_CONNECTOR_DisplayPort:
6899 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6900 		link->link_enc = link_enc_cfg_get_link_enc(link);
6901 		ASSERT(link->link_enc);
6902 		if (link->link_enc)
6903 			aconnector->base.ycbcr_420_allowed =
6904 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6905 		break;
6906 	case DRM_MODE_CONNECTOR_DVID:
6907 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6908 		break;
6909 	default:
6910 		break;
6911 	}
6912 
6913 	drm_object_attach_property(&aconnector->base.base,
6914 				dm->ddev->mode_config.scaling_mode_property,
6915 				DRM_MODE_SCALE_NONE);
6916 
6917 	drm_object_attach_property(&aconnector->base.base,
6918 				adev->mode_info.underscan_property,
6919 				UNDERSCAN_OFF);
6920 	drm_object_attach_property(&aconnector->base.base,
6921 				adev->mode_info.underscan_hborder_property,
6922 				0);
6923 	drm_object_attach_property(&aconnector->base.base,
6924 				adev->mode_info.underscan_vborder_property,
6925 				0);
6926 
6927 	if (!aconnector->mst_port)
6928 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6929 
6930 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6931 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6932 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6933 
6934 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6935 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6936 		drm_object_attach_property(&aconnector->base.base,
6937 				adev->mode_info.abm_level_property, 0);
6938 	}
6939 
6940 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6941 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6942 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6943 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
6944 
6945 		if (!aconnector->mst_port)
6946 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6947 
6948 #ifdef CONFIG_DRM_AMD_DC_HDCP
6949 		if (adev->dm.hdcp_workqueue)
6950 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6951 #endif
6952 	}
6953 }
6954 
6955 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6956 			      struct i2c_msg *msgs, int num)
6957 {
6958 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6959 	struct ddc_service *ddc_service = i2c->ddc_service;
6960 	struct i2c_command cmd;
6961 	int i;
6962 	int result = -EIO;
6963 
6964 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6965 
6966 	if (!cmd.payloads)
6967 		return result;
6968 
6969 	cmd.number_of_payloads = num;
6970 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6971 	cmd.speed = 100;
6972 
6973 	for (i = 0; i < num; i++) {
6974 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6975 		cmd.payloads[i].address = msgs[i].addr;
6976 		cmd.payloads[i].length = msgs[i].len;
6977 		cmd.payloads[i].data = msgs[i].buf;
6978 	}
6979 
6980 	if (dc_submit_i2c(
6981 			ddc_service->ctx->dc,
6982 			ddc_service->link->link_index,
6983 			&cmd))
6984 		result = num;
6985 
6986 	kfree(cmd.payloads);
6987 	return result;
6988 }
6989 
6990 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6991 {
6992 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6993 }
6994 
6995 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6996 	.master_xfer = amdgpu_dm_i2c_xfer,
6997 	.functionality = amdgpu_dm_i2c_func,
6998 };
6999 
7000 static struct amdgpu_i2c_adapter *
7001 create_i2c(struct ddc_service *ddc_service,
7002 	   int link_index,
7003 	   int *res)
7004 {
7005 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7006 	struct amdgpu_i2c_adapter *i2c;
7007 
7008 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7009 	if (!i2c)
7010 		return NULL;
7011 	i2c->base.owner = THIS_MODULE;
7012 	i2c->base.class = I2C_CLASS_DDC;
7013 	i2c->base.dev.parent = &adev->pdev->dev;
7014 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7015 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7016 	i2c_set_adapdata(&i2c->base, i2c);
7017 	i2c->ddc_service = ddc_service;
7018 
7019 	return i2c;
7020 }
7021 
7022 
7023 /*
7024  * Note: this function assumes that dc_link_detect() was called for the
7025  * dc_link which will be represented by this aconnector.
7026  */
7027 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7028 				    struct amdgpu_dm_connector *aconnector,
7029 				    uint32_t link_index,
7030 				    struct amdgpu_encoder *aencoder)
7031 {
7032 	int res = 0;
7033 	int connector_type;
7034 	struct dc *dc = dm->dc;
7035 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7036 	struct amdgpu_i2c_adapter *i2c;
7037 
7038 	link->priv = aconnector;
7039 
7040 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7041 
7042 	i2c = create_i2c(link->ddc, link->link_index, &res);
7043 	if (!i2c) {
7044 		DRM_ERROR("Failed to create i2c adapter data\n");
7045 		return -ENOMEM;
7046 	}
7047 
7048 	aconnector->i2c = i2c;
7049 	res = i2c_add_adapter(&i2c->base);
7050 
7051 	if (res) {
7052 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7053 		goto out_free;
7054 	}
7055 
7056 	connector_type = to_drm_connector_type(link->connector_signal);
7057 
7058 	res = drm_connector_init_with_ddc(
7059 			dm->ddev,
7060 			&aconnector->base,
7061 			&amdgpu_dm_connector_funcs,
7062 			connector_type,
7063 			&i2c->base);
7064 
7065 	if (res) {
7066 		DRM_ERROR("connector_init failed\n");
7067 		aconnector->connector_id = -1;
7068 		goto out_free;
7069 	}
7070 
7071 	drm_connector_helper_add(
7072 			&aconnector->base,
7073 			&amdgpu_dm_connector_helper_funcs);
7074 
7075 	amdgpu_dm_connector_init_helper(
7076 		dm,
7077 		aconnector,
7078 		connector_type,
7079 		link,
7080 		link_index);
7081 
7082 	drm_connector_attach_encoder(
7083 		&aconnector->base, &aencoder->base);
7084 
7085 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7086 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7087 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7088 
7089 out_free:
7090 	if (res) {
7091 		kfree(i2c);
7092 		aconnector->i2c = NULL;
7093 	}
7094 	return res;
7095 }
7096 
7097 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7098 {
7099 	switch (adev->mode_info.num_crtc) {
7100 	case 1:
7101 		return 0x1;
7102 	case 2:
7103 		return 0x3;
7104 	case 3:
7105 		return 0x7;
7106 	case 4:
7107 		return 0xf;
7108 	case 5:
7109 		return 0x1f;
7110 	case 6:
7111 	default:
7112 		return 0x3f;
7113 	}
7114 }
7115 
7116 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7117 				  struct amdgpu_encoder *aencoder,
7118 				  uint32_t link_index)
7119 {
7120 	struct amdgpu_device *adev = drm_to_adev(dev);
7121 
7122 	int res = drm_encoder_init(dev,
7123 				   &aencoder->base,
7124 				   &amdgpu_dm_encoder_funcs,
7125 				   DRM_MODE_ENCODER_TMDS,
7126 				   NULL);
7127 
7128 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7129 
7130 	if (!res)
7131 		aencoder->encoder_id = link_index;
7132 	else
7133 		aencoder->encoder_id = -1;
7134 
7135 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7136 
7137 	return res;
7138 }
7139 
7140 static void manage_dm_interrupts(struct amdgpu_device *adev,
7141 				 struct amdgpu_crtc *acrtc,
7142 				 bool enable)
7143 {
7144 	/*
7145 	 * We have no guarantee that the frontend index maps to the same
7146 	 * backend index - some even map to more than one.
7147 	 *
7148 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7149 	 */
7150 	int irq_type =
7151 		amdgpu_display_crtc_idx_to_irq_type(
7152 			adev,
7153 			acrtc->crtc_id);
7154 
7155 	if (enable) {
7156 		drm_crtc_vblank_on(&acrtc->base);
7157 		amdgpu_irq_get(
7158 			adev,
7159 			&adev->pageflip_irq,
7160 			irq_type);
7161 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7162 		amdgpu_irq_get(
7163 			adev,
7164 			&adev->vline0_irq,
7165 			irq_type);
7166 #endif
7167 	} else {
7168 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7169 		amdgpu_irq_put(
7170 			adev,
7171 			&adev->vline0_irq,
7172 			irq_type);
7173 #endif
7174 		amdgpu_irq_put(
7175 			adev,
7176 			&adev->pageflip_irq,
7177 			irq_type);
7178 		drm_crtc_vblank_off(&acrtc->base);
7179 	}
7180 }
7181 
7182 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7183 				      struct amdgpu_crtc *acrtc)
7184 {
7185 	int irq_type =
7186 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7187 
7188 	/**
7189 	 * This reads the current state for the IRQ and force reapplies
7190 	 * the setting to hardware.
7191 	 */
7192 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7193 }
7194 
7195 static bool
7196 is_scaling_state_different(const struct dm_connector_state *dm_state,
7197 			   const struct dm_connector_state *old_dm_state)
7198 {
7199 	if (dm_state->scaling != old_dm_state->scaling)
7200 		return true;
7201 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7202 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7203 			return true;
7204 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7205 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7206 			return true;
7207 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7208 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7209 		return true;
7210 	return false;
7211 }
7212 
7213 #ifdef CONFIG_DRM_AMD_DC_HDCP
7214 static bool is_content_protection_different(struct drm_connector_state *state,
7215 					    const struct drm_connector_state *old_state,
7216 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7217 {
7218 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7219 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7220 
7221 	/* Handle: Type0/1 change */
7222 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7223 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7224 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7225 		return true;
7226 	}
7227 
7228 	/* CP is being re enabled, ignore this
7229 	 *
7230 	 * Handles:	ENABLED -> DESIRED
7231 	 */
7232 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7233 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7234 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7235 		return false;
7236 	}
7237 
7238 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7239 	 *
7240 	 * Handles:	UNDESIRED -> ENABLED
7241 	 */
7242 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7243 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7244 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7245 
7246 	/* Stream removed and re-enabled
7247 	 *
7248 	 * Can sometimes overlap with the HPD case,
7249 	 * thus set update_hdcp to false to avoid
7250 	 * setting HDCP multiple times.
7251 	 *
7252 	 * Handles:	DESIRED -> DESIRED (Special case)
7253 	 */
7254 	if (!(old_state->crtc && old_state->crtc->enabled) &&
7255 		state->crtc && state->crtc->enabled &&
7256 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7257 		dm_con_state->update_hdcp = false;
7258 		return true;
7259 	}
7260 
7261 	/* Hot-plug, headless s3, dpms
7262 	 *
7263 	 * Only start HDCP if the display is connected/enabled.
7264 	 * update_hdcp flag will be set to false until the next
7265 	 * HPD comes in.
7266 	 *
7267 	 * Handles:	DESIRED -> DESIRED (Special case)
7268 	 */
7269 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7270 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7271 		dm_con_state->update_hdcp = false;
7272 		return true;
7273 	}
7274 
7275 	/*
7276 	 * Handles:	UNDESIRED -> UNDESIRED
7277 	 *		DESIRED -> DESIRED
7278 	 *		ENABLED -> ENABLED
7279 	 */
7280 	if (old_state->content_protection == state->content_protection)
7281 		return false;
7282 
7283 	/*
7284 	 * Handles:	UNDESIRED -> DESIRED
7285 	 *		DESIRED -> UNDESIRED
7286 	 *		ENABLED -> UNDESIRED
7287 	 */
7288 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7289 		return true;
7290 
7291 	/*
7292 	 * Handles:	DESIRED -> ENABLED
7293 	 */
7294 	return false;
7295 }
7296 
7297 #endif
7298 static void remove_stream(struct amdgpu_device *adev,
7299 			  struct amdgpu_crtc *acrtc,
7300 			  struct dc_stream_state *stream)
7301 {
7302 	/* this is the update mode case */
7303 
7304 	acrtc->otg_inst = -1;
7305 	acrtc->enabled = false;
7306 }
7307 
7308 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7309 {
7310 
7311 	assert_spin_locked(&acrtc->base.dev->event_lock);
7312 	WARN_ON(acrtc->event);
7313 
7314 	acrtc->event = acrtc->base.state->event;
7315 
7316 	/* Set the flip status */
7317 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7318 
7319 	/* Mark this event as consumed */
7320 	acrtc->base.state->event = NULL;
7321 
7322 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7323 		     acrtc->crtc_id);
7324 }
7325 
7326 static void update_freesync_state_on_stream(
7327 	struct amdgpu_display_manager *dm,
7328 	struct dm_crtc_state *new_crtc_state,
7329 	struct dc_stream_state *new_stream,
7330 	struct dc_plane_state *surface,
7331 	u32 flip_timestamp_in_us)
7332 {
7333 	struct mod_vrr_params vrr_params;
7334 	struct dc_info_packet vrr_infopacket = {0};
7335 	struct amdgpu_device *adev = dm->adev;
7336 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7337 	unsigned long flags;
7338 	bool pack_sdp_v1_3 = false;
7339 
7340 	if (!new_stream)
7341 		return;
7342 
7343 	/*
7344 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7345 	 * For now it's sufficient to just guard against these conditions.
7346 	 */
7347 
7348 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7349 		return;
7350 
7351 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7352         vrr_params = acrtc->dm_irq_params.vrr_params;
7353 
7354 	if (surface) {
7355 		mod_freesync_handle_preflip(
7356 			dm->freesync_module,
7357 			surface,
7358 			new_stream,
7359 			flip_timestamp_in_us,
7360 			&vrr_params);
7361 
7362 		if (adev->family < AMDGPU_FAMILY_AI &&
7363 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7364 			mod_freesync_handle_v_update(dm->freesync_module,
7365 						     new_stream, &vrr_params);
7366 
7367 			/* Need to call this before the frame ends. */
7368 			dc_stream_adjust_vmin_vmax(dm->dc,
7369 						   new_crtc_state->stream,
7370 						   &vrr_params.adjust);
7371 		}
7372 	}
7373 
7374 	mod_freesync_build_vrr_infopacket(
7375 		dm->freesync_module,
7376 		new_stream,
7377 		&vrr_params,
7378 		PACKET_TYPE_VRR,
7379 		TRANSFER_FUNC_UNKNOWN,
7380 		&vrr_infopacket,
7381 		pack_sdp_v1_3);
7382 
7383 	new_crtc_state->freesync_timing_changed |=
7384 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7385 			&vrr_params.adjust,
7386 			sizeof(vrr_params.adjust)) != 0);
7387 
7388 	new_crtc_state->freesync_vrr_info_changed |=
7389 		(memcmp(&new_crtc_state->vrr_infopacket,
7390 			&vrr_infopacket,
7391 			sizeof(vrr_infopacket)) != 0);
7392 
7393 	acrtc->dm_irq_params.vrr_params = vrr_params;
7394 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7395 
7396 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7397 	new_stream->vrr_infopacket = vrr_infopacket;
7398 
7399 	if (new_crtc_state->freesync_vrr_info_changed)
7400 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7401 			      new_crtc_state->base.crtc->base.id,
7402 			      (int)new_crtc_state->base.vrr_enabled,
7403 			      (int)vrr_params.state);
7404 
7405 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7406 }
7407 
7408 static void update_stream_irq_parameters(
7409 	struct amdgpu_display_manager *dm,
7410 	struct dm_crtc_state *new_crtc_state)
7411 {
7412 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7413 	struct mod_vrr_params vrr_params;
7414 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7415 	struct amdgpu_device *adev = dm->adev;
7416 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7417 	unsigned long flags;
7418 
7419 	if (!new_stream)
7420 		return;
7421 
7422 	/*
7423 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7424 	 * For now it's sufficient to just guard against these conditions.
7425 	 */
7426 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7427 		return;
7428 
7429 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7430 	vrr_params = acrtc->dm_irq_params.vrr_params;
7431 
7432 	if (new_crtc_state->vrr_supported &&
7433 	    config.min_refresh_in_uhz &&
7434 	    config.max_refresh_in_uhz) {
7435 		/*
7436 		 * if freesync compatible mode was set, config.state will be set
7437 		 * in atomic check
7438 		 */
7439 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7440 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7441 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7442 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7443 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7444 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7445 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7446 		} else {
7447 			config.state = new_crtc_state->base.vrr_enabled ?
7448 						     VRR_STATE_ACTIVE_VARIABLE :
7449 						     VRR_STATE_INACTIVE;
7450 		}
7451 	} else {
7452 		config.state = VRR_STATE_UNSUPPORTED;
7453 	}
7454 
7455 	mod_freesync_build_vrr_params(dm->freesync_module,
7456 				      new_stream,
7457 				      &config, &vrr_params);
7458 
7459 	new_crtc_state->freesync_timing_changed |=
7460 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7461 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7462 
7463 	new_crtc_state->freesync_config = config;
7464 	/* Copy state for access from DM IRQ handler */
7465 	acrtc->dm_irq_params.freesync_config = config;
7466 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7467 	acrtc->dm_irq_params.vrr_params = vrr_params;
7468 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7469 }
7470 
7471 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7472 					    struct dm_crtc_state *new_state)
7473 {
7474 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7475 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7476 
7477 	if (!old_vrr_active && new_vrr_active) {
7478 		/* Transition VRR inactive -> active:
7479 		 * While VRR is active, we must not disable vblank irq, as a
7480 		 * reenable after disable would compute bogus vblank/pflip
7481 		 * timestamps if it likely happened inside display front-porch.
7482 		 *
7483 		 * We also need vupdate irq for the actual core vblank handling
7484 		 * at end of vblank.
7485 		 */
7486 		dm_set_vupdate_irq(new_state->base.crtc, true);
7487 		drm_crtc_vblank_get(new_state->base.crtc);
7488 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7489 				 __func__, new_state->base.crtc->base.id);
7490 	} else if (old_vrr_active && !new_vrr_active) {
7491 		/* Transition VRR active -> inactive:
7492 		 * Allow vblank irq disable again for fixed refresh rate.
7493 		 */
7494 		dm_set_vupdate_irq(new_state->base.crtc, false);
7495 		drm_crtc_vblank_put(new_state->base.crtc);
7496 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7497 				 __func__, new_state->base.crtc->base.id);
7498 	}
7499 }
7500 
7501 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7502 {
7503 	struct drm_plane *plane;
7504 	struct drm_plane_state *old_plane_state;
7505 	int i;
7506 
7507 	/*
7508 	 * TODO: Make this per-stream so we don't issue redundant updates for
7509 	 * commits with multiple streams.
7510 	 */
7511 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
7512 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7513 			handle_cursor_update(plane, old_plane_state);
7514 }
7515 
7516 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7517 				    struct dc_state *dc_state,
7518 				    struct drm_device *dev,
7519 				    struct amdgpu_display_manager *dm,
7520 				    struct drm_crtc *pcrtc,
7521 				    bool wait_for_vblank)
7522 {
7523 	uint32_t i;
7524 	uint64_t timestamp_ns;
7525 	struct drm_plane *plane;
7526 	struct drm_plane_state *old_plane_state, *new_plane_state;
7527 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7528 	struct drm_crtc_state *new_pcrtc_state =
7529 			drm_atomic_get_new_crtc_state(state, pcrtc);
7530 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7531 	struct dm_crtc_state *dm_old_crtc_state =
7532 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7533 	int planes_count = 0, vpos, hpos;
7534 	unsigned long flags;
7535 	uint32_t target_vblank, last_flip_vblank;
7536 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7537 	bool cursor_update = false;
7538 	bool pflip_present = false;
7539 	struct {
7540 		struct dc_surface_update surface_updates[MAX_SURFACES];
7541 		struct dc_plane_info plane_infos[MAX_SURFACES];
7542 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7543 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7544 		struct dc_stream_update stream_update;
7545 	} *bundle;
7546 
7547 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7548 
7549 	if (!bundle) {
7550 		dm_error("Failed to allocate update bundle\n");
7551 		goto cleanup;
7552 	}
7553 
7554 	/*
7555 	 * Disable the cursor first if we're disabling all the planes.
7556 	 * It'll remain on the screen after the planes are re-enabled
7557 	 * if we don't.
7558 	 */
7559 	if (acrtc_state->active_planes == 0)
7560 		amdgpu_dm_commit_cursors(state);
7561 
7562 	/* update planes when needed */
7563 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7564 		struct drm_crtc *crtc = new_plane_state->crtc;
7565 		struct drm_crtc_state *new_crtc_state;
7566 		struct drm_framebuffer *fb = new_plane_state->fb;
7567 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7568 		bool plane_needs_flip;
7569 		struct dc_plane_state *dc_plane;
7570 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7571 
7572 		/* Cursor plane is handled after stream updates */
7573 		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7574 			if ((fb && crtc == pcrtc) ||
7575 			    (old_plane_state->fb && old_plane_state->crtc == pcrtc))
7576 				cursor_update = true;
7577 
7578 			continue;
7579 		}
7580 
7581 		if (!fb || !crtc || pcrtc != crtc)
7582 			continue;
7583 
7584 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7585 		if (!new_crtc_state->active)
7586 			continue;
7587 
7588 		dc_plane = dm_new_plane_state->dc_state;
7589 
7590 		bundle->surface_updates[planes_count].surface = dc_plane;
7591 		if (new_pcrtc_state->color_mgmt_changed) {
7592 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7593 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7594 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7595 		}
7596 
7597 		fill_dc_scaling_info(dm->adev, new_plane_state,
7598 				     &bundle->scaling_infos[planes_count]);
7599 
7600 		bundle->surface_updates[planes_count].scaling_info =
7601 			&bundle->scaling_infos[planes_count];
7602 
7603 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7604 
7605 		pflip_present = pflip_present || plane_needs_flip;
7606 
7607 		if (!plane_needs_flip) {
7608 			planes_count += 1;
7609 			continue;
7610 		}
7611 
7612 		fill_dc_plane_info_and_addr(
7613 			dm->adev, new_plane_state,
7614 			afb->tiling_flags,
7615 			&bundle->plane_infos[planes_count],
7616 			&bundle->flip_addrs[planes_count].address,
7617 			afb->tmz_surface, false);
7618 
7619 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
7620 				 new_plane_state->plane->index,
7621 				 bundle->plane_infos[planes_count].dcc.enable);
7622 
7623 		bundle->surface_updates[planes_count].plane_info =
7624 			&bundle->plane_infos[planes_count];
7625 
7626 		fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
7627 				    new_crtc_state,
7628 				    &bundle->flip_addrs[planes_count]);
7629 
7630 		/*
7631 		 * Only allow immediate flips for fast updates that don't
7632 		 * change FB pitch, DCC state, rotation or mirroing.
7633 		 */
7634 		bundle->flip_addrs[planes_count].flip_immediate =
7635 			crtc->state->async_flip &&
7636 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7637 
7638 		timestamp_ns = ktime_get_ns();
7639 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7640 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7641 		bundle->surface_updates[planes_count].surface = dc_plane;
7642 
7643 		if (!bundle->surface_updates[planes_count].surface) {
7644 			DRM_ERROR("No surface for CRTC: id=%d\n",
7645 					acrtc_attach->crtc_id);
7646 			continue;
7647 		}
7648 
7649 		if (plane == pcrtc->primary)
7650 			update_freesync_state_on_stream(
7651 				dm,
7652 				acrtc_state,
7653 				acrtc_state->stream,
7654 				dc_plane,
7655 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7656 
7657 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
7658 				 __func__,
7659 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7660 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7661 
7662 		planes_count += 1;
7663 
7664 	}
7665 
7666 	if (pflip_present) {
7667 		if (!vrr_active) {
7668 			/* Use old throttling in non-vrr fixed refresh rate mode
7669 			 * to keep flip scheduling based on target vblank counts
7670 			 * working in a backwards compatible way, e.g., for
7671 			 * clients using the GLX_OML_sync_control extension or
7672 			 * DRI3/Present extension with defined target_msc.
7673 			 */
7674 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7675 		}
7676 		else {
7677 			/* For variable refresh rate mode only:
7678 			 * Get vblank of last completed flip to avoid > 1 vrr
7679 			 * flips per video frame by use of throttling, but allow
7680 			 * flip programming anywhere in the possibly large
7681 			 * variable vrr vblank interval for fine-grained flip
7682 			 * timing control and more opportunity to avoid stutter
7683 			 * on late submission of flips.
7684 			 */
7685 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7686 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7687 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7688 		}
7689 
7690 		target_vblank = last_flip_vblank + wait_for_vblank;
7691 
7692 		/*
7693 		 * Wait until we're out of the vertical blank period before the one
7694 		 * targeted by the flip
7695 		 */
7696 		while ((acrtc_attach->enabled &&
7697 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7698 							    0, &vpos, &hpos, NULL,
7699 							    NULL, &pcrtc->hwmode)
7700 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7701 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7702 			(int)(target_vblank -
7703 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7704 			usleep_range(1000, 1100);
7705 		}
7706 
7707 		/**
7708 		 * Prepare the flip event for the pageflip interrupt to handle.
7709 		 *
7710 		 * This only works in the case where we've already turned on the
7711 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7712 		 * from 0 -> n planes we have to skip a hardware generated event
7713 		 * and rely on sending it from software.
7714 		 */
7715 		if (acrtc_attach->base.state->event &&
7716 		    acrtc_state->active_planes > 0) {
7717 			drm_crtc_vblank_get(pcrtc);
7718 
7719 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7720 
7721 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7722 			prepare_flip_isr(acrtc_attach);
7723 
7724 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7725 		}
7726 
7727 		if (acrtc_state->stream) {
7728 			if (acrtc_state->freesync_vrr_info_changed)
7729 				bundle->stream_update.vrr_infopacket =
7730 					&acrtc_state->stream->vrr_infopacket;
7731 		}
7732 	} else if (cursor_update && acrtc_state->active_planes > 0 &&
7733 		   acrtc_attach->base.state->event) {
7734 		drm_crtc_vblank_get(pcrtc);
7735 
7736 		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7737 
7738 		acrtc_attach->event = acrtc_attach->base.state->event;
7739 		acrtc_attach->base.state->event = NULL;
7740 
7741 		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7742 	}
7743 
7744 	/* Update the planes if changed or disable if we don't have any. */
7745 	if ((planes_count || acrtc_state->active_planes == 0) &&
7746 		acrtc_state->stream) {
7747 		/*
7748 		 * If PSR or idle optimizations are enabled then flush out
7749 		 * any pending work before hardware programming.
7750 		 */
7751 		if (dm->vblank_control_workqueue)
7752 			flush_workqueue(dm->vblank_control_workqueue);
7753 
7754 		bundle->stream_update.stream = acrtc_state->stream;
7755 		if (new_pcrtc_state->mode_changed) {
7756 			bundle->stream_update.src = acrtc_state->stream->src;
7757 			bundle->stream_update.dst = acrtc_state->stream->dst;
7758 		}
7759 
7760 		if (new_pcrtc_state->color_mgmt_changed) {
7761 			/*
7762 			 * TODO: This isn't fully correct since we've actually
7763 			 * already modified the stream in place.
7764 			 */
7765 			bundle->stream_update.gamut_remap =
7766 				&acrtc_state->stream->gamut_remap_matrix;
7767 			bundle->stream_update.output_csc_transform =
7768 				&acrtc_state->stream->csc_color_matrix;
7769 			bundle->stream_update.out_transfer_func =
7770 				acrtc_state->stream->out_transfer_func;
7771 		}
7772 
7773 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7774 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7775 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7776 
7777 		/*
7778 		 * If FreeSync state on the stream has changed then we need to
7779 		 * re-adjust the min/max bounds now that DC doesn't handle this
7780 		 * as part of commit.
7781 		 */
7782 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
7783 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7784 			dc_stream_adjust_vmin_vmax(
7785 				dm->dc, acrtc_state->stream,
7786 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7787 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7788 		}
7789 		mutex_lock(&dm->dc_lock);
7790 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7791 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7792 			amdgpu_dm_psr_disable(acrtc_state->stream);
7793 
7794 		dc_commit_updates_for_stream(dm->dc,
7795 						     bundle->surface_updates,
7796 						     planes_count,
7797 						     acrtc_state->stream,
7798 						     &bundle->stream_update,
7799 						     dc_state);
7800 
7801 		/**
7802 		 * Enable or disable the interrupts on the backend.
7803 		 *
7804 		 * Most pipes are put into power gating when unused.
7805 		 *
7806 		 * When power gating is enabled on a pipe we lose the
7807 		 * interrupt enablement state when power gating is disabled.
7808 		 *
7809 		 * So we need to update the IRQ control state in hardware
7810 		 * whenever the pipe turns on (since it could be previously
7811 		 * power gated) or off (since some pipes can't be power gated
7812 		 * on some ASICs).
7813 		 */
7814 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7815 			dm_update_pflip_irq_state(drm_to_adev(dev),
7816 						  acrtc_attach);
7817 
7818 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7819 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7820 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7821 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7822 
7823 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
7824 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
7825 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
7826 			struct amdgpu_dm_connector *aconn =
7827 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
7828 
7829 			if (aconn->psr_skip_count > 0)
7830 				aconn->psr_skip_count--;
7831 
7832 			/* Allow PSR when skip count is 0. */
7833 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
7834 
7835 			/*
7836 			 * If sink supports PSR SU, there is no need to rely on
7837 			 * a vblank event disable request to enable PSR. PSR SU
7838 			 * can be enabled immediately once OS demonstrates an
7839 			 * adequate number of fast atomic commits to notify KMD
7840 			 * of update events. See `vblank_control_worker()`.
7841 			 */
7842 			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
7843 			    acrtc_attach->dm_irq_params.allow_psr_entry &&
7844 			    !acrtc_state->stream->link->psr_settings.psr_allow_active)
7845 				amdgpu_dm_psr_enable(acrtc_state->stream);
7846 		} else {
7847 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
7848 		}
7849 
7850 		mutex_unlock(&dm->dc_lock);
7851 	}
7852 
7853 	/*
7854 	 * Update cursor state *after* programming all the planes.
7855 	 * This avoids redundant programming in the case where we're going
7856 	 * to be disabling a single plane - those pipes are being disabled.
7857 	 */
7858 	if (acrtc_state->active_planes)
7859 		amdgpu_dm_commit_cursors(state);
7860 
7861 cleanup:
7862 	kfree(bundle);
7863 }
7864 
7865 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7866 				   struct drm_atomic_state *state)
7867 {
7868 	struct amdgpu_device *adev = drm_to_adev(dev);
7869 	struct amdgpu_dm_connector *aconnector;
7870 	struct drm_connector *connector;
7871 	struct drm_connector_state *old_con_state, *new_con_state;
7872 	struct drm_crtc_state *new_crtc_state;
7873 	struct dm_crtc_state *new_dm_crtc_state;
7874 	const struct dc_stream_status *status;
7875 	int i, inst;
7876 
7877 	/* Notify device removals. */
7878 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7879 		if (old_con_state->crtc != new_con_state->crtc) {
7880 			/* CRTC changes require notification. */
7881 			goto notify;
7882 		}
7883 
7884 		if (!new_con_state->crtc)
7885 			continue;
7886 
7887 		new_crtc_state = drm_atomic_get_new_crtc_state(
7888 			state, new_con_state->crtc);
7889 
7890 		if (!new_crtc_state)
7891 			continue;
7892 
7893 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7894 			continue;
7895 
7896 	notify:
7897 		aconnector = to_amdgpu_dm_connector(connector);
7898 
7899 		mutex_lock(&adev->dm.audio_lock);
7900 		inst = aconnector->audio_inst;
7901 		aconnector->audio_inst = -1;
7902 		mutex_unlock(&adev->dm.audio_lock);
7903 
7904 		amdgpu_dm_audio_eld_notify(adev, inst);
7905 	}
7906 
7907 	/* Notify audio device additions. */
7908 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7909 		if (!new_con_state->crtc)
7910 			continue;
7911 
7912 		new_crtc_state = drm_atomic_get_new_crtc_state(
7913 			state, new_con_state->crtc);
7914 
7915 		if (!new_crtc_state)
7916 			continue;
7917 
7918 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7919 			continue;
7920 
7921 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7922 		if (!new_dm_crtc_state->stream)
7923 			continue;
7924 
7925 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7926 		if (!status)
7927 			continue;
7928 
7929 		aconnector = to_amdgpu_dm_connector(connector);
7930 
7931 		mutex_lock(&adev->dm.audio_lock);
7932 		inst = status->audio_inst;
7933 		aconnector->audio_inst = inst;
7934 		mutex_unlock(&adev->dm.audio_lock);
7935 
7936 		amdgpu_dm_audio_eld_notify(adev, inst);
7937 	}
7938 }
7939 
7940 /*
7941  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7942  * @crtc_state: the DRM CRTC state
7943  * @stream_state: the DC stream state.
7944  *
7945  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7946  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7947  */
7948 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7949 						struct dc_stream_state *stream_state)
7950 {
7951 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7952 }
7953 
7954 /**
7955  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7956  * @state: The atomic state to commit
7957  *
7958  * This will tell DC to commit the constructed DC state from atomic_check,
7959  * programming the hardware. Any failures here implies a hardware failure, since
7960  * atomic check should have filtered anything non-kosher.
7961  */
7962 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7963 {
7964 	struct drm_device *dev = state->dev;
7965 	struct amdgpu_device *adev = drm_to_adev(dev);
7966 	struct amdgpu_display_manager *dm = &adev->dm;
7967 	struct dm_atomic_state *dm_state;
7968 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7969 	uint32_t i, j;
7970 	struct drm_crtc *crtc;
7971 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7972 	unsigned long flags;
7973 	bool wait_for_vblank = true;
7974 	struct drm_connector *connector;
7975 	struct drm_connector_state *old_con_state, *new_con_state;
7976 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7977 	int crtc_disable_count = 0;
7978 	bool mode_set_reset_required = false;
7979 	int r;
7980 
7981 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
7982 
7983 	r = drm_atomic_helper_wait_for_fences(dev, state, false);
7984 	if (unlikely(r))
7985 		DRM_ERROR("Waiting for fences timed out!");
7986 
7987 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7988 
7989 	dm_state = dm_atomic_get_new_state(state);
7990 	if (dm_state && dm_state->context) {
7991 		dc_state = dm_state->context;
7992 	} else {
7993 		/* No state changes, retain current state. */
7994 		dc_state_temp = dc_create_state(dm->dc);
7995 		ASSERT(dc_state_temp);
7996 		dc_state = dc_state_temp;
7997 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7998 	}
7999 
8000 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8001 				       new_crtc_state, i) {
8002 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8003 
8004 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8005 
8006 		if (old_crtc_state->active &&
8007 		    (!new_crtc_state->active ||
8008 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8009 			manage_dm_interrupts(adev, acrtc, false);
8010 			dc_stream_release(dm_old_crtc_state->stream);
8011 		}
8012 	}
8013 
8014 	drm_atomic_helper_calc_timestamping_constants(state);
8015 
8016 	/* update changed items */
8017 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8018 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8019 
8020 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8021 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8022 
8023 		drm_dbg_state(state->dev,
8024 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8025 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8026 			"connectors_changed:%d\n",
8027 			acrtc->crtc_id,
8028 			new_crtc_state->enable,
8029 			new_crtc_state->active,
8030 			new_crtc_state->planes_changed,
8031 			new_crtc_state->mode_changed,
8032 			new_crtc_state->active_changed,
8033 			new_crtc_state->connectors_changed);
8034 
8035 		/* Disable cursor if disabling crtc */
8036 		if (old_crtc_state->active && !new_crtc_state->active) {
8037 			struct dc_cursor_position position;
8038 
8039 			memset(&position, 0, sizeof(position));
8040 			mutex_lock(&dm->dc_lock);
8041 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8042 			mutex_unlock(&dm->dc_lock);
8043 		}
8044 
8045 		/* Copy all transient state flags into dc state */
8046 		if (dm_new_crtc_state->stream) {
8047 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8048 							    dm_new_crtc_state->stream);
8049 		}
8050 
8051 		/* handles headless hotplug case, updating new_state and
8052 		 * aconnector as needed
8053 		 */
8054 
8055 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8056 
8057 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8058 
8059 			if (!dm_new_crtc_state->stream) {
8060 				/*
8061 				 * this could happen because of issues with
8062 				 * userspace notifications delivery.
8063 				 * In this case userspace tries to set mode on
8064 				 * display which is disconnected in fact.
8065 				 * dc_sink is NULL in this case on aconnector.
8066 				 * We expect reset mode will come soon.
8067 				 *
8068 				 * This can also happen when unplug is done
8069 				 * during resume sequence ended
8070 				 *
8071 				 * In this case, we want to pretend we still
8072 				 * have a sink to keep the pipe running so that
8073 				 * hw state is consistent with the sw state
8074 				 */
8075 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8076 						__func__, acrtc->base.base.id);
8077 				continue;
8078 			}
8079 
8080 			if (dm_old_crtc_state->stream)
8081 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8082 
8083 			pm_runtime_get_noresume(dev->dev);
8084 
8085 			acrtc->enabled = true;
8086 			acrtc->hw_mode = new_crtc_state->mode;
8087 			crtc->hwmode = new_crtc_state->mode;
8088 			mode_set_reset_required = true;
8089 		} else if (modereset_required(new_crtc_state)) {
8090 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8091 			/* i.e. reset mode */
8092 			if (dm_old_crtc_state->stream)
8093 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8094 
8095 			mode_set_reset_required = true;
8096 		}
8097 	} /* for_each_crtc_in_state() */
8098 
8099 	if (dc_state) {
8100 		/* if there mode set or reset, disable eDP PSR */
8101 		if (mode_set_reset_required) {
8102 			if (dm->vblank_control_workqueue)
8103 				flush_workqueue(dm->vblank_control_workqueue);
8104 
8105 			amdgpu_dm_psr_disable_all(dm);
8106 		}
8107 
8108 		dm_enable_per_frame_crtc_master_sync(dc_state);
8109 		mutex_lock(&dm->dc_lock);
8110 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8111 
8112 		/* Allow idle optimization when vblank count is 0 for display off */
8113 		if (dm->active_vblank_irq_count == 0)
8114 			dc_allow_idle_optimizations(dm->dc, true);
8115 		mutex_unlock(&dm->dc_lock);
8116 	}
8117 
8118 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8119 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8120 
8121 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8122 
8123 		if (dm_new_crtc_state->stream != NULL) {
8124 			const struct dc_stream_status *status =
8125 					dc_stream_get_status(dm_new_crtc_state->stream);
8126 
8127 			if (!status)
8128 				status = dc_stream_get_status_from_state(dc_state,
8129 									 dm_new_crtc_state->stream);
8130 			if (!status)
8131 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8132 			else
8133 				acrtc->otg_inst = status->primary_otg_inst;
8134 		}
8135 	}
8136 #ifdef CONFIG_DRM_AMD_DC_HDCP
8137 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8138 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8139 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8140 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8141 
8142 		new_crtc_state = NULL;
8143 
8144 		if (acrtc)
8145 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8146 
8147 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8148 
8149 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8150 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8151 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8152 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8153 			dm_new_con_state->update_hdcp = true;
8154 			continue;
8155 		}
8156 
8157 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8158 			hdcp_update_display(
8159 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8160 				new_con_state->hdcp_content_type,
8161 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8162 	}
8163 #endif
8164 
8165 	/* Handle connector state changes */
8166 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8167 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8168 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8169 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8170 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8171 		struct dc_stream_update stream_update;
8172 		struct dc_info_packet hdr_packet;
8173 		struct dc_stream_status *status = NULL;
8174 		bool abm_changed, hdr_changed, scaling_changed;
8175 
8176 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8177 		memset(&stream_update, 0, sizeof(stream_update));
8178 
8179 		if (acrtc) {
8180 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8181 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8182 		}
8183 
8184 		/* Skip any modesets/resets */
8185 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8186 			continue;
8187 
8188 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8189 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8190 
8191 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8192 							     dm_old_con_state);
8193 
8194 		abm_changed = dm_new_crtc_state->abm_level !=
8195 			      dm_old_crtc_state->abm_level;
8196 
8197 		hdr_changed =
8198 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8199 
8200 		if (!scaling_changed && !abm_changed && !hdr_changed)
8201 			continue;
8202 
8203 		stream_update.stream = dm_new_crtc_state->stream;
8204 		if (scaling_changed) {
8205 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8206 					dm_new_con_state, dm_new_crtc_state->stream);
8207 
8208 			stream_update.src = dm_new_crtc_state->stream->src;
8209 			stream_update.dst = dm_new_crtc_state->stream->dst;
8210 		}
8211 
8212 		if (abm_changed) {
8213 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8214 
8215 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8216 		}
8217 
8218 		if (hdr_changed) {
8219 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8220 			stream_update.hdr_static_metadata = &hdr_packet;
8221 		}
8222 
8223 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8224 
8225 		if (WARN_ON(!status))
8226 			continue;
8227 
8228 		WARN_ON(!status->plane_count);
8229 
8230 		/*
8231 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8232 		 * Here we create an empty update on each plane.
8233 		 * To fix this, DC should permit updating only stream properties.
8234 		 */
8235 		for (j = 0; j < status->plane_count; j++)
8236 			dummy_updates[j].surface = status->plane_states[0];
8237 
8238 
8239 		mutex_lock(&dm->dc_lock);
8240 		dc_commit_updates_for_stream(dm->dc,
8241 						     dummy_updates,
8242 						     status->plane_count,
8243 						     dm_new_crtc_state->stream,
8244 						     &stream_update,
8245 						     dc_state);
8246 		mutex_unlock(&dm->dc_lock);
8247 	}
8248 
8249 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8250 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8251 				      new_crtc_state, i) {
8252 		if (old_crtc_state->active && !new_crtc_state->active)
8253 			crtc_disable_count++;
8254 
8255 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8256 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8257 
8258 		/* For freesync config update on crtc state and params for irq */
8259 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8260 
8261 		/* Handle vrr on->off / off->on transitions */
8262 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8263 						dm_new_crtc_state);
8264 	}
8265 
8266 	/**
8267 	 * Enable interrupts for CRTCs that are newly enabled or went through
8268 	 * a modeset. It was intentionally deferred until after the front end
8269 	 * state was modified to wait until the OTG was on and so the IRQ
8270 	 * handlers didn't access stale or invalid state.
8271 	 */
8272 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8273 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8274 #ifdef CONFIG_DEBUG_FS
8275 		bool configure_crc = false;
8276 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
8277 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8278 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
8279 #endif
8280 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8281 		cur_crc_src = acrtc->dm_irq_params.crc_src;
8282 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8283 #endif
8284 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8285 
8286 		if (new_crtc_state->active &&
8287 		    (!old_crtc_state->active ||
8288 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8289 			dc_stream_retain(dm_new_crtc_state->stream);
8290 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8291 			manage_dm_interrupts(adev, acrtc, true);
8292 
8293 #ifdef CONFIG_DEBUG_FS
8294 			/**
8295 			 * Frontend may have changed so reapply the CRC capture
8296 			 * settings for the stream.
8297 			 */
8298 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8299 
8300 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8301 				configure_crc = true;
8302 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8303 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
8304 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8305 					acrtc->dm_irq_params.crc_window.update_win = true;
8306 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
8307 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
8308 					crc_rd_wrk->crtc = crtc;
8309 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
8310 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8311 				}
8312 #endif
8313 			}
8314 
8315 			if (configure_crc)
8316 				if (amdgpu_dm_crtc_configure_crc_source(
8317 					crtc, dm_new_crtc_state, cur_crc_src))
8318 					DRM_DEBUG_DRIVER("Failed to configure crc source");
8319 #endif
8320 		}
8321 	}
8322 
8323 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8324 		if (new_crtc_state->async_flip)
8325 			wait_for_vblank = false;
8326 
8327 	/* update planes when needed per crtc*/
8328 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8329 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8330 
8331 		if (dm_new_crtc_state->stream)
8332 			amdgpu_dm_commit_planes(state, dc_state, dev,
8333 						dm, crtc, wait_for_vblank);
8334 	}
8335 
8336 	/* Update audio instances for each connector. */
8337 	amdgpu_dm_commit_audio(dev, state);
8338 
8339 	/* restore the backlight level */
8340 	for (i = 0; i < dm->num_of_edps; i++) {
8341 		if (dm->backlight_dev[i] &&
8342 		    (dm->actual_brightness[i] != dm->brightness[i]))
8343 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
8344 	}
8345 
8346 	/*
8347 	 * send vblank event on all events not handled in flip and
8348 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8349 	 */
8350 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8351 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8352 
8353 		if (new_crtc_state->event)
8354 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8355 
8356 		new_crtc_state->event = NULL;
8357 	}
8358 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8359 
8360 	/* Signal HW programming completion */
8361 	drm_atomic_helper_commit_hw_done(state);
8362 
8363 	if (wait_for_vblank)
8364 		drm_atomic_helper_wait_for_flip_done(dev, state);
8365 
8366 	drm_atomic_helper_cleanup_planes(dev, state);
8367 
8368 	/* return the stolen vga memory back to VRAM */
8369 	if (!adev->mman.keep_stolen_vga_memory)
8370 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8371 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8372 
8373 	/*
8374 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8375 	 * so we can put the GPU into runtime suspend if we're not driving any
8376 	 * displays anymore
8377 	 */
8378 	for (i = 0; i < crtc_disable_count; i++)
8379 		pm_runtime_put_autosuspend(dev->dev);
8380 	pm_runtime_mark_last_busy(dev->dev);
8381 
8382 	if (dc_state_temp)
8383 		dc_release_state(dc_state_temp);
8384 }
8385 
8386 
8387 static int dm_force_atomic_commit(struct drm_connector *connector)
8388 {
8389 	int ret = 0;
8390 	struct drm_device *ddev = connector->dev;
8391 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8392 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8393 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8394 	struct drm_connector_state *conn_state;
8395 	struct drm_crtc_state *crtc_state;
8396 	struct drm_plane_state *plane_state;
8397 
8398 	if (!state)
8399 		return -ENOMEM;
8400 
8401 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8402 
8403 	/* Construct an atomic state to restore previous display setting */
8404 
8405 	/*
8406 	 * Attach connectors to drm_atomic_state
8407 	 */
8408 	conn_state = drm_atomic_get_connector_state(state, connector);
8409 
8410 	ret = PTR_ERR_OR_ZERO(conn_state);
8411 	if (ret)
8412 		goto out;
8413 
8414 	/* Attach crtc to drm_atomic_state*/
8415 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8416 
8417 	ret = PTR_ERR_OR_ZERO(crtc_state);
8418 	if (ret)
8419 		goto out;
8420 
8421 	/* force a restore */
8422 	crtc_state->mode_changed = true;
8423 
8424 	/* Attach plane to drm_atomic_state */
8425 	plane_state = drm_atomic_get_plane_state(state, plane);
8426 
8427 	ret = PTR_ERR_OR_ZERO(plane_state);
8428 	if (ret)
8429 		goto out;
8430 
8431 	/* Call commit internally with the state we just constructed */
8432 	ret = drm_atomic_commit(state);
8433 
8434 out:
8435 	drm_atomic_state_put(state);
8436 	if (ret)
8437 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8438 
8439 	return ret;
8440 }
8441 
8442 /*
8443  * This function handles all cases when set mode does not come upon hotplug.
8444  * This includes when a display is unplugged then plugged back into the
8445  * same port and when running without usermode desktop manager supprot
8446  */
8447 void dm_restore_drm_connector_state(struct drm_device *dev,
8448 				    struct drm_connector *connector)
8449 {
8450 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8451 	struct amdgpu_crtc *disconnected_acrtc;
8452 	struct dm_crtc_state *acrtc_state;
8453 
8454 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8455 		return;
8456 
8457 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8458 	if (!disconnected_acrtc)
8459 		return;
8460 
8461 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8462 	if (!acrtc_state->stream)
8463 		return;
8464 
8465 	/*
8466 	 * If the previous sink is not released and different from the current,
8467 	 * we deduce we are in a state where we can not rely on usermode call
8468 	 * to turn on the display, so we do it here
8469 	 */
8470 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8471 		dm_force_atomic_commit(&aconnector->base);
8472 }
8473 
8474 /*
8475  * Grabs all modesetting locks to serialize against any blocking commits,
8476  * Waits for completion of all non blocking commits.
8477  */
8478 static int do_aquire_global_lock(struct drm_device *dev,
8479 				 struct drm_atomic_state *state)
8480 {
8481 	struct drm_crtc *crtc;
8482 	struct drm_crtc_commit *commit;
8483 	long ret;
8484 
8485 	/*
8486 	 * Adding all modeset locks to aquire_ctx will
8487 	 * ensure that when the framework release it the
8488 	 * extra locks we are locking here will get released to
8489 	 */
8490 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8491 	if (ret)
8492 		return ret;
8493 
8494 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8495 		spin_lock(&crtc->commit_lock);
8496 		commit = list_first_entry_or_null(&crtc->commit_list,
8497 				struct drm_crtc_commit, commit_entry);
8498 		if (commit)
8499 			drm_crtc_commit_get(commit);
8500 		spin_unlock(&crtc->commit_lock);
8501 
8502 		if (!commit)
8503 			continue;
8504 
8505 		/*
8506 		 * Make sure all pending HW programming completed and
8507 		 * page flips done
8508 		 */
8509 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8510 
8511 		if (ret > 0)
8512 			ret = wait_for_completion_interruptible_timeout(
8513 					&commit->flip_done, 10*HZ);
8514 
8515 		if (ret == 0)
8516 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8517 				  "timed out\n", crtc->base.id, crtc->name);
8518 
8519 		drm_crtc_commit_put(commit);
8520 	}
8521 
8522 	return ret < 0 ? ret : 0;
8523 }
8524 
8525 static void get_freesync_config_for_crtc(
8526 	struct dm_crtc_state *new_crtc_state,
8527 	struct dm_connector_state *new_con_state)
8528 {
8529 	struct mod_freesync_config config = {0};
8530 	struct amdgpu_dm_connector *aconnector =
8531 			to_amdgpu_dm_connector(new_con_state->base.connector);
8532 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8533 	int vrefresh = drm_mode_vrefresh(mode);
8534 	bool fs_vid_mode = false;
8535 
8536 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8537 					vrefresh >= aconnector->min_vfreq &&
8538 					vrefresh <= aconnector->max_vfreq;
8539 
8540 	if (new_crtc_state->vrr_supported) {
8541 		new_crtc_state->stream->ignore_msa_timing_param = true;
8542 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
8543 
8544 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
8545 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
8546 		config.vsif_supported = true;
8547 		config.btr = true;
8548 
8549 		if (fs_vid_mode) {
8550 			config.state = VRR_STATE_ACTIVE_FIXED;
8551 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
8552 			goto out;
8553 		} else if (new_crtc_state->base.vrr_enabled) {
8554 			config.state = VRR_STATE_ACTIVE_VARIABLE;
8555 		} else {
8556 			config.state = VRR_STATE_INACTIVE;
8557 		}
8558 	}
8559 out:
8560 	new_crtc_state->freesync_config = config;
8561 }
8562 
8563 static void reset_freesync_config_for_crtc(
8564 	struct dm_crtc_state *new_crtc_state)
8565 {
8566 	new_crtc_state->vrr_supported = false;
8567 
8568 	memset(&new_crtc_state->vrr_infopacket, 0,
8569 	       sizeof(new_crtc_state->vrr_infopacket));
8570 }
8571 
8572 static bool
8573 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
8574 				 struct drm_crtc_state *new_crtc_state)
8575 {
8576 	const struct drm_display_mode *old_mode, *new_mode;
8577 
8578 	if (!old_crtc_state || !new_crtc_state)
8579 		return false;
8580 
8581 	old_mode = &old_crtc_state->mode;
8582 	new_mode = &new_crtc_state->mode;
8583 
8584 	if (old_mode->clock       == new_mode->clock &&
8585 	    old_mode->hdisplay    == new_mode->hdisplay &&
8586 	    old_mode->vdisplay    == new_mode->vdisplay &&
8587 	    old_mode->htotal      == new_mode->htotal &&
8588 	    old_mode->vtotal      != new_mode->vtotal &&
8589 	    old_mode->hsync_start == new_mode->hsync_start &&
8590 	    old_mode->vsync_start != new_mode->vsync_start &&
8591 	    old_mode->hsync_end   == new_mode->hsync_end &&
8592 	    old_mode->vsync_end   != new_mode->vsync_end &&
8593 	    old_mode->hskew       == new_mode->hskew &&
8594 	    old_mode->vscan       == new_mode->vscan &&
8595 	    (old_mode->vsync_end - old_mode->vsync_start) ==
8596 	    (new_mode->vsync_end - new_mode->vsync_start))
8597 		return true;
8598 
8599 	return false;
8600 }
8601 
8602 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
8603 	uint64_t num, den, res;
8604 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
8605 
8606 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
8607 
8608 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
8609 	den = (unsigned long long)new_crtc_state->mode.htotal *
8610 	      (unsigned long long)new_crtc_state->mode.vtotal;
8611 
8612 	res = div_u64(num, den);
8613 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
8614 }
8615 
8616 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8617 			 struct drm_atomic_state *state,
8618 			 struct drm_crtc *crtc,
8619 			 struct drm_crtc_state *old_crtc_state,
8620 			 struct drm_crtc_state *new_crtc_state,
8621 			 bool enable,
8622 			 bool *lock_and_validation_needed)
8623 {
8624 	struct dm_atomic_state *dm_state = NULL;
8625 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8626 	struct dc_stream_state *new_stream;
8627 	int ret = 0;
8628 
8629 	/*
8630 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8631 	 * update changed items
8632 	 */
8633 	struct amdgpu_crtc *acrtc = NULL;
8634 	struct amdgpu_dm_connector *aconnector = NULL;
8635 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8636 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8637 
8638 	new_stream = NULL;
8639 
8640 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8641 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8642 	acrtc = to_amdgpu_crtc(crtc);
8643 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8644 
8645 	/* TODO This hack should go away */
8646 	if (aconnector && enable) {
8647 		/* Make sure fake sink is created in plug-in scenario */
8648 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8649 							    &aconnector->base);
8650 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8651 							    &aconnector->base);
8652 
8653 		if (IS_ERR(drm_new_conn_state)) {
8654 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8655 			goto fail;
8656 		}
8657 
8658 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8659 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8660 
8661 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8662 			goto skip_modeset;
8663 
8664 		new_stream = create_validate_stream_for_sink(aconnector,
8665 							     &new_crtc_state->mode,
8666 							     dm_new_conn_state,
8667 							     dm_old_crtc_state->stream);
8668 
8669 		/*
8670 		 * we can have no stream on ACTION_SET if a display
8671 		 * was disconnected during S3, in this case it is not an
8672 		 * error, the OS will be updated after detection, and
8673 		 * will do the right thing on next atomic commit
8674 		 */
8675 
8676 		if (!new_stream) {
8677 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8678 					__func__, acrtc->base.base.id);
8679 			ret = -ENOMEM;
8680 			goto fail;
8681 		}
8682 
8683 		/*
8684 		 * TODO: Check VSDB bits to decide whether this should
8685 		 * be enabled or not.
8686 		 */
8687 		new_stream->triggered_crtc_reset.enabled =
8688 			dm->force_timing_sync;
8689 
8690 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8691 
8692 		ret = fill_hdr_info_packet(drm_new_conn_state,
8693 					   &new_stream->hdr_static_metadata);
8694 		if (ret)
8695 			goto fail;
8696 
8697 		/*
8698 		 * If we already removed the old stream from the context
8699 		 * (and set the new stream to NULL) then we can't reuse
8700 		 * the old stream even if the stream and scaling are unchanged.
8701 		 * We'll hit the BUG_ON and black screen.
8702 		 *
8703 		 * TODO: Refactor this function to allow this check to work
8704 		 * in all conditions.
8705 		 */
8706 		if (dm_new_crtc_state->stream &&
8707 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
8708 			goto skip_modeset;
8709 
8710 		if (dm_new_crtc_state->stream &&
8711 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8712 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8713 			new_crtc_state->mode_changed = false;
8714 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8715 					 new_crtc_state->mode_changed);
8716 		}
8717 	}
8718 
8719 	/* mode_changed flag may get updated above, need to check again */
8720 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8721 		goto skip_modeset;
8722 
8723 	drm_dbg_state(state->dev,
8724 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8725 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8726 		"connectors_changed:%d\n",
8727 		acrtc->crtc_id,
8728 		new_crtc_state->enable,
8729 		new_crtc_state->active,
8730 		new_crtc_state->planes_changed,
8731 		new_crtc_state->mode_changed,
8732 		new_crtc_state->active_changed,
8733 		new_crtc_state->connectors_changed);
8734 
8735 	/* Remove stream for any changed/disabled CRTC */
8736 	if (!enable) {
8737 
8738 		if (!dm_old_crtc_state->stream)
8739 			goto skip_modeset;
8740 
8741 		if (dm_new_crtc_state->stream &&
8742 		    is_timing_unchanged_for_freesync(new_crtc_state,
8743 						     old_crtc_state)) {
8744 			new_crtc_state->mode_changed = false;
8745 			DRM_DEBUG_DRIVER(
8746 				"Mode change not required for front porch change, "
8747 				"setting mode_changed to %d",
8748 				new_crtc_state->mode_changed);
8749 
8750 			set_freesync_fixed_config(dm_new_crtc_state);
8751 
8752 			goto skip_modeset;
8753 		} else if (aconnector &&
8754 			   is_freesync_video_mode(&new_crtc_state->mode,
8755 						  aconnector)) {
8756 			struct drm_display_mode *high_mode;
8757 
8758 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
8759 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
8760 				set_freesync_fixed_config(dm_new_crtc_state);
8761 			}
8762 		}
8763 
8764 		ret = dm_atomic_get_state(state, &dm_state);
8765 		if (ret)
8766 			goto fail;
8767 
8768 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8769 				crtc->base.id);
8770 
8771 		/* i.e. reset mode */
8772 		if (dc_remove_stream_from_ctx(
8773 				dm->dc,
8774 				dm_state->context,
8775 				dm_old_crtc_state->stream) != DC_OK) {
8776 			ret = -EINVAL;
8777 			goto fail;
8778 		}
8779 
8780 		dc_stream_release(dm_old_crtc_state->stream);
8781 		dm_new_crtc_state->stream = NULL;
8782 
8783 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8784 
8785 		*lock_and_validation_needed = true;
8786 
8787 	} else {/* Add stream for any updated/enabled CRTC */
8788 		/*
8789 		 * Quick fix to prevent NULL pointer on new_stream when
8790 		 * added MST connectors not found in existing crtc_state in the chained mode
8791 		 * TODO: need to dig out the root cause of that
8792 		 */
8793 		if (!aconnector)
8794 			goto skip_modeset;
8795 
8796 		if (modereset_required(new_crtc_state))
8797 			goto skip_modeset;
8798 
8799 		if (modeset_required(new_crtc_state, new_stream,
8800 				     dm_old_crtc_state->stream)) {
8801 
8802 			WARN_ON(dm_new_crtc_state->stream);
8803 
8804 			ret = dm_atomic_get_state(state, &dm_state);
8805 			if (ret)
8806 				goto fail;
8807 
8808 			dm_new_crtc_state->stream = new_stream;
8809 
8810 			dc_stream_retain(new_stream);
8811 
8812 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
8813 					 crtc->base.id);
8814 
8815 			if (dc_add_stream_to_ctx(
8816 					dm->dc,
8817 					dm_state->context,
8818 					dm_new_crtc_state->stream) != DC_OK) {
8819 				ret = -EINVAL;
8820 				goto fail;
8821 			}
8822 
8823 			*lock_and_validation_needed = true;
8824 		}
8825 	}
8826 
8827 skip_modeset:
8828 	/* Release extra reference */
8829 	if (new_stream)
8830 		 dc_stream_release(new_stream);
8831 
8832 	/*
8833 	 * We want to do dc stream updates that do not require a
8834 	 * full modeset below.
8835 	 */
8836 	if (!(enable && aconnector && new_crtc_state->active))
8837 		return 0;
8838 	/*
8839 	 * Given above conditions, the dc state cannot be NULL because:
8840 	 * 1. We're in the process of enabling CRTCs (just been added
8841 	 *    to the dc context, or already is on the context)
8842 	 * 2. Has a valid connector attached, and
8843 	 * 3. Is currently active and enabled.
8844 	 * => The dc stream state currently exists.
8845 	 */
8846 	BUG_ON(dm_new_crtc_state->stream == NULL);
8847 
8848 	/* Scaling or underscan settings */
8849 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8850 				drm_atomic_crtc_needs_modeset(new_crtc_state))
8851 		update_stream_scaling_settings(
8852 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8853 
8854 	/* ABM settings */
8855 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8856 
8857 	/*
8858 	 * Color management settings. We also update color properties
8859 	 * when a modeset is needed, to ensure it gets reprogrammed.
8860 	 */
8861 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8862 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8863 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8864 		if (ret)
8865 			goto fail;
8866 	}
8867 
8868 	/* Update Freesync settings. */
8869 	get_freesync_config_for_crtc(dm_new_crtc_state,
8870 				     dm_new_conn_state);
8871 
8872 	return ret;
8873 
8874 fail:
8875 	if (new_stream)
8876 		dc_stream_release(new_stream);
8877 	return ret;
8878 }
8879 
8880 static bool should_reset_plane(struct drm_atomic_state *state,
8881 			       struct drm_plane *plane,
8882 			       struct drm_plane_state *old_plane_state,
8883 			       struct drm_plane_state *new_plane_state)
8884 {
8885 	struct drm_plane *other;
8886 	struct drm_plane_state *old_other_state, *new_other_state;
8887 	struct drm_crtc_state *new_crtc_state;
8888 	int i;
8889 
8890 	/*
8891 	 * TODO: Remove this hack once the checks below are sufficient
8892 	 * enough to determine when we need to reset all the planes on
8893 	 * the stream.
8894 	 */
8895 	if (state->allow_modeset)
8896 		return true;
8897 
8898 	/* Exit early if we know that we're adding or removing the plane. */
8899 	if (old_plane_state->crtc != new_plane_state->crtc)
8900 		return true;
8901 
8902 	/* old crtc == new_crtc == NULL, plane not in context. */
8903 	if (!new_plane_state->crtc)
8904 		return false;
8905 
8906 	new_crtc_state =
8907 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8908 
8909 	if (!new_crtc_state)
8910 		return true;
8911 
8912 	/* CRTC Degamma changes currently require us to recreate planes. */
8913 	if (new_crtc_state->color_mgmt_changed)
8914 		return true;
8915 
8916 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8917 		return true;
8918 
8919 	/*
8920 	 * If there are any new primary or overlay planes being added or
8921 	 * removed then the z-order can potentially change. To ensure
8922 	 * correct z-order and pipe acquisition the current DC architecture
8923 	 * requires us to remove and recreate all existing planes.
8924 	 *
8925 	 * TODO: Come up with a more elegant solution for this.
8926 	 */
8927 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8928 		struct amdgpu_framebuffer *old_afb, *new_afb;
8929 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8930 			continue;
8931 
8932 		if (old_other_state->crtc != new_plane_state->crtc &&
8933 		    new_other_state->crtc != new_plane_state->crtc)
8934 			continue;
8935 
8936 		if (old_other_state->crtc != new_other_state->crtc)
8937 			return true;
8938 
8939 		/* Src/dst size and scaling updates. */
8940 		if (old_other_state->src_w != new_other_state->src_w ||
8941 		    old_other_state->src_h != new_other_state->src_h ||
8942 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8943 		    old_other_state->crtc_h != new_other_state->crtc_h)
8944 			return true;
8945 
8946 		/* Rotation / mirroring updates. */
8947 		if (old_other_state->rotation != new_other_state->rotation)
8948 			return true;
8949 
8950 		/* Blending updates. */
8951 		if (old_other_state->pixel_blend_mode !=
8952 		    new_other_state->pixel_blend_mode)
8953 			return true;
8954 
8955 		/* Alpha updates. */
8956 		if (old_other_state->alpha != new_other_state->alpha)
8957 			return true;
8958 
8959 		/* Colorspace changes. */
8960 		if (old_other_state->color_range != new_other_state->color_range ||
8961 		    old_other_state->color_encoding != new_other_state->color_encoding)
8962 			return true;
8963 
8964 		/* Framebuffer checks fall at the end. */
8965 		if (!old_other_state->fb || !new_other_state->fb)
8966 			continue;
8967 
8968 		/* Pixel format changes can require bandwidth updates. */
8969 		if (old_other_state->fb->format != new_other_state->fb->format)
8970 			return true;
8971 
8972 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8973 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8974 
8975 		/* Tiling and DCC changes also require bandwidth updates. */
8976 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
8977 		    old_afb->base.modifier != new_afb->base.modifier)
8978 			return true;
8979 	}
8980 
8981 	return false;
8982 }
8983 
8984 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8985 			      struct drm_plane_state *new_plane_state,
8986 			      struct drm_framebuffer *fb)
8987 {
8988 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8989 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8990 	unsigned int pitch;
8991 	bool linear;
8992 
8993 	if (fb->width > new_acrtc->max_cursor_width ||
8994 	    fb->height > new_acrtc->max_cursor_height) {
8995 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8996 				 new_plane_state->fb->width,
8997 				 new_plane_state->fb->height);
8998 		return -EINVAL;
8999 	}
9000 	if (new_plane_state->src_w != fb->width << 16 ||
9001 	    new_plane_state->src_h != fb->height << 16) {
9002 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9003 		return -EINVAL;
9004 	}
9005 
9006 	/* Pitch in pixels */
9007 	pitch = fb->pitches[0] / fb->format->cpp[0];
9008 
9009 	if (fb->width != pitch) {
9010 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9011 				 fb->width, pitch);
9012 		return -EINVAL;
9013 	}
9014 
9015 	switch (pitch) {
9016 	case 64:
9017 	case 128:
9018 	case 256:
9019 		/* FB pitch is supported by cursor plane */
9020 		break;
9021 	default:
9022 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9023 		return -EINVAL;
9024 	}
9025 
9026 	/* Core DRM takes care of checking FB modifiers, so we only need to
9027 	 * check tiling flags when the FB doesn't have a modifier. */
9028 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9029 		if (adev->family < AMDGPU_FAMILY_AI) {
9030 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9031 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9032 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9033 		} else {
9034 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9035 		}
9036 		if (!linear) {
9037 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9038 			return -EINVAL;
9039 		}
9040 	}
9041 
9042 	return 0;
9043 }
9044 
9045 static int dm_update_plane_state(struct dc *dc,
9046 				 struct drm_atomic_state *state,
9047 				 struct drm_plane *plane,
9048 				 struct drm_plane_state *old_plane_state,
9049 				 struct drm_plane_state *new_plane_state,
9050 				 bool enable,
9051 				 bool *lock_and_validation_needed)
9052 {
9053 
9054 	struct dm_atomic_state *dm_state = NULL;
9055 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9056 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9057 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9058 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9059 	struct amdgpu_crtc *new_acrtc;
9060 	bool needs_reset;
9061 	int ret = 0;
9062 
9063 
9064 	new_plane_crtc = new_plane_state->crtc;
9065 	old_plane_crtc = old_plane_state->crtc;
9066 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9067 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9068 
9069 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9070 		if (!enable || !new_plane_crtc ||
9071 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9072 			return 0;
9073 
9074 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9075 
9076 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9077 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9078 			return -EINVAL;
9079 		}
9080 
9081 		if (new_plane_state->fb) {
9082 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9083 						 new_plane_state->fb);
9084 			if (ret)
9085 				return ret;
9086 		}
9087 
9088 		return 0;
9089 	}
9090 
9091 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9092 					 new_plane_state);
9093 
9094 	/* Remove any changed/removed planes */
9095 	if (!enable) {
9096 		if (!needs_reset)
9097 			return 0;
9098 
9099 		if (!old_plane_crtc)
9100 			return 0;
9101 
9102 		old_crtc_state = drm_atomic_get_old_crtc_state(
9103 				state, old_plane_crtc);
9104 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9105 
9106 		if (!dm_old_crtc_state->stream)
9107 			return 0;
9108 
9109 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9110 				plane->base.id, old_plane_crtc->base.id);
9111 
9112 		ret = dm_atomic_get_state(state, &dm_state);
9113 		if (ret)
9114 			return ret;
9115 
9116 		if (!dc_remove_plane_from_context(
9117 				dc,
9118 				dm_old_crtc_state->stream,
9119 				dm_old_plane_state->dc_state,
9120 				dm_state->context)) {
9121 
9122 			return -EINVAL;
9123 		}
9124 
9125 
9126 		dc_plane_state_release(dm_old_plane_state->dc_state);
9127 		dm_new_plane_state->dc_state = NULL;
9128 
9129 		*lock_and_validation_needed = true;
9130 
9131 	} else { /* Add new planes */
9132 		struct dc_plane_state *dc_new_plane_state;
9133 
9134 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9135 			return 0;
9136 
9137 		if (!new_plane_crtc)
9138 			return 0;
9139 
9140 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9141 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9142 
9143 		if (!dm_new_crtc_state->stream)
9144 			return 0;
9145 
9146 		if (!needs_reset)
9147 			return 0;
9148 
9149 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9150 		if (ret)
9151 			return ret;
9152 
9153 		WARN_ON(dm_new_plane_state->dc_state);
9154 
9155 		dc_new_plane_state = dc_create_plane_state(dc);
9156 		if (!dc_new_plane_state)
9157 			return -ENOMEM;
9158 
9159 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9160 				 plane->base.id, new_plane_crtc->base.id);
9161 
9162 		ret = fill_dc_plane_attributes(
9163 			drm_to_adev(new_plane_crtc->dev),
9164 			dc_new_plane_state,
9165 			new_plane_state,
9166 			new_crtc_state);
9167 		if (ret) {
9168 			dc_plane_state_release(dc_new_plane_state);
9169 			return ret;
9170 		}
9171 
9172 		ret = dm_atomic_get_state(state, &dm_state);
9173 		if (ret) {
9174 			dc_plane_state_release(dc_new_plane_state);
9175 			return ret;
9176 		}
9177 
9178 		/*
9179 		 * Any atomic check errors that occur after this will
9180 		 * not need a release. The plane state will be attached
9181 		 * to the stream, and therefore part of the atomic
9182 		 * state. It'll be released when the atomic state is
9183 		 * cleaned.
9184 		 */
9185 		if (!dc_add_plane_to_context(
9186 				dc,
9187 				dm_new_crtc_state->stream,
9188 				dc_new_plane_state,
9189 				dm_state->context)) {
9190 
9191 			dc_plane_state_release(dc_new_plane_state);
9192 			return -EINVAL;
9193 		}
9194 
9195 		dm_new_plane_state->dc_state = dc_new_plane_state;
9196 
9197 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
9198 
9199 		/* Tell DC to do a full surface update every time there
9200 		 * is a plane change. Inefficient, but works for now.
9201 		 */
9202 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9203 
9204 		*lock_and_validation_needed = true;
9205 	}
9206 
9207 
9208 	return ret;
9209 }
9210 
9211 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
9212 				       int *src_w, int *src_h)
9213 {
9214 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
9215 	case DRM_MODE_ROTATE_90:
9216 	case DRM_MODE_ROTATE_270:
9217 		*src_w = plane_state->src_h >> 16;
9218 		*src_h = plane_state->src_w >> 16;
9219 		break;
9220 	case DRM_MODE_ROTATE_0:
9221 	case DRM_MODE_ROTATE_180:
9222 	default:
9223 		*src_w = plane_state->src_w >> 16;
9224 		*src_h = plane_state->src_h >> 16;
9225 		break;
9226 	}
9227 }
9228 
9229 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9230 				struct drm_crtc *crtc,
9231 				struct drm_crtc_state *new_crtc_state)
9232 {
9233 	struct drm_plane *cursor = crtc->cursor, *underlying;
9234 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
9235 	int i;
9236 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
9237 	int cursor_src_w, cursor_src_h;
9238 	int underlying_src_w, underlying_src_h;
9239 
9240 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9241 	 * cursor per pipe but it's going to inherit the scaling and
9242 	 * positioning from the underlying pipe. Check the cursor plane's
9243 	 * blending properties match the underlying planes'. */
9244 
9245 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
9246 	if (!new_cursor_state || !new_cursor_state->fb) {
9247 		return 0;
9248 	}
9249 
9250 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
9251 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
9252 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
9253 
9254 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
9255 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
9256 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
9257 			continue;
9258 
9259 		/* Ignore disabled planes */
9260 		if (!new_underlying_state->fb)
9261 			continue;
9262 
9263 		dm_get_oriented_plane_size(new_underlying_state,
9264 					   &underlying_src_w, &underlying_src_h);
9265 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
9266 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
9267 
9268 		if (cursor_scale_w != underlying_scale_w ||
9269 		    cursor_scale_h != underlying_scale_h) {
9270 			drm_dbg_atomic(crtc->dev,
9271 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
9272 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
9273 			return -EINVAL;
9274 		}
9275 
9276 		/* If this plane covers the whole CRTC, no need to check planes underneath */
9277 		if (new_underlying_state->crtc_x <= 0 &&
9278 		    new_underlying_state->crtc_y <= 0 &&
9279 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
9280 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
9281 			break;
9282 	}
9283 
9284 	return 0;
9285 }
9286 
9287 #if defined(CONFIG_DRM_AMD_DC_DCN)
9288 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9289 {
9290 	struct drm_connector *connector;
9291 	struct drm_connector_state *conn_state, *old_conn_state;
9292 	struct amdgpu_dm_connector *aconnector = NULL;
9293 	int i;
9294 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
9295 		if (!conn_state->crtc)
9296 			conn_state = old_conn_state;
9297 
9298 		if (conn_state->crtc != crtc)
9299 			continue;
9300 
9301 		aconnector = to_amdgpu_dm_connector(connector);
9302 		if (!aconnector->port || !aconnector->mst_port)
9303 			aconnector = NULL;
9304 		else
9305 			break;
9306 	}
9307 
9308 	if (!aconnector)
9309 		return 0;
9310 
9311 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9312 }
9313 #endif
9314 
9315 /**
9316  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9317  * @dev: The DRM device
9318  * @state: The atomic state to commit
9319  *
9320  * Validate that the given atomic state is programmable by DC into hardware.
9321  * This involves constructing a &struct dc_state reflecting the new hardware
9322  * state we wish to commit, then querying DC to see if it is programmable. It's
9323  * important not to modify the existing DC state. Otherwise, atomic_check
9324  * may unexpectedly commit hardware changes.
9325  *
9326  * When validating the DC state, it's important that the right locks are
9327  * acquired. For full updates case which removes/adds/updates streams on one
9328  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9329  * that any such full update commit will wait for completion of any outstanding
9330  * flip using DRMs synchronization events.
9331  *
9332  * Note that DM adds the affected connectors for all CRTCs in state, when that
9333  * might not seem necessary. This is because DC stream creation requires the
9334  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9335  * be possible but non-trivial - a possible TODO item.
9336  *
9337  * Return: -Error code if validation failed.
9338  */
9339 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9340 				  struct drm_atomic_state *state)
9341 {
9342 	struct amdgpu_device *adev = drm_to_adev(dev);
9343 	struct dm_atomic_state *dm_state = NULL;
9344 	struct dc *dc = adev->dm.dc;
9345 	struct drm_connector *connector;
9346 	struct drm_connector_state *old_con_state, *new_con_state;
9347 	struct drm_crtc *crtc;
9348 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9349 	struct drm_plane *plane;
9350 	struct drm_plane_state *old_plane_state, *new_plane_state;
9351 	enum dc_status status;
9352 	int ret, i;
9353 	bool lock_and_validation_needed = false;
9354 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9355 #if defined(CONFIG_DRM_AMD_DC_DCN)
9356 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
9357 	struct drm_dp_mst_topology_state *mst_state;
9358 	struct drm_dp_mst_topology_mgr *mgr;
9359 #endif
9360 
9361 	trace_amdgpu_dm_atomic_check_begin(state);
9362 
9363 	ret = drm_atomic_helper_check_modeset(dev, state);
9364 	if (ret) {
9365 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
9366 		goto fail;
9367 	}
9368 
9369 	/* Check connector changes */
9370 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9371 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9372 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9373 
9374 		/* Skip connectors that are disabled or part of modeset already. */
9375 		if (!old_con_state->crtc && !new_con_state->crtc)
9376 			continue;
9377 
9378 		if (!new_con_state->crtc)
9379 			continue;
9380 
9381 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9382 		if (IS_ERR(new_crtc_state)) {
9383 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
9384 			ret = PTR_ERR(new_crtc_state);
9385 			goto fail;
9386 		}
9387 
9388 		if (dm_old_con_state->abm_level !=
9389 		    dm_new_con_state->abm_level)
9390 			new_crtc_state->connectors_changed = true;
9391 	}
9392 
9393 #if defined(CONFIG_DRM_AMD_DC_DCN)
9394 	if (dc_resource_is_dsc_encoding_supported(dc)) {
9395 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9396 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9397 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9398 				if (ret) {
9399 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
9400 					goto fail;
9401 				}
9402 			}
9403 		}
9404 		if (!pre_validate_dsc(state, &dm_state, vars)) {
9405 			ret = -EINVAL;
9406 			goto fail;
9407 		}
9408 	}
9409 #endif
9410 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9411 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9412 
9413 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9414 		    !new_crtc_state->color_mgmt_changed &&
9415 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9416 			dm_old_crtc_state->dsc_force_changed == false)
9417 			continue;
9418 
9419 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
9420 		if (ret) {
9421 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
9422 			goto fail;
9423 		}
9424 
9425 		if (!new_crtc_state->enable)
9426 			continue;
9427 
9428 		ret = drm_atomic_add_affected_connectors(state, crtc);
9429 		if (ret) {
9430 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
9431 			goto fail;
9432 		}
9433 
9434 		ret = drm_atomic_add_affected_planes(state, crtc);
9435 		if (ret) {
9436 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
9437 			goto fail;
9438 		}
9439 
9440 		if (dm_old_crtc_state->dsc_force_changed)
9441 			new_crtc_state->mode_changed = true;
9442 	}
9443 
9444 	/*
9445 	 * Add all primary and overlay planes on the CRTC to the state
9446 	 * whenever a plane is enabled to maintain correct z-ordering
9447 	 * and to enable fast surface updates.
9448 	 */
9449 	drm_for_each_crtc(crtc, dev) {
9450 		bool modified = false;
9451 
9452 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9453 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9454 				continue;
9455 
9456 			if (new_plane_state->crtc == crtc ||
9457 			    old_plane_state->crtc == crtc) {
9458 				modified = true;
9459 				break;
9460 			}
9461 		}
9462 
9463 		if (!modified)
9464 			continue;
9465 
9466 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9467 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9468 				continue;
9469 
9470 			new_plane_state =
9471 				drm_atomic_get_plane_state(state, plane);
9472 
9473 			if (IS_ERR(new_plane_state)) {
9474 				ret = PTR_ERR(new_plane_state);
9475 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
9476 				goto fail;
9477 			}
9478 		}
9479 	}
9480 
9481 	/* Remove exiting planes if they are modified */
9482 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9483 		ret = dm_update_plane_state(dc, state, plane,
9484 					    old_plane_state,
9485 					    new_plane_state,
9486 					    false,
9487 					    &lock_and_validation_needed);
9488 		if (ret) {
9489 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9490 			goto fail;
9491 		}
9492 	}
9493 
9494 	/* Disable all crtcs which require disable */
9495 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9496 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9497 					   old_crtc_state,
9498 					   new_crtc_state,
9499 					   false,
9500 					   &lock_and_validation_needed);
9501 		if (ret) {
9502 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
9503 			goto fail;
9504 		}
9505 	}
9506 
9507 	/* Enable all crtcs which require enable */
9508 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9509 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9510 					   old_crtc_state,
9511 					   new_crtc_state,
9512 					   true,
9513 					   &lock_and_validation_needed);
9514 		if (ret) {
9515 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
9516 			goto fail;
9517 		}
9518 	}
9519 
9520 	/* Add new/modified planes */
9521 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9522 		ret = dm_update_plane_state(dc, state, plane,
9523 					    old_plane_state,
9524 					    new_plane_state,
9525 					    true,
9526 					    &lock_and_validation_needed);
9527 		if (ret) {
9528 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9529 			goto fail;
9530 		}
9531 	}
9532 
9533 	/* Run this here since we want to validate the streams we created */
9534 	ret = drm_atomic_helper_check_planes(dev, state);
9535 	if (ret) {
9536 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
9537 		goto fail;
9538 	}
9539 
9540 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9541 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9542 		if (dm_new_crtc_state->mpo_requested)
9543 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
9544 	}
9545 
9546 	/* Check cursor planes scaling */
9547 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9548 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9549 		if (ret) {
9550 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
9551 			goto fail;
9552 		}
9553 	}
9554 
9555 	if (state->legacy_cursor_update) {
9556 		/*
9557 		 * This is a fast cursor update coming from the plane update
9558 		 * helper, check if it can be done asynchronously for better
9559 		 * performance.
9560 		 */
9561 		state->async_update =
9562 			!drm_atomic_helper_async_check(dev, state);
9563 
9564 		/*
9565 		 * Skip the remaining global validation if this is an async
9566 		 * update. Cursor updates can be done without affecting
9567 		 * state or bandwidth calcs and this avoids the performance
9568 		 * penalty of locking the private state object and
9569 		 * allocating a new dc_state.
9570 		 */
9571 		if (state->async_update)
9572 			return 0;
9573 	}
9574 
9575 	/* Check scaling and underscan changes*/
9576 	/* TODO Removed scaling changes validation due to inability to commit
9577 	 * new stream into context w\o causing full reset. Need to
9578 	 * decide how to handle.
9579 	 */
9580 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9581 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9582 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9583 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9584 
9585 		/* Skip any modesets/resets */
9586 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9587 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9588 			continue;
9589 
9590 		/* Skip any thing not scale or underscan changes */
9591 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9592 			continue;
9593 
9594 		lock_and_validation_needed = true;
9595 	}
9596 
9597 #if defined(CONFIG_DRM_AMD_DC_DCN)
9598 	/* set the slot info for each mst_state based on the link encoding format */
9599 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
9600 		struct amdgpu_dm_connector *aconnector;
9601 		struct drm_connector *connector;
9602 		struct drm_connector_list_iter iter;
9603 		u8 link_coding_cap;
9604 
9605 		if (!mgr->mst_state )
9606 			continue;
9607 
9608 		drm_connector_list_iter_begin(dev, &iter);
9609 		drm_for_each_connector_iter(connector, &iter) {
9610 			int id = connector->index;
9611 
9612 			if (id == mst_state->mgr->conn_base_id) {
9613 				aconnector = to_amdgpu_dm_connector(connector);
9614 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
9615 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
9616 
9617 				break;
9618 			}
9619 		}
9620 		drm_connector_list_iter_end(&iter);
9621 
9622 	}
9623 #endif
9624 	/**
9625 	 * Streams and planes are reset when there are changes that affect
9626 	 * bandwidth. Anything that affects bandwidth needs to go through
9627 	 * DC global validation to ensure that the configuration can be applied
9628 	 * to hardware.
9629 	 *
9630 	 * We have to currently stall out here in atomic_check for outstanding
9631 	 * commits to finish in this case because our IRQ handlers reference
9632 	 * DRM state directly - we can end up disabling interrupts too early
9633 	 * if we don't.
9634 	 *
9635 	 * TODO: Remove this stall and drop DM state private objects.
9636 	 */
9637 	if (lock_and_validation_needed) {
9638 		ret = dm_atomic_get_state(state, &dm_state);
9639 		if (ret) {
9640 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
9641 			goto fail;
9642 		}
9643 
9644 		ret = do_aquire_global_lock(dev, state);
9645 		if (ret) {
9646 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
9647 			goto fail;
9648 		}
9649 
9650 #if defined(CONFIG_DRM_AMD_DC_DCN)
9651 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
9652 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
9653 			ret = -EINVAL;
9654 			goto fail;
9655 		}
9656 
9657 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
9658 		if (ret) {
9659 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
9660 			goto fail;
9661 		}
9662 #endif
9663 
9664 		/*
9665 		 * Perform validation of MST topology in the state:
9666 		 * We need to perform MST atomic check before calling
9667 		 * dc_validate_global_state(), or there is a chance
9668 		 * to get stuck in an infinite loop and hang eventually.
9669 		 */
9670 		ret = drm_dp_mst_atomic_check(state);
9671 		if (ret) {
9672 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
9673 			goto fail;
9674 		}
9675 		status = dc_validate_global_state(dc, dm_state->context, true);
9676 		if (status != DC_OK) {
9677 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
9678 				       dc_status_to_str(status), status);
9679 			ret = -EINVAL;
9680 			goto fail;
9681 		}
9682 	} else {
9683 		/*
9684 		 * The commit is a fast update. Fast updates shouldn't change
9685 		 * the DC context, affect global validation, and can have their
9686 		 * commit work done in parallel with other commits not touching
9687 		 * the same resource. If we have a new DC context as part of
9688 		 * the DM atomic state from validation we need to free it and
9689 		 * retain the existing one instead.
9690 		 *
9691 		 * Furthermore, since the DM atomic state only contains the DC
9692 		 * context and can safely be annulled, we can free the state
9693 		 * and clear the associated private object now to free
9694 		 * some memory and avoid a possible use-after-free later.
9695 		 */
9696 
9697 		for (i = 0; i < state->num_private_objs; i++) {
9698 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9699 
9700 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
9701 				int j = state->num_private_objs-1;
9702 
9703 				dm_atomic_destroy_state(obj,
9704 						state->private_objs[i].state);
9705 
9706 				/* If i is not at the end of the array then the
9707 				 * last element needs to be moved to where i was
9708 				 * before the array can safely be truncated.
9709 				 */
9710 				if (i != j)
9711 					state->private_objs[i] =
9712 						state->private_objs[j];
9713 
9714 				state->private_objs[j].ptr = NULL;
9715 				state->private_objs[j].state = NULL;
9716 				state->private_objs[j].old_state = NULL;
9717 				state->private_objs[j].new_state = NULL;
9718 
9719 				state->num_private_objs = j;
9720 				break;
9721 			}
9722 		}
9723 	}
9724 
9725 	/* Store the overall update type for use later in atomic check. */
9726 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9727 		struct dm_crtc_state *dm_new_crtc_state =
9728 			to_dm_crtc_state(new_crtc_state);
9729 
9730 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9731 							 UPDATE_TYPE_FULL :
9732 							 UPDATE_TYPE_FAST;
9733 	}
9734 
9735 	/* Must be success */
9736 	WARN_ON(ret);
9737 
9738 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9739 
9740 	return ret;
9741 
9742 fail:
9743 	if (ret == -EDEADLK)
9744 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9745 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9746 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9747 	else
9748 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9749 
9750 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9751 
9752 	return ret;
9753 }
9754 
9755 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9756 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9757 {
9758 	uint8_t dpcd_data;
9759 	bool capable = false;
9760 
9761 	if (amdgpu_dm_connector->dc_link &&
9762 		dm_helpers_dp_read_dpcd(
9763 				NULL,
9764 				amdgpu_dm_connector->dc_link,
9765 				DP_DOWN_STREAM_PORT_COUNT,
9766 				&dpcd_data,
9767 				sizeof(dpcd_data))) {
9768 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9769 	}
9770 
9771 	return capable;
9772 }
9773 
9774 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
9775 		unsigned int offset,
9776 		unsigned int total_length,
9777 		uint8_t *data,
9778 		unsigned int length,
9779 		struct amdgpu_hdmi_vsdb_info *vsdb)
9780 {
9781 	bool res;
9782 	union dmub_rb_cmd cmd;
9783 	struct dmub_cmd_send_edid_cea *input;
9784 	struct dmub_cmd_edid_cea_output *output;
9785 
9786 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
9787 		return false;
9788 
9789 	memset(&cmd, 0, sizeof(cmd));
9790 
9791 	input = &cmd.edid_cea.data.input;
9792 
9793 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
9794 	cmd.edid_cea.header.sub_type = 0;
9795 	cmd.edid_cea.header.payload_bytes =
9796 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
9797 	input->offset = offset;
9798 	input->length = length;
9799 	input->cea_total_length = total_length;
9800 	memcpy(input->payload, data, length);
9801 
9802 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
9803 	if (!res) {
9804 		DRM_ERROR("EDID CEA parser failed\n");
9805 		return false;
9806 	}
9807 
9808 	output = &cmd.edid_cea.data.output;
9809 
9810 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
9811 		if (!output->ack.success) {
9812 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
9813 					output->ack.offset);
9814 		}
9815 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
9816 		if (!output->amd_vsdb.vsdb_found)
9817 			return false;
9818 
9819 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
9820 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
9821 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
9822 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
9823 	} else {
9824 		DRM_WARN("Unknown EDID CEA parser results\n");
9825 		return false;
9826 	}
9827 
9828 	return true;
9829 }
9830 
9831 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
9832 		uint8_t *edid_ext, int len,
9833 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
9834 {
9835 	int i;
9836 
9837 	/* send extension block to DMCU for parsing */
9838 	for (i = 0; i < len; i += 8) {
9839 		bool res;
9840 		int offset;
9841 
9842 		/* send 8 bytes a time */
9843 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
9844 			return false;
9845 
9846 		if (i+8 == len) {
9847 			/* EDID block sent completed, expect result */
9848 			int version, min_rate, max_rate;
9849 
9850 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
9851 			if (res) {
9852 				/* amd vsdb found */
9853 				vsdb_info->freesync_supported = 1;
9854 				vsdb_info->amd_vsdb_version = version;
9855 				vsdb_info->min_refresh_rate_hz = min_rate;
9856 				vsdb_info->max_refresh_rate_hz = max_rate;
9857 				return true;
9858 			}
9859 			/* not amd vsdb */
9860 			return false;
9861 		}
9862 
9863 		/* check for ack*/
9864 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
9865 		if (!res)
9866 			return false;
9867 	}
9868 
9869 	return false;
9870 }
9871 
9872 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
9873 		uint8_t *edid_ext, int len,
9874 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
9875 {
9876 	int i;
9877 
9878 	/* send extension block to DMCU for parsing */
9879 	for (i = 0; i < len; i += 8) {
9880 		/* send 8 bytes a time */
9881 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
9882 			return false;
9883 	}
9884 
9885 	return vsdb_info->freesync_supported;
9886 }
9887 
9888 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
9889 		uint8_t *edid_ext, int len,
9890 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
9891 {
9892 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
9893 
9894 	if (adev->dm.dmub_srv)
9895 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
9896 	else
9897 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
9898 }
9899 
9900 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
9901 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
9902 {
9903 	uint8_t *edid_ext = NULL;
9904 	int i;
9905 	bool valid_vsdb_found = false;
9906 
9907 	/*----- drm_find_cea_extension() -----*/
9908 	/* No EDID or EDID extensions */
9909 	if (edid == NULL || edid->extensions == 0)
9910 		return -ENODEV;
9911 
9912 	/* Find CEA extension */
9913 	for (i = 0; i < edid->extensions; i++) {
9914 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
9915 		if (edid_ext[0] == CEA_EXT)
9916 			break;
9917 	}
9918 
9919 	if (i == edid->extensions)
9920 		return -ENODEV;
9921 
9922 	/*----- cea_db_offsets() -----*/
9923 	if (edid_ext[0] != CEA_EXT)
9924 		return -ENODEV;
9925 
9926 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
9927 
9928 	return valid_vsdb_found ? i : -ENODEV;
9929 }
9930 
9931 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9932 					struct edid *edid)
9933 {
9934 	int i = 0;
9935 	struct detailed_timing *timing;
9936 	struct detailed_non_pixel *data;
9937 	struct detailed_data_monitor_range *range;
9938 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9939 			to_amdgpu_dm_connector(connector);
9940 	struct dm_connector_state *dm_con_state = NULL;
9941 	struct dc_sink *sink;
9942 
9943 	struct drm_device *dev = connector->dev;
9944 	struct amdgpu_device *adev = drm_to_adev(dev);
9945 	bool freesync_capable = false;
9946 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
9947 
9948 	if (!connector->state) {
9949 		DRM_ERROR("%s - Connector has no state", __func__);
9950 		goto update;
9951 	}
9952 
9953 	sink = amdgpu_dm_connector->dc_sink ?
9954 		amdgpu_dm_connector->dc_sink :
9955 		amdgpu_dm_connector->dc_em_sink;
9956 
9957 	if (!edid || !sink) {
9958 		dm_con_state = to_dm_connector_state(connector->state);
9959 
9960 		amdgpu_dm_connector->min_vfreq = 0;
9961 		amdgpu_dm_connector->max_vfreq = 0;
9962 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9963 		connector->display_info.monitor_range.min_vfreq = 0;
9964 		connector->display_info.monitor_range.max_vfreq = 0;
9965 		freesync_capable = false;
9966 
9967 		goto update;
9968 	}
9969 
9970 	dm_con_state = to_dm_connector_state(connector->state);
9971 
9972 	if (!adev->dm.freesync_module)
9973 		goto update;
9974 
9975 
9976 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9977 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
9978 		bool edid_check_required = false;
9979 
9980 		if (edid) {
9981 			edid_check_required = is_dp_capable_without_timing_msa(
9982 						adev->dm.dc,
9983 						amdgpu_dm_connector);
9984 		}
9985 
9986 		if (edid_check_required == true && (edid->version > 1 ||
9987 		   (edid->version == 1 && edid->revision > 1))) {
9988 			for (i = 0; i < 4; i++) {
9989 
9990 				timing	= &edid->detailed_timings[i];
9991 				data	= &timing->data.other_data;
9992 				range	= &data->data.range;
9993 				/*
9994 				 * Check if monitor has continuous frequency mode
9995 				 */
9996 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
9997 					continue;
9998 				/*
9999 				 * Check for flag range limits only. If flag == 1 then
10000 				 * no additional timing information provided.
10001 				 * Default GTF, GTF Secondary curve and CVT are not
10002 				 * supported
10003 				 */
10004 				if (range->flags != 1)
10005 					continue;
10006 
10007 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10008 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10009 				amdgpu_dm_connector->pixel_clock_mhz =
10010 					range->pixel_clock_mhz * 10;
10011 
10012 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10013 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10014 
10015 				break;
10016 			}
10017 
10018 			if (amdgpu_dm_connector->max_vfreq -
10019 			    amdgpu_dm_connector->min_vfreq > 10) {
10020 
10021 				freesync_capable = true;
10022 			}
10023 		}
10024 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10025 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10026 		if (i >= 0 && vsdb_info.freesync_supported) {
10027 			timing  = &edid->detailed_timings[i];
10028 			data    = &timing->data.other_data;
10029 
10030 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10031 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10032 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10033 				freesync_capable = true;
10034 
10035 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10036 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10037 		}
10038 	}
10039 
10040 update:
10041 	if (dm_con_state)
10042 		dm_con_state->freesync_capable = freesync_capable;
10043 
10044 	if (connector->vrr_capable_property)
10045 		drm_connector_set_vrr_capable_property(connector,
10046 						       freesync_capable);
10047 }
10048 
10049 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10050 {
10051 	struct amdgpu_device *adev = drm_to_adev(dev);
10052 	struct dc *dc = adev->dm.dc;
10053 	int i;
10054 
10055 	mutex_lock(&adev->dm.dc_lock);
10056 	if (dc->current_state) {
10057 		for (i = 0; i < dc->current_state->stream_count; ++i)
10058 			dc->current_state->streams[i]
10059 				->triggered_crtc_reset.enabled =
10060 				adev->dm.force_timing_sync;
10061 
10062 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10063 		dc_trigger_sync(dc, dc->current_state);
10064 	}
10065 	mutex_unlock(&adev->dm.dc_lock);
10066 }
10067 
10068 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10069 		       uint32_t value, const char *func_name)
10070 {
10071 #ifdef DM_CHECK_ADDR_0
10072 	if (address == 0) {
10073 		DC_ERR("invalid register write. address = 0");
10074 		return;
10075 	}
10076 #endif
10077 	cgs_write_register(ctx->cgs_device, address, value);
10078 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10079 }
10080 
10081 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10082 			  const char *func_name)
10083 {
10084 	uint32_t value;
10085 #ifdef DM_CHECK_ADDR_0
10086 	if (address == 0) {
10087 		DC_ERR("invalid register read; address = 0\n");
10088 		return 0;
10089 	}
10090 #endif
10091 
10092 	if (ctx->dmub_srv &&
10093 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10094 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10095 		ASSERT(false);
10096 		return 0;
10097 	}
10098 
10099 	value = cgs_read_register(ctx->cgs_device, address);
10100 
10101 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10102 
10103 	return value;
10104 }
10105 
10106 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
10107 						struct dc_context *ctx,
10108 						uint8_t status_type,
10109 						uint32_t *operation_result)
10110 {
10111 	struct amdgpu_device *adev = ctx->driver_context;
10112 	int return_status = -1;
10113 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
10114 
10115 	if (is_cmd_aux) {
10116 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
10117 			return_status = p_notify->aux_reply.length;
10118 			*operation_result = p_notify->result;
10119 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
10120 			*operation_result = AUX_RET_ERROR_TIMEOUT;
10121 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
10122 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
10123 		} else {
10124 			*operation_result = AUX_RET_ERROR_UNKNOWN;
10125 		}
10126 	} else {
10127 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
10128 			return_status = 0;
10129 			*operation_result = p_notify->sc_status;
10130 		} else {
10131 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
10132 		}
10133 	}
10134 
10135 	return return_status;
10136 }
10137 
10138 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
10139 	unsigned int link_index, void *cmd_payload, void *operation_result)
10140 {
10141 	struct amdgpu_device *adev = ctx->driver_context;
10142 	int ret = 0;
10143 
10144 	if (is_cmd_aux) {
10145 		dc_process_dmub_aux_transfer_async(ctx->dc,
10146 			link_index, (struct aux_payload *)cmd_payload);
10147 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
10148 					(struct set_config_cmd_payload *)cmd_payload,
10149 					adev->dm.dmub_notify)) {
10150 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10151 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
10152 					(uint32_t *)operation_result);
10153 	}
10154 
10155 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
10156 	if (ret == 0) {
10157 		DRM_ERROR("wait_for_completion_timeout timeout!");
10158 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10159 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
10160 				(uint32_t *)operation_result);
10161 	}
10162 
10163 	if (is_cmd_aux) {
10164 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10165 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
10166 
10167 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
10168 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10169 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
10170 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10171 				       adev->dm.dmub_notify->aux_reply.length);
10172 			}
10173 		}
10174 	}
10175 
10176 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
10177 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
10178 			(uint32_t *)operation_result);
10179 }
10180 
10181 /*
10182  * Check whether seamless boot is supported.
10183  *
10184  * So far we only support seamless boot on CHIP_VANGOGH.
10185  * If everything goes well, we may consider expanding
10186  * seamless boot to other ASICs.
10187  */
10188 bool check_seamless_boot_capability(struct amdgpu_device *adev)
10189 {
10190 	switch (adev->asic_type) {
10191 	case CHIP_VANGOGH:
10192 		if (!adev->mman.keep_stolen_vga_memory)
10193 			return true;
10194 		break;
10195 	default:
10196 		break;
10197 	}
10198 
10199 	return false;
10200 }
10201