xref: /openbmc/linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 1e20904e417738066b26490de2daf7ef3ed34483)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/display/drm_dp_mst_helper.h>
77 #include <drm/display/drm_hdmi_helper.h>
78 #include <drm/drm_atomic.h>
79 #include <drm/drm_atomic_uapi.h>
80 #include <drm/drm_atomic_helper.h>
81 #include <drm/drm_fb_helper.h>
82 #include <drm/drm_fourcc.h>
83 #include <drm/drm_edid.h>
84 #include <drm/drm_vblank.h>
85 #include <drm/drm_audio_component.h>
86 
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93 
94 #include "soc15_common.h"
95 
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
99 
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
117 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
118 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
119 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
120 
121 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
122 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
123 
124 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
125 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
126 
127 /* Number of bytes in PSP header for firmware. */
128 #define PSP_HEADER_BYTES 0x100
129 
130 /* Number of bytes in PSP footer for firmware. */
131 #define PSP_FOOTER_BYTES 0x100
132 
133 /**
134  * DOC: overview
135  *
136  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
137  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
138  * requests into DC requests, and DC responses into DRM responses.
139  *
140  * The root control structure is &struct amdgpu_display_manager.
141  */
142 
143 /* basic init/fini API */
144 static int amdgpu_dm_init(struct amdgpu_device *adev);
145 static void amdgpu_dm_fini(struct amdgpu_device *adev);
146 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
147 
148 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
149 {
150 	switch (link->dpcd_caps.dongle_type) {
151 	case DISPLAY_DONGLE_NONE:
152 		return DRM_MODE_SUBCONNECTOR_Native;
153 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
154 		return DRM_MODE_SUBCONNECTOR_VGA;
155 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
156 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
157 		return DRM_MODE_SUBCONNECTOR_DVID;
158 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
159 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
160 		return DRM_MODE_SUBCONNECTOR_HDMIA;
161 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
162 	default:
163 		return DRM_MODE_SUBCONNECTOR_Unknown;
164 	}
165 }
166 
167 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
168 {
169 	struct dc_link *link = aconnector->dc_link;
170 	struct drm_connector *connector = &aconnector->base;
171 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
172 
173 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
174 		return;
175 
176 	if (aconnector->dc_sink)
177 		subconnector = get_subconnector_type(link);
178 
179 	drm_object_property_set_value(&connector->base,
180 			connector->dev->mode_config.dp_subconnector_property,
181 			subconnector);
182 }
183 
184 /*
185  * initializes drm_device display related structures, based on the information
186  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
187  * drm_encoder, drm_mode_config
188  *
189  * Returns 0 on success
190  */
191 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
192 /* removes and deallocates the drm structures, created by the above function */
193 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
194 
195 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
196 				struct drm_plane *plane,
197 				unsigned long possible_crtcs,
198 				const struct dc_plane_cap *plane_cap);
199 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
200 			       struct drm_plane *plane,
201 			       uint32_t link_index);
202 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
203 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
204 				    uint32_t link_index,
205 				    struct amdgpu_encoder *amdgpu_encoder);
206 static int amdgpu_dm_encoder_init(struct drm_device *dev,
207 				  struct amdgpu_encoder *aencoder,
208 				  uint32_t link_index);
209 
210 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
211 
212 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
213 
214 static int amdgpu_dm_atomic_check(struct drm_device *dev,
215 				  struct drm_atomic_state *state);
216 
217 static void handle_cursor_update(struct drm_plane *plane,
218 				 struct drm_plane_state *old_plane_state);
219 
220 static const struct drm_format_info *
221 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
222 
223 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
224 static void handle_hpd_rx_irq(void *param);
225 
226 static bool
227 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
228 				 struct drm_crtc_state *new_crtc_state);
229 /*
230  * dm_vblank_get_counter
231  *
232  * @brief
233  * Get counter for number of vertical blanks
234  *
235  * @param
236  * struct amdgpu_device *adev - [in] desired amdgpu device
237  * int disp_idx - [in] which CRTC to get the counter from
238  *
239  * @return
240  * Counter for vertical blanks
241  */
242 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
243 {
244 	if (crtc >= adev->mode_info.num_crtc)
245 		return 0;
246 	else {
247 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
248 
249 		if (acrtc->dm_irq_params.stream == NULL) {
250 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
251 				  crtc);
252 			return 0;
253 		}
254 
255 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
256 	}
257 }
258 
259 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
260 				  u32 *vbl, u32 *position)
261 {
262 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
263 
264 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
265 		return -EINVAL;
266 	else {
267 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
268 
269 		if (acrtc->dm_irq_params.stream ==  NULL) {
270 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
271 				  crtc);
272 			return 0;
273 		}
274 
275 		/*
276 		 * TODO rework base driver to use values directly.
277 		 * for now parse it back into reg-format
278 		 */
279 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
280 					 &v_blank_start,
281 					 &v_blank_end,
282 					 &h_position,
283 					 &v_position);
284 
285 		*position = v_position | (h_position << 16);
286 		*vbl = v_blank_start | (v_blank_end << 16);
287 	}
288 
289 	return 0;
290 }
291 
292 static bool dm_is_idle(void *handle)
293 {
294 	/* XXX todo */
295 	return true;
296 }
297 
298 static int dm_wait_for_idle(void *handle)
299 {
300 	/* XXX todo */
301 	return 0;
302 }
303 
304 static bool dm_check_soft_reset(void *handle)
305 {
306 	return false;
307 }
308 
309 static int dm_soft_reset(void *handle)
310 {
311 	/* XXX todo */
312 	return 0;
313 }
314 
315 static struct amdgpu_crtc *
316 get_crtc_by_otg_inst(struct amdgpu_device *adev,
317 		     int otg_inst)
318 {
319 	struct drm_device *dev = adev_to_drm(adev);
320 	struct drm_crtc *crtc;
321 	struct amdgpu_crtc *amdgpu_crtc;
322 
323 	if (WARN_ON(otg_inst == -1))
324 		return adev->mode_info.crtcs[0];
325 
326 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
327 		amdgpu_crtc = to_amdgpu_crtc(crtc);
328 
329 		if (amdgpu_crtc->otg_inst == otg_inst)
330 			return amdgpu_crtc;
331 	}
332 
333 	return NULL;
334 }
335 
336 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
337 {
338 	return acrtc->dm_irq_params.freesync_config.state ==
339 		       VRR_STATE_ACTIVE_VARIABLE ||
340 	       acrtc->dm_irq_params.freesync_config.state ==
341 		       VRR_STATE_ACTIVE_FIXED;
342 }
343 
344 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
345 {
346 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
347 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
348 }
349 
350 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
351 					      struct dm_crtc_state *new_state)
352 {
353 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
354 		return true;
355 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
356 		return true;
357 	else
358 		return false;
359 }
360 
361 /**
362  * dm_pflip_high_irq() - Handle pageflip interrupt
363  * @interrupt_params: ignored
364  *
365  * Handles the pageflip interrupt by notifying all interested parties
366  * that the pageflip has been completed.
367  */
368 static void dm_pflip_high_irq(void *interrupt_params)
369 {
370 	struct amdgpu_crtc *amdgpu_crtc;
371 	struct common_irq_params *irq_params = interrupt_params;
372 	struct amdgpu_device *adev = irq_params->adev;
373 	unsigned long flags;
374 	struct drm_pending_vblank_event *e;
375 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
376 	bool vrr_active;
377 
378 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
379 
380 	/* IRQ could occur when in initial stage */
381 	/* TODO work and BO cleanup */
382 	if (amdgpu_crtc == NULL) {
383 		DC_LOG_PFLIP("CRTC is null, returning.\n");
384 		return;
385 	}
386 
387 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
388 
389 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
390 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
391 						 amdgpu_crtc->pflip_status,
392 						 AMDGPU_FLIP_SUBMITTED,
393 						 amdgpu_crtc->crtc_id,
394 						 amdgpu_crtc);
395 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
396 		return;
397 	}
398 
399 	/* page flip completed. */
400 	e = amdgpu_crtc->event;
401 	amdgpu_crtc->event = NULL;
402 
403 	WARN_ON(!e);
404 
405 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
406 
407 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
408 	if (!vrr_active ||
409 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
410 				      &v_blank_end, &hpos, &vpos) ||
411 	    (vpos < v_blank_start)) {
412 		/* Update to correct count and vblank timestamp if racing with
413 		 * vblank irq. This also updates to the correct vblank timestamp
414 		 * even in VRR mode, as scanout is past the front-porch atm.
415 		 */
416 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
417 
418 		/* Wake up userspace by sending the pageflip event with proper
419 		 * count and timestamp of vblank of flip completion.
420 		 */
421 		if (e) {
422 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
423 
424 			/* Event sent, so done with vblank for this flip */
425 			drm_crtc_vblank_put(&amdgpu_crtc->base);
426 		}
427 	} else if (e) {
428 		/* VRR active and inside front-porch: vblank count and
429 		 * timestamp for pageflip event will only be up to date after
430 		 * drm_crtc_handle_vblank() has been executed from late vblank
431 		 * irq handler after start of back-porch (vline 0). We queue the
432 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
433 		 * updated timestamp and count, once it runs after us.
434 		 *
435 		 * We need to open-code this instead of using the helper
436 		 * drm_crtc_arm_vblank_event(), as that helper would
437 		 * call drm_crtc_accurate_vblank_count(), which we must
438 		 * not call in VRR mode while we are in front-porch!
439 		 */
440 
441 		/* sequence will be replaced by real count during send-out. */
442 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
443 		e->pipe = amdgpu_crtc->crtc_id;
444 
445 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
446 		e = NULL;
447 	}
448 
449 	/* Keep track of vblank of this flip for flip throttling. We use the
450 	 * cooked hw counter, as that one incremented at start of this vblank
451 	 * of pageflip completion, so last_flip_vblank is the forbidden count
452 	 * for queueing new pageflips if vsync + VRR is enabled.
453 	 */
454 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
455 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
456 
457 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
458 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
459 
460 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
461 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
462 		     vrr_active, (int) !e);
463 }
464 
465 static void dm_vupdate_high_irq(void *interrupt_params)
466 {
467 	struct common_irq_params *irq_params = interrupt_params;
468 	struct amdgpu_device *adev = irq_params->adev;
469 	struct amdgpu_crtc *acrtc;
470 	struct drm_device *drm_dev;
471 	struct drm_vblank_crtc *vblank;
472 	ktime_t frame_duration_ns, previous_timestamp;
473 	unsigned long flags;
474 	int vrr_active;
475 
476 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
477 
478 	if (acrtc) {
479 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
480 		drm_dev = acrtc->base.dev;
481 		vblank = &drm_dev->vblank[acrtc->base.index];
482 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
483 		frame_duration_ns = vblank->time - previous_timestamp;
484 
485 		if (frame_duration_ns > 0) {
486 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
487 						frame_duration_ns,
488 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
489 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
490 		}
491 
492 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
493 			      acrtc->crtc_id,
494 			      vrr_active);
495 
496 		/* Core vblank handling is done here after end of front-porch in
497 		 * vrr mode, as vblank timestamping will give valid results
498 		 * while now done after front-porch. This will also deliver
499 		 * page-flip completion events that have been queued to us
500 		 * if a pageflip happened inside front-porch.
501 		 */
502 		if (vrr_active) {
503 			drm_crtc_handle_vblank(&acrtc->base);
504 
505 			/* BTR processing for pre-DCE12 ASICs */
506 			if (acrtc->dm_irq_params.stream &&
507 			    adev->family < AMDGPU_FAMILY_AI) {
508 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
509 				mod_freesync_handle_v_update(
510 				    adev->dm.freesync_module,
511 				    acrtc->dm_irq_params.stream,
512 				    &acrtc->dm_irq_params.vrr_params);
513 
514 				dc_stream_adjust_vmin_vmax(
515 				    adev->dm.dc,
516 				    acrtc->dm_irq_params.stream,
517 				    &acrtc->dm_irq_params.vrr_params.adjust);
518 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
519 			}
520 		}
521 	}
522 }
523 
524 /**
525  * dm_crtc_high_irq() - Handles CRTC interrupt
526  * @interrupt_params: used for determining the CRTC instance
527  *
528  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
529  * event handler.
530  */
531 static void dm_crtc_high_irq(void *interrupt_params)
532 {
533 	struct common_irq_params *irq_params = interrupt_params;
534 	struct amdgpu_device *adev = irq_params->adev;
535 	struct amdgpu_crtc *acrtc;
536 	unsigned long flags;
537 	int vrr_active;
538 
539 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
540 	if (!acrtc)
541 		return;
542 
543 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
544 
545 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
546 		      vrr_active, acrtc->dm_irq_params.active_planes);
547 
548 	/**
549 	 * Core vblank handling at start of front-porch is only possible
550 	 * in non-vrr mode, as only there vblank timestamping will give
551 	 * valid results while done in front-porch. Otherwise defer it
552 	 * to dm_vupdate_high_irq after end of front-porch.
553 	 */
554 	if (!vrr_active)
555 		drm_crtc_handle_vblank(&acrtc->base);
556 
557 	/**
558 	 * Following stuff must happen at start of vblank, for crc
559 	 * computation and below-the-range btr support in vrr mode.
560 	 */
561 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
562 
563 	/* BTR updates need to happen before VUPDATE on Vega and above. */
564 	if (adev->family < AMDGPU_FAMILY_AI)
565 		return;
566 
567 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
568 
569 	if (acrtc->dm_irq_params.stream &&
570 	    acrtc->dm_irq_params.vrr_params.supported &&
571 	    acrtc->dm_irq_params.freesync_config.state ==
572 		    VRR_STATE_ACTIVE_VARIABLE) {
573 		mod_freesync_handle_v_update(adev->dm.freesync_module,
574 					     acrtc->dm_irq_params.stream,
575 					     &acrtc->dm_irq_params.vrr_params);
576 
577 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
578 					   &acrtc->dm_irq_params.vrr_params.adjust);
579 	}
580 
581 	/*
582 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
583 	 * In that case, pageflip completion interrupts won't fire and pageflip
584 	 * completion events won't get delivered. Prevent this by sending
585 	 * pending pageflip events from here if a flip is still pending.
586 	 *
587 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
588 	 * avoid race conditions between flip programming and completion,
589 	 * which could cause too early flip completion events.
590 	 */
591 	if (adev->family >= AMDGPU_FAMILY_RV &&
592 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
593 	    acrtc->dm_irq_params.active_planes == 0) {
594 		if (acrtc->event) {
595 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
596 			acrtc->event = NULL;
597 			drm_crtc_vblank_put(&acrtc->base);
598 		}
599 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
600 	}
601 
602 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
603 }
604 
605 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
606 /**
607  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
608  * DCN generation ASICs
609  * @interrupt_params: interrupt parameters
610  *
611  * Used to set crc window/read out crc value at vertical line 0 position
612  */
613 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
614 {
615 	struct common_irq_params *irq_params = interrupt_params;
616 	struct amdgpu_device *adev = irq_params->adev;
617 	struct amdgpu_crtc *acrtc;
618 
619 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
620 
621 	if (!acrtc)
622 		return;
623 
624 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
625 }
626 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
627 
628 /**
629  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
630  * @adev: amdgpu_device pointer
631  * @notify: dmub notification structure
632  *
633  * Dmub AUX or SET_CONFIG command completion processing callback
634  * Copies dmub notification to DM which is to be read by AUX command.
635  * issuing thread and also signals the event to wake up the thread.
636  */
637 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
638 					struct dmub_notification *notify)
639 {
640 	if (adev->dm.dmub_notify)
641 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
642 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
643 		complete(&adev->dm.dmub_aux_transfer_done);
644 }
645 
646 /**
647  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
648  * @adev: amdgpu_device pointer
649  * @notify: dmub notification structure
650  *
651  * Dmub Hpd interrupt processing callback. Gets displayindex through the
652  * ink index and calls helper to do the processing.
653  */
654 static void dmub_hpd_callback(struct amdgpu_device *adev,
655 			      struct dmub_notification *notify)
656 {
657 	struct amdgpu_dm_connector *aconnector;
658 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
659 	struct drm_connector *connector;
660 	struct drm_connector_list_iter iter;
661 	struct dc_link *link;
662 	uint8_t link_index = 0;
663 	struct drm_device *dev;
664 
665 	if (adev == NULL)
666 		return;
667 
668 	if (notify == NULL) {
669 		DRM_ERROR("DMUB HPD callback notification was NULL");
670 		return;
671 	}
672 
673 	if (notify->link_index > adev->dm.dc->link_count) {
674 		DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
675 		return;
676 	}
677 
678 	link_index = notify->link_index;
679 	link = adev->dm.dc->links[link_index];
680 	dev = adev->dm.ddev;
681 
682 	drm_connector_list_iter_begin(dev, &iter);
683 	drm_for_each_connector_iter(connector, &iter) {
684 		aconnector = to_amdgpu_dm_connector(connector);
685 		if (link && aconnector->dc_link == link) {
686 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
687 			hpd_aconnector = aconnector;
688 			break;
689 		}
690 	}
691 	drm_connector_list_iter_end(&iter);
692 
693 	if (hpd_aconnector) {
694 		if (notify->type == DMUB_NOTIFICATION_HPD)
695 			handle_hpd_irq_helper(hpd_aconnector);
696 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
697 			handle_hpd_rx_irq(hpd_aconnector);
698 	}
699 }
700 
701 /**
702  * register_dmub_notify_callback - Sets callback for DMUB notify
703  * @adev: amdgpu_device pointer
704  * @type: Type of dmub notification
705  * @callback: Dmub interrupt callback function
706  * @dmub_int_thread_offload: offload indicator
707  *
708  * API to register a dmub callback handler for a dmub notification
709  * Also sets indicator whether callback processing to be offloaded.
710  * to dmub interrupt handling thread
711  * Return: true if successfully registered, false if there is existing registration
712  */
713 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
714 					  enum dmub_notification_type type,
715 					  dmub_notify_interrupt_callback_t callback,
716 					  bool dmub_int_thread_offload)
717 {
718 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
719 		adev->dm.dmub_callback[type] = callback;
720 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
721 	} else
722 		return false;
723 
724 	return true;
725 }
726 
727 static void dm_handle_hpd_work(struct work_struct *work)
728 {
729 	struct dmub_hpd_work *dmub_hpd_wrk;
730 
731 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
732 
733 	if (!dmub_hpd_wrk->dmub_notify) {
734 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
735 		return;
736 	}
737 
738 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
739 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
740 		dmub_hpd_wrk->dmub_notify);
741 	}
742 
743 	kfree(dmub_hpd_wrk->dmub_notify);
744 	kfree(dmub_hpd_wrk);
745 
746 }
747 
748 #define DMUB_TRACE_MAX_READ 64
749 /**
750  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
751  * @interrupt_params: used for determining the Outbox instance
752  *
753  * Handles the Outbox Interrupt
754  * event handler.
755  */
756 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
757 {
758 	struct dmub_notification notify;
759 	struct common_irq_params *irq_params = interrupt_params;
760 	struct amdgpu_device *adev = irq_params->adev;
761 	struct amdgpu_display_manager *dm = &adev->dm;
762 	struct dmcub_trace_buf_entry entry = { 0 };
763 	uint32_t count = 0;
764 	struct dmub_hpd_work *dmub_hpd_wrk;
765 	struct dc_link *plink = NULL;
766 
767 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
768 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
769 
770 		do {
771 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
772 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
773 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
774 				continue;
775 			}
776 			if (!dm->dmub_callback[notify.type]) {
777 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
778 				continue;
779 			}
780 			if (dm->dmub_thread_offload[notify.type] == true) {
781 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
782 				if (!dmub_hpd_wrk) {
783 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
784 					return;
785 				}
786 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
787 				if (!dmub_hpd_wrk->dmub_notify) {
788 					kfree(dmub_hpd_wrk);
789 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
790 					return;
791 				}
792 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
793 				if (dmub_hpd_wrk->dmub_notify)
794 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
795 				dmub_hpd_wrk->adev = adev;
796 				if (notify.type == DMUB_NOTIFICATION_HPD) {
797 					plink = adev->dm.dc->links[notify.link_index];
798 					if (plink) {
799 						plink->hpd_status =
800 							notify.hpd_status == DP_HPD_PLUG;
801 					}
802 				}
803 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
804 			} else {
805 				dm->dmub_callback[notify.type](adev, &notify);
806 			}
807 		} while (notify.pending_notification);
808 	}
809 
810 
811 	do {
812 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
813 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
814 							entry.param0, entry.param1);
815 
816 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
817 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
818 		} else
819 			break;
820 
821 		count++;
822 
823 	} while (count <= DMUB_TRACE_MAX_READ);
824 
825 	if (count > DMUB_TRACE_MAX_READ)
826 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
827 }
828 
829 static int dm_set_clockgating_state(void *handle,
830 		  enum amd_clockgating_state state)
831 {
832 	return 0;
833 }
834 
835 static int dm_set_powergating_state(void *handle,
836 		  enum amd_powergating_state state)
837 {
838 	return 0;
839 }
840 
841 /* Prototypes of private functions */
842 static int dm_early_init(void* handle);
843 
844 /* Allocate memory for FBC compressed data  */
845 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
846 {
847 	struct drm_device *dev = connector->dev;
848 	struct amdgpu_device *adev = drm_to_adev(dev);
849 	struct dm_compressor_info *compressor = &adev->dm.compressor;
850 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
851 	struct drm_display_mode *mode;
852 	unsigned long max_size = 0;
853 
854 	if (adev->dm.dc->fbc_compressor == NULL)
855 		return;
856 
857 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
858 		return;
859 
860 	if (compressor->bo_ptr)
861 		return;
862 
863 
864 	list_for_each_entry(mode, &connector->modes, head) {
865 		if (max_size < mode->htotal * mode->vtotal)
866 			max_size = mode->htotal * mode->vtotal;
867 	}
868 
869 	if (max_size) {
870 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
871 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
872 			    &compressor->gpu_addr, &compressor->cpu_addr);
873 
874 		if (r)
875 			DRM_ERROR("DM: Failed to initialize FBC\n");
876 		else {
877 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
878 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
879 		}
880 
881 	}
882 
883 }
884 
885 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
886 					  int pipe, bool *enabled,
887 					  unsigned char *buf, int max_bytes)
888 {
889 	struct drm_device *dev = dev_get_drvdata(kdev);
890 	struct amdgpu_device *adev = drm_to_adev(dev);
891 	struct drm_connector *connector;
892 	struct drm_connector_list_iter conn_iter;
893 	struct amdgpu_dm_connector *aconnector;
894 	int ret = 0;
895 
896 	*enabled = false;
897 
898 	mutex_lock(&adev->dm.audio_lock);
899 
900 	drm_connector_list_iter_begin(dev, &conn_iter);
901 	drm_for_each_connector_iter(connector, &conn_iter) {
902 		aconnector = to_amdgpu_dm_connector(connector);
903 		if (aconnector->audio_inst != port)
904 			continue;
905 
906 		*enabled = true;
907 		ret = drm_eld_size(connector->eld);
908 		memcpy(buf, connector->eld, min(max_bytes, ret));
909 
910 		break;
911 	}
912 	drm_connector_list_iter_end(&conn_iter);
913 
914 	mutex_unlock(&adev->dm.audio_lock);
915 
916 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
917 
918 	return ret;
919 }
920 
921 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
922 	.get_eld = amdgpu_dm_audio_component_get_eld,
923 };
924 
925 static int amdgpu_dm_audio_component_bind(struct device *kdev,
926 				       struct device *hda_kdev, void *data)
927 {
928 	struct drm_device *dev = dev_get_drvdata(kdev);
929 	struct amdgpu_device *adev = drm_to_adev(dev);
930 	struct drm_audio_component *acomp = data;
931 
932 	acomp->ops = &amdgpu_dm_audio_component_ops;
933 	acomp->dev = kdev;
934 	adev->dm.audio_component = acomp;
935 
936 	return 0;
937 }
938 
939 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
940 					  struct device *hda_kdev, void *data)
941 {
942 	struct drm_device *dev = dev_get_drvdata(kdev);
943 	struct amdgpu_device *adev = drm_to_adev(dev);
944 	struct drm_audio_component *acomp = data;
945 
946 	acomp->ops = NULL;
947 	acomp->dev = NULL;
948 	adev->dm.audio_component = NULL;
949 }
950 
951 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
952 	.bind	= amdgpu_dm_audio_component_bind,
953 	.unbind	= amdgpu_dm_audio_component_unbind,
954 };
955 
956 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
957 {
958 	int i, ret;
959 
960 	if (!amdgpu_audio)
961 		return 0;
962 
963 	adev->mode_info.audio.enabled = true;
964 
965 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
966 
967 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
968 		adev->mode_info.audio.pin[i].channels = -1;
969 		adev->mode_info.audio.pin[i].rate = -1;
970 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
971 		adev->mode_info.audio.pin[i].status_bits = 0;
972 		adev->mode_info.audio.pin[i].category_code = 0;
973 		adev->mode_info.audio.pin[i].connected = false;
974 		adev->mode_info.audio.pin[i].id =
975 			adev->dm.dc->res_pool->audios[i]->inst;
976 		adev->mode_info.audio.pin[i].offset = 0;
977 	}
978 
979 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
980 	if (ret < 0)
981 		return ret;
982 
983 	adev->dm.audio_registered = true;
984 
985 	return 0;
986 }
987 
988 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
989 {
990 	if (!amdgpu_audio)
991 		return;
992 
993 	if (!adev->mode_info.audio.enabled)
994 		return;
995 
996 	if (adev->dm.audio_registered) {
997 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
998 		adev->dm.audio_registered = false;
999 	}
1000 
1001 	/* TODO: Disable audio? */
1002 
1003 	adev->mode_info.audio.enabled = false;
1004 }
1005 
1006 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1007 {
1008 	struct drm_audio_component *acomp = adev->dm.audio_component;
1009 
1010 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1011 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1012 
1013 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1014 						 pin, -1);
1015 	}
1016 }
1017 
1018 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1019 {
1020 	const struct dmcub_firmware_header_v1_0 *hdr;
1021 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1022 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1023 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1024 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1025 	struct abm *abm = adev->dm.dc->res_pool->abm;
1026 	struct dmub_srv_hw_params hw_params;
1027 	enum dmub_status status;
1028 	const unsigned char *fw_inst_const, *fw_bss_data;
1029 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1030 	bool has_hw_support;
1031 
1032 	if (!dmub_srv)
1033 		/* DMUB isn't supported on the ASIC. */
1034 		return 0;
1035 
1036 	if (!fb_info) {
1037 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1038 		return -EINVAL;
1039 	}
1040 
1041 	if (!dmub_fw) {
1042 		/* Firmware required for DMUB support. */
1043 		DRM_ERROR("No firmware provided for DMUB.\n");
1044 		return -EINVAL;
1045 	}
1046 
1047 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1048 	if (status != DMUB_STATUS_OK) {
1049 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1050 		return -EINVAL;
1051 	}
1052 
1053 	if (!has_hw_support) {
1054 		DRM_INFO("DMUB unsupported on ASIC\n");
1055 		return 0;
1056 	}
1057 
1058 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1059 	status = dmub_srv_hw_reset(dmub_srv);
1060 	if (status != DMUB_STATUS_OK)
1061 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1062 
1063 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1064 
1065 	fw_inst_const = dmub_fw->data +
1066 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1067 			PSP_HEADER_BYTES;
1068 
1069 	fw_bss_data = dmub_fw->data +
1070 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1071 		      le32_to_cpu(hdr->inst_const_bytes);
1072 
1073 	/* Copy firmware and bios info into FB memory. */
1074 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1075 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1076 
1077 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1078 
1079 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1080 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1081 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1082 	 * will be done by dm_dmub_hw_init
1083 	 */
1084 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1085 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1086 				fw_inst_const_size);
1087 	}
1088 
1089 	if (fw_bss_data_size)
1090 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1091 		       fw_bss_data, fw_bss_data_size);
1092 
1093 	/* Copy firmware bios info into FB memory. */
1094 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1095 	       adev->bios_size);
1096 
1097 	/* Reset regions that need to be reset. */
1098 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1099 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1100 
1101 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1102 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1103 
1104 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1105 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1106 
1107 	/* Initialize hardware. */
1108 	memset(&hw_params, 0, sizeof(hw_params));
1109 	hw_params.fb_base = adev->gmc.fb_start;
1110 	hw_params.fb_offset = adev->gmc.aper_base;
1111 
1112 	/* backdoor load firmware and trigger dmub running */
1113 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1114 		hw_params.load_inst_const = true;
1115 
1116 	if (dmcu)
1117 		hw_params.psp_version = dmcu->psp_version;
1118 
1119 	for (i = 0; i < fb_info->num_fb; ++i)
1120 		hw_params.fb[i] = &fb_info->fb[i];
1121 
1122 	switch (adev->ip_versions[DCE_HWIP][0]) {
1123 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1124 		hw_params.dpia_supported = true;
1125 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1126 		break;
1127 	default:
1128 		break;
1129 	}
1130 
1131 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1132 	if (status != DMUB_STATUS_OK) {
1133 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1134 		return -EINVAL;
1135 	}
1136 
1137 	/* Wait for firmware load to finish. */
1138 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1139 	if (status != DMUB_STATUS_OK)
1140 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1141 
1142 	/* Init DMCU and ABM if available. */
1143 	if (dmcu && abm) {
1144 		dmcu->funcs->dmcu_init(dmcu);
1145 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1146 	}
1147 
1148 	if (!adev->dm.dc->ctx->dmub_srv)
1149 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1150 	if (!adev->dm.dc->ctx->dmub_srv) {
1151 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1152 		return -ENOMEM;
1153 	}
1154 
1155 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1156 		 adev->dm.dmcub_fw_version);
1157 
1158 	return 0;
1159 }
1160 
1161 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1162 {
1163 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1164 	enum dmub_status status;
1165 	bool init;
1166 
1167 	if (!dmub_srv) {
1168 		/* DMUB isn't supported on the ASIC. */
1169 		return;
1170 	}
1171 
1172 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1173 	if (status != DMUB_STATUS_OK)
1174 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1175 
1176 	if (status == DMUB_STATUS_OK && init) {
1177 		/* Wait for firmware load to finish. */
1178 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1179 		if (status != DMUB_STATUS_OK)
1180 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1181 	} else {
1182 		/* Perform the full hardware initialization. */
1183 		dm_dmub_hw_init(adev);
1184 	}
1185 }
1186 
1187 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1188 {
1189 	uint64_t pt_base;
1190 	uint32_t logical_addr_low;
1191 	uint32_t logical_addr_high;
1192 	uint32_t agp_base, agp_bot, agp_top;
1193 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1194 
1195 	memset(pa_config, 0, sizeof(*pa_config));
1196 
1197 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1198 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1199 
1200 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1201 		/*
1202 		 * Raven2 has a HW issue that it is unable to use the vram which
1203 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1204 		 * workaround that increase system aperture high address (add 1)
1205 		 * to get rid of the VM fault and hardware hang.
1206 		 */
1207 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1208 	else
1209 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1210 
1211 	agp_base = 0;
1212 	agp_bot = adev->gmc.agp_start >> 24;
1213 	agp_top = adev->gmc.agp_end >> 24;
1214 
1215 
1216 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1217 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1218 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1219 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1220 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1221 	page_table_base.low_part = lower_32_bits(pt_base);
1222 
1223 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1224 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1225 
1226 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1227 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1228 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1229 
1230 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1231 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1232 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1233 
1234 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1235 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1236 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1237 
1238 	pa_config->is_hvm_enabled = 0;
1239 
1240 }
1241 
1242 static void vblank_control_worker(struct work_struct *work)
1243 {
1244 	struct vblank_control_work *vblank_work =
1245 		container_of(work, struct vblank_control_work, work);
1246 	struct amdgpu_display_manager *dm = vblank_work->dm;
1247 
1248 	mutex_lock(&dm->dc_lock);
1249 
1250 	if (vblank_work->enable)
1251 		dm->active_vblank_irq_count++;
1252 	else if(dm->active_vblank_irq_count)
1253 		dm->active_vblank_irq_count--;
1254 
1255 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1256 
1257 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1258 
1259 	/* Control PSR based on vblank requirements from OS */
1260 	if (vblank_work->stream && vblank_work->stream->link) {
1261 		if (vblank_work->enable) {
1262 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1263 				amdgpu_dm_psr_disable(vblank_work->stream);
1264 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1265 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1266 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1267 			amdgpu_dm_psr_enable(vblank_work->stream);
1268 		}
1269 	}
1270 
1271 	mutex_unlock(&dm->dc_lock);
1272 
1273 	dc_stream_release(vblank_work->stream);
1274 
1275 	kfree(vblank_work);
1276 }
1277 
1278 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1279 {
1280 	struct hpd_rx_irq_offload_work *offload_work;
1281 	struct amdgpu_dm_connector *aconnector;
1282 	struct dc_link *dc_link;
1283 	struct amdgpu_device *adev;
1284 	enum dc_connection_type new_connection_type = dc_connection_none;
1285 	unsigned long flags;
1286 
1287 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1288 	aconnector = offload_work->offload_wq->aconnector;
1289 
1290 	if (!aconnector) {
1291 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1292 		goto skip;
1293 	}
1294 
1295 	adev = drm_to_adev(aconnector->base.dev);
1296 	dc_link = aconnector->dc_link;
1297 
1298 	mutex_lock(&aconnector->hpd_lock);
1299 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1300 		DRM_ERROR("KMS: Failed to detect connector\n");
1301 	mutex_unlock(&aconnector->hpd_lock);
1302 
1303 	if (new_connection_type == dc_connection_none)
1304 		goto skip;
1305 
1306 	if (amdgpu_in_reset(adev))
1307 		goto skip;
1308 
1309 	mutex_lock(&adev->dm.dc_lock);
1310 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1311 		dc_link_dp_handle_automated_test(dc_link);
1312 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1313 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1314 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1315 		dc_link_dp_handle_link_loss(dc_link);
1316 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1317 		offload_work->offload_wq->is_handling_link_loss = false;
1318 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1319 	}
1320 	mutex_unlock(&adev->dm.dc_lock);
1321 
1322 skip:
1323 	kfree(offload_work);
1324 
1325 }
1326 
1327 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1328 {
1329 	int max_caps = dc->caps.max_links;
1330 	int i = 0;
1331 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1332 
1333 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1334 
1335 	if (!hpd_rx_offload_wq)
1336 		return NULL;
1337 
1338 
1339 	for (i = 0; i < max_caps; i++) {
1340 		hpd_rx_offload_wq[i].wq =
1341 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1342 
1343 		if (hpd_rx_offload_wq[i].wq == NULL) {
1344 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1345 			return NULL;
1346 		}
1347 
1348 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1349 	}
1350 
1351 	return hpd_rx_offload_wq;
1352 }
1353 
1354 struct amdgpu_stutter_quirk {
1355 	u16 chip_vendor;
1356 	u16 chip_device;
1357 	u16 subsys_vendor;
1358 	u16 subsys_device;
1359 	u8 revision;
1360 };
1361 
1362 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1363 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1364 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1365 	{ 0, 0, 0, 0, 0 },
1366 };
1367 
1368 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1369 {
1370 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1371 
1372 	while (p && p->chip_device != 0) {
1373 		if (pdev->vendor == p->chip_vendor &&
1374 		    pdev->device == p->chip_device &&
1375 		    pdev->subsystem_vendor == p->subsys_vendor &&
1376 		    pdev->subsystem_device == p->subsys_device &&
1377 		    pdev->revision == p->revision) {
1378 			return true;
1379 		}
1380 		++p;
1381 	}
1382 	return false;
1383 }
1384 
1385 static int amdgpu_dm_init(struct amdgpu_device *adev)
1386 {
1387 	struct dc_init_data init_data;
1388 #ifdef CONFIG_DRM_AMD_DC_HDCP
1389 	struct dc_callback_init init_params;
1390 #endif
1391 	int r;
1392 
1393 	adev->dm.ddev = adev_to_drm(adev);
1394 	adev->dm.adev = adev;
1395 
1396 	/* Zero all the fields */
1397 	memset(&init_data, 0, sizeof(init_data));
1398 #ifdef CONFIG_DRM_AMD_DC_HDCP
1399 	memset(&init_params, 0, sizeof(init_params));
1400 #endif
1401 
1402 	mutex_init(&adev->dm.dc_lock);
1403 	mutex_init(&adev->dm.audio_lock);
1404 	spin_lock_init(&adev->dm.vblank_lock);
1405 
1406 	if(amdgpu_dm_irq_init(adev)) {
1407 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1408 		goto error;
1409 	}
1410 
1411 	init_data.asic_id.chip_family = adev->family;
1412 
1413 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1414 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1415 	init_data.asic_id.chip_id = adev->pdev->device;
1416 
1417 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1418 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1419 	init_data.asic_id.atombios_base_address =
1420 		adev->mode_info.atom_context->bios;
1421 
1422 	init_data.driver = adev;
1423 
1424 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1425 
1426 	if (!adev->dm.cgs_device) {
1427 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1428 		goto error;
1429 	}
1430 
1431 	init_data.cgs_device = adev->dm.cgs_device;
1432 
1433 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1434 
1435 	switch (adev->ip_versions[DCE_HWIP][0]) {
1436 	case IP_VERSION(2, 1, 0):
1437 		switch (adev->dm.dmcub_fw_version) {
1438 		case 0: /* development */
1439 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1440 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1441 			init_data.flags.disable_dmcu = false;
1442 			break;
1443 		default:
1444 			init_data.flags.disable_dmcu = true;
1445 		}
1446 		break;
1447 	case IP_VERSION(2, 0, 3):
1448 		init_data.flags.disable_dmcu = true;
1449 		break;
1450 	default:
1451 		break;
1452 	}
1453 
1454 	switch (adev->asic_type) {
1455 	case CHIP_CARRIZO:
1456 	case CHIP_STONEY:
1457 		init_data.flags.gpu_vm_support = true;
1458 		break;
1459 	default:
1460 		switch (adev->ip_versions[DCE_HWIP][0]) {
1461 		case IP_VERSION(1, 0, 0):
1462 		case IP_VERSION(1, 0, 1):
1463 			/* enable S/G on PCO and RV2 */
1464 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1465 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1466 				init_data.flags.gpu_vm_support = true;
1467 			break;
1468 		case IP_VERSION(2, 1, 0):
1469 		case IP_VERSION(3, 0, 1):
1470 		case IP_VERSION(3, 1, 2):
1471 		case IP_VERSION(3, 1, 3):
1472 		case IP_VERSION(3, 1, 5):
1473 		case IP_VERSION(3, 1, 6):
1474 			init_data.flags.gpu_vm_support = true;
1475 			break;
1476 		default:
1477 			break;
1478 		}
1479 		break;
1480 	}
1481 
1482 	if (init_data.flags.gpu_vm_support)
1483 		adev->mode_info.gpu_vm_support = true;
1484 
1485 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1486 		init_data.flags.fbc_support = true;
1487 
1488 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1489 		init_data.flags.multi_mon_pp_mclk_switch = true;
1490 
1491 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1492 		init_data.flags.disable_fractional_pwm = true;
1493 
1494 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1495 		init_data.flags.edp_no_power_sequencing = true;
1496 
1497 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1498 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1499 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1500 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1501 
1502 	init_data.flags.seamless_boot_edp_requested = false;
1503 
1504 	if (check_seamless_boot_capability(adev)) {
1505 		init_data.flags.seamless_boot_edp_requested = true;
1506 		init_data.flags.allow_seamless_boot_optimization = true;
1507 		DRM_INFO("Seamless boot condition check passed\n");
1508 	}
1509 
1510 	INIT_LIST_HEAD(&adev->dm.da_list);
1511 	/* Display Core create. */
1512 	adev->dm.dc = dc_create(&init_data);
1513 
1514 	if (adev->dm.dc) {
1515 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1516 	} else {
1517 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1518 		goto error;
1519 	}
1520 
1521 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1522 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1523 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1524 	}
1525 
1526 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1527 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1528 	if (dm_should_disable_stutter(adev->pdev))
1529 		adev->dm.dc->debug.disable_stutter = true;
1530 
1531 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1532 		adev->dm.dc->debug.disable_stutter = true;
1533 
1534 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1535 		adev->dm.dc->debug.disable_dsc = true;
1536 		adev->dm.dc->debug.disable_dsc_edp = true;
1537 	}
1538 
1539 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1540 		adev->dm.dc->debug.disable_clock_gate = true;
1541 
1542 	r = dm_dmub_hw_init(adev);
1543 	if (r) {
1544 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1545 		goto error;
1546 	}
1547 
1548 	dc_hardware_init(adev->dm.dc);
1549 
1550 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1551 	if (!adev->dm.hpd_rx_offload_wq) {
1552 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1553 		goto error;
1554 	}
1555 
1556 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1557 		struct dc_phy_addr_space_config pa_config;
1558 
1559 		mmhub_read_system_context(adev, &pa_config);
1560 
1561 		// Call the DC init_memory func
1562 		dc_setup_system_context(adev->dm.dc, &pa_config);
1563 	}
1564 
1565 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1566 	if (!adev->dm.freesync_module) {
1567 		DRM_ERROR(
1568 		"amdgpu: failed to initialize freesync_module.\n");
1569 	} else
1570 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1571 				adev->dm.freesync_module);
1572 
1573 	amdgpu_dm_init_color_mod();
1574 
1575 	if (adev->dm.dc->caps.max_links > 0) {
1576 		adev->dm.vblank_control_workqueue =
1577 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1578 		if (!adev->dm.vblank_control_workqueue)
1579 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1580 	}
1581 
1582 #ifdef CONFIG_DRM_AMD_DC_HDCP
1583 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1584 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1585 
1586 		if (!adev->dm.hdcp_workqueue)
1587 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1588 		else
1589 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1590 
1591 		dc_init_callbacks(adev->dm.dc, &init_params);
1592 	}
1593 #endif
1594 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1595 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1596 #endif
1597 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1598 		init_completion(&adev->dm.dmub_aux_transfer_done);
1599 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1600 		if (!adev->dm.dmub_notify) {
1601 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1602 			goto error;
1603 		}
1604 
1605 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1606 		if (!adev->dm.delayed_hpd_wq) {
1607 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1608 			goto error;
1609 		}
1610 
1611 		amdgpu_dm_outbox_init(adev);
1612 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1613 			dmub_aux_setconfig_callback, false)) {
1614 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
1615 			goto error;
1616 		}
1617 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1618 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1619 			goto error;
1620 		}
1621 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1622 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1623 			goto error;
1624 		}
1625 	}
1626 
1627 	if (amdgpu_dm_initialize_drm_device(adev)) {
1628 		DRM_ERROR(
1629 		"amdgpu: failed to initialize sw for display support.\n");
1630 		goto error;
1631 	}
1632 
1633 	/* create fake encoders for MST */
1634 	dm_dp_create_fake_mst_encoders(adev);
1635 
1636 	/* TODO: Add_display_info? */
1637 
1638 	/* TODO use dynamic cursor width */
1639 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1640 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1641 
1642 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1643 		DRM_ERROR(
1644 		"amdgpu: failed to initialize sw for display support.\n");
1645 		goto error;
1646 	}
1647 
1648 
1649 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1650 
1651 	return 0;
1652 error:
1653 	amdgpu_dm_fini(adev);
1654 
1655 	return -EINVAL;
1656 }
1657 
1658 static int amdgpu_dm_early_fini(void *handle)
1659 {
1660 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1661 
1662 	amdgpu_dm_audio_fini(adev);
1663 
1664 	return 0;
1665 }
1666 
1667 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1668 {
1669 	int i;
1670 
1671 	if (adev->dm.vblank_control_workqueue) {
1672 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1673 		adev->dm.vblank_control_workqueue = NULL;
1674 	}
1675 
1676 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1677 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1678 	}
1679 
1680 	amdgpu_dm_destroy_drm_device(&adev->dm);
1681 
1682 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1683 	if (adev->dm.crc_rd_wrk) {
1684 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1685 		kfree(adev->dm.crc_rd_wrk);
1686 		adev->dm.crc_rd_wrk = NULL;
1687 	}
1688 #endif
1689 #ifdef CONFIG_DRM_AMD_DC_HDCP
1690 	if (adev->dm.hdcp_workqueue) {
1691 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1692 		adev->dm.hdcp_workqueue = NULL;
1693 	}
1694 
1695 	if (adev->dm.dc)
1696 		dc_deinit_callbacks(adev->dm.dc);
1697 #endif
1698 
1699 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1700 
1701 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1702 		kfree(adev->dm.dmub_notify);
1703 		adev->dm.dmub_notify = NULL;
1704 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1705 		adev->dm.delayed_hpd_wq = NULL;
1706 	}
1707 
1708 	if (adev->dm.dmub_bo)
1709 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1710 				      &adev->dm.dmub_bo_gpu_addr,
1711 				      &adev->dm.dmub_bo_cpu_addr);
1712 
1713 	if (adev->dm.hpd_rx_offload_wq) {
1714 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1715 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1716 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1717 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1718 			}
1719 		}
1720 
1721 		kfree(adev->dm.hpd_rx_offload_wq);
1722 		adev->dm.hpd_rx_offload_wq = NULL;
1723 	}
1724 
1725 	/* DC Destroy TODO: Replace destroy DAL */
1726 	if (adev->dm.dc)
1727 		dc_destroy(&adev->dm.dc);
1728 	/*
1729 	 * TODO: pageflip, vlank interrupt
1730 	 *
1731 	 * amdgpu_dm_irq_fini(adev);
1732 	 */
1733 
1734 	if (adev->dm.cgs_device) {
1735 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1736 		adev->dm.cgs_device = NULL;
1737 	}
1738 	if (adev->dm.freesync_module) {
1739 		mod_freesync_destroy(adev->dm.freesync_module);
1740 		adev->dm.freesync_module = NULL;
1741 	}
1742 
1743 	mutex_destroy(&adev->dm.audio_lock);
1744 	mutex_destroy(&adev->dm.dc_lock);
1745 
1746 	return;
1747 }
1748 
1749 static int load_dmcu_fw(struct amdgpu_device *adev)
1750 {
1751 	const char *fw_name_dmcu = NULL;
1752 	int r;
1753 	const struct dmcu_firmware_header_v1_0 *hdr;
1754 
1755 	switch(adev->asic_type) {
1756 #if defined(CONFIG_DRM_AMD_DC_SI)
1757 	case CHIP_TAHITI:
1758 	case CHIP_PITCAIRN:
1759 	case CHIP_VERDE:
1760 	case CHIP_OLAND:
1761 #endif
1762 	case CHIP_BONAIRE:
1763 	case CHIP_HAWAII:
1764 	case CHIP_KAVERI:
1765 	case CHIP_KABINI:
1766 	case CHIP_MULLINS:
1767 	case CHIP_TONGA:
1768 	case CHIP_FIJI:
1769 	case CHIP_CARRIZO:
1770 	case CHIP_STONEY:
1771 	case CHIP_POLARIS11:
1772 	case CHIP_POLARIS10:
1773 	case CHIP_POLARIS12:
1774 	case CHIP_VEGAM:
1775 	case CHIP_VEGA10:
1776 	case CHIP_VEGA12:
1777 	case CHIP_VEGA20:
1778 		return 0;
1779 	case CHIP_NAVI12:
1780 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1781 		break;
1782 	case CHIP_RAVEN:
1783 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1784 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1785 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1786 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1787 		else
1788 			return 0;
1789 		break;
1790 	default:
1791 		switch (adev->ip_versions[DCE_HWIP][0]) {
1792 		case IP_VERSION(2, 0, 2):
1793 		case IP_VERSION(2, 0, 3):
1794 		case IP_VERSION(2, 0, 0):
1795 		case IP_VERSION(2, 1, 0):
1796 		case IP_VERSION(3, 0, 0):
1797 		case IP_VERSION(3, 0, 2):
1798 		case IP_VERSION(3, 0, 3):
1799 		case IP_VERSION(3, 0, 1):
1800 		case IP_VERSION(3, 1, 2):
1801 		case IP_VERSION(3, 1, 3):
1802 		case IP_VERSION(3, 1, 5):
1803 		case IP_VERSION(3, 1, 6):
1804 			return 0;
1805 		default:
1806 			break;
1807 		}
1808 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1809 		return -EINVAL;
1810 	}
1811 
1812 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1813 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1814 		return 0;
1815 	}
1816 
1817 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1818 	if (r == -ENOENT) {
1819 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1820 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1821 		adev->dm.fw_dmcu = NULL;
1822 		return 0;
1823 	}
1824 	if (r) {
1825 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1826 			fw_name_dmcu);
1827 		return r;
1828 	}
1829 
1830 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1831 	if (r) {
1832 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1833 			fw_name_dmcu);
1834 		release_firmware(adev->dm.fw_dmcu);
1835 		adev->dm.fw_dmcu = NULL;
1836 		return r;
1837 	}
1838 
1839 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1840 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1841 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1842 	adev->firmware.fw_size +=
1843 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1844 
1845 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1846 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1847 	adev->firmware.fw_size +=
1848 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1849 
1850 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1851 
1852 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1853 
1854 	return 0;
1855 }
1856 
1857 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1858 {
1859 	struct amdgpu_device *adev = ctx;
1860 
1861 	return dm_read_reg(adev->dm.dc->ctx, address);
1862 }
1863 
1864 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1865 				     uint32_t value)
1866 {
1867 	struct amdgpu_device *adev = ctx;
1868 
1869 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1870 }
1871 
1872 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1873 {
1874 	struct dmub_srv_create_params create_params;
1875 	struct dmub_srv_region_params region_params;
1876 	struct dmub_srv_region_info region_info;
1877 	struct dmub_srv_fb_params fb_params;
1878 	struct dmub_srv_fb_info *fb_info;
1879 	struct dmub_srv *dmub_srv;
1880 	const struct dmcub_firmware_header_v1_0 *hdr;
1881 	const char *fw_name_dmub;
1882 	enum dmub_asic dmub_asic;
1883 	enum dmub_status status;
1884 	int r;
1885 
1886 	switch (adev->ip_versions[DCE_HWIP][0]) {
1887 	case IP_VERSION(2, 1, 0):
1888 		dmub_asic = DMUB_ASIC_DCN21;
1889 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1890 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1891 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1892 		break;
1893 	case IP_VERSION(3, 0, 0):
1894 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1895 			dmub_asic = DMUB_ASIC_DCN30;
1896 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1897 		} else {
1898 			dmub_asic = DMUB_ASIC_DCN30;
1899 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1900 		}
1901 		break;
1902 	case IP_VERSION(3, 0, 1):
1903 		dmub_asic = DMUB_ASIC_DCN301;
1904 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1905 		break;
1906 	case IP_VERSION(3, 0, 2):
1907 		dmub_asic = DMUB_ASIC_DCN302;
1908 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1909 		break;
1910 	case IP_VERSION(3, 0, 3):
1911 		dmub_asic = DMUB_ASIC_DCN303;
1912 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1913 		break;
1914 	case IP_VERSION(3, 1, 2):
1915 	case IP_VERSION(3, 1, 3):
1916 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1917 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1918 		break;
1919 	case IP_VERSION(3, 1, 5):
1920 		dmub_asic = DMUB_ASIC_DCN315;
1921 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1922 		break;
1923 	case IP_VERSION(3, 1, 6):
1924 		dmub_asic = DMUB_ASIC_DCN316;
1925 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1926 		break;
1927 	default:
1928 		/* ASIC doesn't support DMUB. */
1929 		return 0;
1930 	}
1931 
1932 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1933 	if (r) {
1934 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1935 		return 0;
1936 	}
1937 
1938 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1939 	if (r) {
1940 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1941 		return 0;
1942 	}
1943 
1944 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1945 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1946 
1947 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1948 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1949 			AMDGPU_UCODE_ID_DMCUB;
1950 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1951 			adev->dm.dmub_fw;
1952 		adev->firmware.fw_size +=
1953 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1954 
1955 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1956 			 adev->dm.dmcub_fw_version);
1957 	}
1958 
1959 
1960 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1961 	dmub_srv = adev->dm.dmub_srv;
1962 
1963 	if (!dmub_srv) {
1964 		DRM_ERROR("Failed to allocate DMUB service!\n");
1965 		return -ENOMEM;
1966 	}
1967 
1968 	memset(&create_params, 0, sizeof(create_params));
1969 	create_params.user_ctx = adev;
1970 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1971 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1972 	create_params.asic = dmub_asic;
1973 
1974 	/* Create the DMUB service. */
1975 	status = dmub_srv_create(dmub_srv, &create_params);
1976 	if (status != DMUB_STATUS_OK) {
1977 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1978 		return -EINVAL;
1979 	}
1980 
1981 	/* Calculate the size of all the regions for the DMUB service. */
1982 	memset(&region_params, 0, sizeof(region_params));
1983 
1984 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1985 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1986 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1987 	region_params.vbios_size = adev->bios_size;
1988 	region_params.fw_bss_data = region_params.bss_data_size ?
1989 		adev->dm.dmub_fw->data +
1990 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1991 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1992 	region_params.fw_inst_const =
1993 		adev->dm.dmub_fw->data +
1994 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1995 		PSP_HEADER_BYTES;
1996 
1997 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1998 					   &region_info);
1999 
2000 	if (status != DMUB_STATUS_OK) {
2001 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2002 		return -EINVAL;
2003 	}
2004 
2005 	/*
2006 	 * Allocate a framebuffer based on the total size of all the regions.
2007 	 * TODO: Move this into GART.
2008 	 */
2009 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2010 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2011 				    &adev->dm.dmub_bo_gpu_addr,
2012 				    &adev->dm.dmub_bo_cpu_addr);
2013 	if (r)
2014 		return r;
2015 
2016 	/* Rebase the regions on the framebuffer address. */
2017 	memset(&fb_params, 0, sizeof(fb_params));
2018 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2019 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2020 	fb_params.region_info = &region_info;
2021 
2022 	adev->dm.dmub_fb_info =
2023 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2024 	fb_info = adev->dm.dmub_fb_info;
2025 
2026 	if (!fb_info) {
2027 		DRM_ERROR(
2028 			"Failed to allocate framebuffer info for DMUB service!\n");
2029 		return -ENOMEM;
2030 	}
2031 
2032 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2033 	if (status != DMUB_STATUS_OK) {
2034 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2035 		return -EINVAL;
2036 	}
2037 
2038 	return 0;
2039 }
2040 
2041 static int dm_sw_init(void *handle)
2042 {
2043 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2044 	int r;
2045 
2046 	r = dm_dmub_sw_init(adev);
2047 	if (r)
2048 		return r;
2049 
2050 	return load_dmcu_fw(adev);
2051 }
2052 
2053 static int dm_sw_fini(void *handle)
2054 {
2055 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2056 
2057 	kfree(adev->dm.dmub_fb_info);
2058 	adev->dm.dmub_fb_info = NULL;
2059 
2060 	if (adev->dm.dmub_srv) {
2061 		dmub_srv_destroy(adev->dm.dmub_srv);
2062 		adev->dm.dmub_srv = NULL;
2063 	}
2064 
2065 	release_firmware(adev->dm.dmub_fw);
2066 	adev->dm.dmub_fw = NULL;
2067 
2068 	release_firmware(adev->dm.fw_dmcu);
2069 	adev->dm.fw_dmcu = NULL;
2070 
2071 	return 0;
2072 }
2073 
2074 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2075 {
2076 	struct amdgpu_dm_connector *aconnector;
2077 	struct drm_connector *connector;
2078 	struct drm_connector_list_iter iter;
2079 	int ret = 0;
2080 
2081 	drm_connector_list_iter_begin(dev, &iter);
2082 	drm_for_each_connector_iter(connector, &iter) {
2083 		aconnector = to_amdgpu_dm_connector(connector);
2084 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2085 		    aconnector->mst_mgr.aux) {
2086 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2087 					 aconnector,
2088 					 aconnector->base.base.id);
2089 
2090 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2091 			if (ret < 0) {
2092 				DRM_ERROR("DM_MST: Failed to start MST\n");
2093 				aconnector->dc_link->type =
2094 					dc_connection_single;
2095 				break;
2096 			}
2097 		}
2098 	}
2099 	drm_connector_list_iter_end(&iter);
2100 
2101 	return ret;
2102 }
2103 
2104 static int dm_late_init(void *handle)
2105 {
2106 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2107 
2108 	struct dmcu_iram_parameters params;
2109 	unsigned int linear_lut[16];
2110 	int i;
2111 	struct dmcu *dmcu = NULL;
2112 
2113 	dmcu = adev->dm.dc->res_pool->dmcu;
2114 
2115 	for (i = 0; i < 16; i++)
2116 		linear_lut[i] = 0xFFFF * i / 15;
2117 
2118 	params.set = 0;
2119 	params.backlight_ramping_override = false;
2120 	params.backlight_ramping_start = 0xCCCC;
2121 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2122 	params.backlight_lut_array_size = 16;
2123 	params.backlight_lut_array = linear_lut;
2124 
2125 	/* Min backlight level after ABM reduction,  Don't allow below 1%
2126 	 * 0xFFFF x 0.01 = 0x28F
2127 	 */
2128 	params.min_abm_backlight = 0x28F;
2129 	/* In the case where abm is implemented on dmcub,
2130 	* dmcu object will be null.
2131 	* ABM 2.4 and up are implemented on dmcub.
2132 	*/
2133 	if (dmcu) {
2134 		if (!dmcu_load_iram(dmcu, params))
2135 			return -EINVAL;
2136 	} else if (adev->dm.dc->ctx->dmub_srv) {
2137 		struct dc_link *edp_links[MAX_NUM_EDP];
2138 		int edp_num;
2139 
2140 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2141 		for (i = 0; i < edp_num; i++) {
2142 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2143 				return -EINVAL;
2144 		}
2145 	}
2146 
2147 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2148 }
2149 
2150 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2151 {
2152 	struct amdgpu_dm_connector *aconnector;
2153 	struct drm_connector *connector;
2154 	struct drm_connector_list_iter iter;
2155 	struct drm_dp_mst_topology_mgr *mgr;
2156 	int ret;
2157 	bool need_hotplug = false;
2158 
2159 	drm_connector_list_iter_begin(dev, &iter);
2160 	drm_for_each_connector_iter(connector, &iter) {
2161 		aconnector = to_amdgpu_dm_connector(connector);
2162 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2163 		    aconnector->mst_port)
2164 			continue;
2165 
2166 		mgr = &aconnector->mst_mgr;
2167 
2168 		if (suspend) {
2169 			drm_dp_mst_topology_mgr_suspend(mgr);
2170 		} else {
2171 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2172 			if (ret < 0) {
2173 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2174 				need_hotplug = true;
2175 			}
2176 		}
2177 	}
2178 	drm_connector_list_iter_end(&iter);
2179 
2180 	if (need_hotplug)
2181 		drm_kms_helper_hotplug_event(dev);
2182 }
2183 
2184 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2185 {
2186 	int ret = 0;
2187 
2188 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
2189 	 * on window driver dc implementation.
2190 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2191 	 * should be passed to smu during boot up and resume from s3.
2192 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
2193 	 * dcn20_resource_construct
2194 	 * then call pplib functions below to pass the settings to smu:
2195 	 * smu_set_watermarks_for_clock_ranges
2196 	 * smu_set_watermarks_table
2197 	 * navi10_set_watermarks_table
2198 	 * smu_write_watermarks_table
2199 	 *
2200 	 * For Renoir, clock settings of dcn watermark are also fixed values.
2201 	 * dc has implemented different flow for window driver:
2202 	 * dc_hardware_init / dc_set_power_state
2203 	 * dcn10_init_hw
2204 	 * notify_wm_ranges
2205 	 * set_wm_ranges
2206 	 * -- Linux
2207 	 * smu_set_watermarks_for_clock_ranges
2208 	 * renoir_set_watermarks_table
2209 	 * smu_write_watermarks_table
2210 	 *
2211 	 * For Linux,
2212 	 * dc_hardware_init -> amdgpu_dm_init
2213 	 * dc_set_power_state --> dm_resume
2214 	 *
2215 	 * therefore, this function apply to navi10/12/14 but not Renoir
2216 	 * *
2217 	 */
2218 	switch (adev->ip_versions[DCE_HWIP][0]) {
2219 	case IP_VERSION(2, 0, 2):
2220 	case IP_VERSION(2, 0, 0):
2221 		break;
2222 	default:
2223 		return 0;
2224 	}
2225 
2226 	ret = amdgpu_dpm_write_watermarks_table(adev);
2227 	if (ret) {
2228 		DRM_ERROR("Failed to update WMTABLE!\n");
2229 		return ret;
2230 	}
2231 
2232 	return 0;
2233 }
2234 
2235 /**
2236  * dm_hw_init() - Initialize DC device
2237  * @handle: The base driver device containing the amdgpu_dm device.
2238  *
2239  * Initialize the &struct amdgpu_display_manager device. This involves calling
2240  * the initializers of each DM component, then populating the struct with them.
2241  *
2242  * Although the function implies hardware initialization, both hardware and
2243  * software are initialized here. Splitting them out to their relevant init
2244  * hooks is a future TODO item.
2245  *
2246  * Some notable things that are initialized here:
2247  *
2248  * - Display Core, both software and hardware
2249  * - DC modules that we need (freesync and color management)
2250  * - DRM software states
2251  * - Interrupt sources and handlers
2252  * - Vblank support
2253  * - Debug FS entries, if enabled
2254  */
2255 static int dm_hw_init(void *handle)
2256 {
2257 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2258 	/* Create DAL display manager */
2259 	amdgpu_dm_init(adev);
2260 	amdgpu_dm_hpd_init(adev);
2261 
2262 	return 0;
2263 }
2264 
2265 /**
2266  * dm_hw_fini() - Teardown DC device
2267  * @handle: The base driver device containing the amdgpu_dm device.
2268  *
2269  * Teardown components within &struct amdgpu_display_manager that require
2270  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2271  * were loaded. Also flush IRQ workqueues and disable them.
2272  */
2273 static int dm_hw_fini(void *handle)
2274 {
2275 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2276 
2277 	amdgpu_dm_hpd_fini(adev);
2278 
2279 	amdgpu_dm_irq_fini(adev);
2280 	amdgpu_dm_fini(adev);
2281 	return 0;
2282 }
2283 
2284 
2285 static int dm_enable_vblank(struct drm_crtc *crtc);
2286 static void dm_disable_vblank(struct drm_crtc *crtc);
2287 
2288 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2289 				 struct dc_state *state, bool enable)
2290 {
2291 	enum dc_irq_source irq_source;
2292 	struct amdgpu_crtc *acrtc;
2293 	int rc = -EBUSY;
2294 	int i = 0;
2295 
2296 	for (i = 0; i < state->stream_count; i++) {
2297 		acrtc = get_crtc_by_otg_inst(
2298 				adev, state->stream_status[i].primary_otg_inst);
2299 
2300 		if (acrtc && state->stream_status[i].plane_count != 0) {
2301 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2302 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2303 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2304 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2305 			if (rc)
2306 				DRM_WARN("Failed to %s pflip interrupts\n",
2307 					 enable ? "enable" : "disable");
2308 
2309 			if (enable) {
2310 				rc = dm_enable_vblank(&acrtc->base);
2311 				if (rc)
2312 					DRM_WARN("Failed to enable vblank interrupts\n");
2313 			} else {
2314 				dm_disable_vblank(&acrtc->base);
2315 			}
2316 
2317 		}
2318 	}
2319 
2320 }
2321 
2322 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2323 {
2324 	struct dc_state *context = NULL;
2325 	enum dc_status res = DC_ERROR_UNEXPECTED;
2326 	int i;
2327 	struct dc_stream_state *del_streams[MAX_PIPES];
2328 	int del_streams_count = 0;
2329 
2330 	memset(del_streams, 0, sizeof(del_streams));
2331 
2332 	context = dc_create_state(dc);
2333 	if (context == NULL)
2334 		goto context_alloc_fail;
2335 
2336 	dc_resource_state_copy_construct_current(dc, context);
2337 
2338 	/* First remove from context all streams */
2339 	for (i = 0; i < context->stream_count; i++) {
2340 		struct dc_stream_state *stream = context->streams[i];
2341 
2342 		del_streams[del_streams_count++] = stream;
2343 	}
2344 
2345 	/* Remove all planes for removed streams and then remove the streams */
2346 	for (i = 0; i < del_streams_count; i++) {
2347 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2348 			res = DC_FAIL_DETACH_SURFACES;
2349 			goto fail;
2350 		}
2351 
2352 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2353 		if (res != DC_OK)
2354 			goto fail;
2355 	}
2356 
2357 	res = dc_commit_state(dc, context);
2358 
2359 fail:
2360 	dc_release_state(context);
2361 
2362 context_alloc_fail:
2363 	return res;
2364 }
2365 
2366 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2367 {
2368 	int i;
2369 
2370 	if (dm->hpd_rx_offload_wq) {
2371 		for (i = 0; i < dm->dc->caps.max_links; i++)
2372 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2373 	}
2374 }
2375 
2376 static int dm_suspend(void *handle)
2377 {
2378 	struct amdgpu_device *adev = handle;
2379 	struct amdgpu_display_manager *dm = &adev->dm;
2380 	int ret = 0;
2381 
2382 	if (amdgpu_in_reset(adev)) {
2383 		mutex_lock(&dm->dc_lock);
2384 
2385 		dc_allow_idle_optimizations(adev->dm.dc, false);
2386 
2387 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2388 
2389 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2390 
2391 		amdgpu_dm_commit_zero_streams(dm->dc);
2392 
2393 		amdgpu_dm_irq_suspend(adev);
2394 
2395 		hpd_rx_irq_work_suspend(dm);
2396 
2397 		return ret;
2398 	}
2399 
2400 	WARN_ON(adev->dm.cached_state);
2401 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2402 
2403 	s3_handle_mst(adev_to_drm(adev), true);
2404 
2405 	amdgpu_dm_irq_suspend(adev);
2406 
2407 	hpd_rx_irq_work_suspend(dm);
2408 
2409 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2410 
2411 	return 0;
2412 }
2413 
2414 struct amdgpu_dm_connector *
2415 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2416 					     struct drm_crtc *crtc)
2417 {
2418 	uint32_t i;
2419 	struct drm_connector_state *new_con_state;
2420 	struct drm_connector *connector;
2421 	struct drm_crtc *crtc_from_state;
2422 
2423 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2424 		crtc_from_state = new_con_state->crtc;
2425 
2426 		if (crtc_from_state == crtc)
2427 			return to_amdgpu_dm_connector(connector);
2428 	}
2429 
2430 	return NULL;
2431 }
2432 
2433 static void emulated_link_detect(struct dc_link *link)
2434 {
2435 	struct dc_sink_init_data sink_init_data = { 0 };
2436 	struct display_sink_capability sink_caps = { 0 };
2437 	enum dc_edid_status edid_status;
2438 	struct dc_context *dc_ctx = link->ctx;
2439 	struct dc_sink *sink = NULL;
2440 	struct dc_sink *prev_sink = NULL;
2441 
2442 	link->type = dc_connection_none;
2443 	prev_sink = link->local_sink;
2444 
2445 	if (prev_sink)
2446 		dc_sink_release(prev_sink);
2447 
2448 	switch (link->connector_signal) {
2449 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2450 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2451 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2452 		break;
2453 	}
2454 
2455 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2456 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2457 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2458 		break;
2459 	}
2460 
2461 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2462 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2463 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2464 		break;
2465 	}
2466 
2467 	case SIGNAL_TYPE_LVDS: {
2468 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2469 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2470 		break;
2471 	}
2472 
2473 	case SIGNAL_TYPE_EDP: {
2474 		sink_caps.transaction_type =
2475 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2476 		sink_caps.signal = SIGNAL_TYPE_EDP;
2477 		break;
2478 	}
2479 
2480 	case SIGNAL_TYPE_DISPLAY_PORT: {
2481 		sink_caps.transaction_type =
2482 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2483 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2484 		break;
2485 	}
2486 
2487 	default:
2488 		DC_ERROR("Invalid connector type! signal:%d\n",
2489 			link->connector_signal);
2490 		return;
2491 	}
2492 
2493 	sink_init_data.link = link;
2494 	sink_init_data.sink_signal = sink_caps.signal;
2495 
2496 	sink = dc_sink_create(&sink_init_data);
2497 	if (!sink) {
2498 		DC_ERROR("Failed to create sink!\n");
2499 		return;
2500 	}
2501 
2502 	/* dc_sink_create returns a new reference */
2503 	link->local_sink = sink;
2504 
2505 	edid_status = dm_helpers_read_local_edid(
2506 			link->ctx,
2507 			link,
2508 			sink);
2509 
2510 	if (edid_status != EDID_OK)
2511 		DC_ERROR("Failed to read EDID");
2512 
2513 }
2514 
2515 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2516 				     struct amdgpu_display_manager *dm)
2517 {
2518 	struct {
2519 		struct dc_surface_update surface_updates[MAX_SURFACES];
2520 		struct dc_plane_info plane_infos[MAX_SURFACES];
2521 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2522 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2523 		struct dc_stream_update stream_update;
2524 	} * bundle;
2525 	int k, m;
2526 
2527 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2528 
2529 	if (!bundle) {
2530 		dm_error("Failed to allocate update bundle\n");
2531 		goto cleanup;
2532 	}
2533 
2534 	for (k = 0; k < dc_state->stream_count; k++) {
2535 		bundle->stream_update.stream = dc_state->streams[k];
2536 
2537 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2538 			bundle->surface_updates[m].surface =
2539 				dc_state->stream_status->plane_states[m];
2540 			bundle->surface_updates[m].surface->force_full_update =
2541 				true;
2542 		}
2543 		dc_commit_updates_for_stream(
2544 			dm->dc, bundle->surface_updates,
2545 			dc_state->stream_status->plane_count,
2546 			dc_state->streams[k], &bundle->stream_update, dc_state);
2547 	}
2548 
2549 cleanup:
2550 	kfree(bundle);
2551 
2552 	return;
2553 }
2554 
2555 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2556 {
2557 	struct dc_stream_state *stream_state;
2558 	struct amdgpu_dm_connector *aconnector = link->priv;
2559 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2560 	struct dc_stream_update stream_update;
2561 	bool dpms_off = true;
2562 
2563 	memset(&stream_update, 0, sizeof(stream_update));
2564 	stream_update.dpms_off = &dpms_off;
2565 
2566 	mutex_lock(&adev->dm.dc_lock);
2567 	stream_state = dc_stream_find_from_link(link);
2568 
2569 	if (stream_state == NULL) {
2570 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2571 		mutex_unlock(&adev->dm.dc_lock);
2572 		return;
2573 	}
2574 
2575 	stream_update.stream = stream_state;
2576 	acrtc_state->force_dpms_off = true;
2577 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2578 				     stream_state, &stream_update,
2579 				     stream_state->ctx->dc->current_state);
2580 	mutex_unlock(&adev->dm.dc_lock);
2581 }
2582 
2583 static int dm_resume(void *handle)
2584 {
2585 	struct amdgpu_device *adev = handle;
2586 	struct drm_device *ddev = adev_to_drm(adev);
2587 	struct amdgpu_display_manager *dm = &adev->dm;
2588 	struct amdgpu_dm_connector *aconnector;
2589 	struct drm_connector *connector;
2590 	struct drm_connector_list_iter iter;
2591 	struct drm_crtc *crtc;
2592 	struct drm_crtc_state *new_crtc_state;
2593 	struct dm_crtc_state *dm_new_crtc_state;
2594 	struct drm_plane *plane;
2595 	struct drm_plane_state *new_plane_state;
2596 	struct dm_plane_state *dm_new_plane_state;
2597 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2598 	enum dc_connection_type new_connection_type = dc_connection_none;
2599 	struct dc_state *dc_state;
2600 	int i, r, j;
2601 
2602 	if (amdgpu_in_reset(adev)) {
2603 		dc_state = dm->cached_dc_state;
2604 
2605 		/*
2606 		 * The dc->current_state is backed up into dm->cached_dc_state
2607 		 * before we commit 0 streams.
2608 		 *
2609 		 * DC will clear link encoder assignments on the real state
2610 		 * but the changes won't propagate over to the copy we made
2611 		 * before the 0 streams commit.
2612 		 *
2613 		 * DC expects that link encoder assignments are *not* valid
2614 		 * when committing a state, so as a workaround we can copy
2615 		 * off of the current state.
2616 		 *
2617 		 * We lose the previous assignments, but we had already
2618 		 * commit 0 streams anyway.
2619 		 */
2620 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2621 
2622 		if (dc_enable_dmub_notifications(adev->dm.dc))
2623 			amdgpu_dm_outbox_init(adev);
2624 
2625 		r = dm_dmub_hw_init(adev);
2626 		if (r)
2627 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2628 
2629 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2630 		dc_resume(dm->dc);
2631 
2632 		amdgpu_dm_irq_resume_early(adev);
2633 
2634 		for (i = 0; i < dc_state->stream_count; i++) {
2635 			dc_state->streams[i]->mode_changed = true;
2636 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2637 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2638 					= 0xffffffff;
2639 			}
2640 		}
2641 
2642 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2643 
2644 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2645 
2646 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2647 
2648 		dc_release_state(dm->cached_dc_state);
2649 		dm->cached_dc_state = NULL;
2650 
2651 		amdgpu_dm_irq_resume_late(adev);
2652 
2653 		mutex_unlock(&dm->dc_lock);
2654 
2655 		return 0;
2656 	}
2657 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2658 	dc_release_state(dm_state->context);
2659 	dm_state->context = dc_create_state(dm->dc);
2660 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2661 	dc_resource_state_construct(dm->dc, dm_state->context);
2662 
2663 	/* Re-enable outbox interrupts for DPIA. */
2664 	if (dc_enable_dmub_notifications(adev->dm.dc))
2665 		amdgpu_dm_outbox_init(adev);
2666 
2667 	/* Before powering on DC we need to re-initialize DMUB. */
2668 	dm_dmub_hw_resume(adev);
2669 
2670 	/* power on hardware */
2671 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2672 
2673 	/* program HPD filter */
2674 	dc_resume(dm->dc);
2675 
2676 	/*
2677 	 * early enable HPD Rx IRQ, should be done before set mode as short
2678 	 * pulse interrupts are used for MST
2679 	 */
2680 	amdgpu_dm_irq_resume_early(adev);
2681 
2682 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2683 	s3_handle_mst(ddev, false);
2684 
2685 	/* Do detection*/
2686 	drm_connector_list_iter_begin(ddev, &iter);
2687 	drm_for_each_connector_iter(connector, &iter) {
2688 		aconnector = to_amdgpu_dm_connector(connector);
2689 
2690 		/*
2691 		 * this is the case when traversing through already created
2692 		 * MST connectors, should be skipped
2693 		 */
2694 		if (aconnector->dc_link &&
2695 		    aconnector->dc_link->type == dc_connection_mst_branch)
2696 			continue;
2697 
2698 		mutex_lock(&aconnector->hpd_lock);
2699 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2700 			DRM_ERROR("KMS: Failed to detect connector\n");
2701 
2702 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2703 			emulated_link_detect(aconnector->dc_link);
2704 		else
2705 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2706 
2707 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2708 			aconnector->fake_enable = false;
2709 
2710 		if (aconnector->dc_sink)
2711 			dc_sink_release(aconnector->dc_sink);
2712 		aconnector->dc_sink = NULL;
2713 		amdgpu_dm_update_connector_after_detect(aconnector);
2714 		mutex_unlock(&aconnector->hpd_lock);
2715 	}
2716 	drm_connector_list_iter_end(&iter);
2717 
2718 	/* Force mode set in atomic commit */
2719 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2720 		new_crtc_state->active_changed = true;
2721 
2722 	/*
2723 	 * atomic_check is expected to create the dc states. We need to release
2724 	 * them here, since they were duplicated as part of the suspend
2725 	 * procedure.
2726 	 */
2727 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2728 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2729 		if (dm_new_crtc_state->stream) {
2730 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2731 			dc_stream_release(dm_new_crtc_state->stream);
2732 			dm_new_crtc_state->stream = NULL;
2733 		}
2734 	}
2735 
2736 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2737 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2738 		if (dm_new_plane_state->dc_state) {
2739 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2740 			dc_plane_state_release(dm_new_plane_state->dc_state);
2741 			dm_new_plane_state->dc_state = NULL;
2742 		}
2743 	}
2744 
2745 	drm_atomic_helper_resume(ddev, dm->cached_state);
2746 
2747 	dm->cached_state = NULL;
2748 
2749 	amdgpu_dm_irq_resume_late(adev);
2750 
2751 	amdgpu_dm_smu_write_watermarks_table(adev);
2752 
2753 	return 0;
2754 }
2755 
2756 /**
2757  * DOC: DM Lifecycle
2758  *
2759  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2760  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2761  * the base driver's device list to be initialized and torn down accordingly.
2762  *
2763  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2764  */
2765 
2766 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2767 	.name = "dm",
2768 	.early_init = dm_early_init,
2769 	.late_init = dm_late_init,
2770 	.sw_init = dm_sw_init,
2771 	.sw_fini = dm_sw_fini,
2772 	.early_fini = amdgpu_dm_early_fini,
2773 	.hw_init = dm_hw_init,
2774 	.hw_fini = dm_hw_fini,
2775 	.suspend = dm_suspend,
2776 	.resume = dm_resume,
2777 	.is_idle = dm_is_idle,
2778 	.wait_for_idle = dm_wait_for_idle,
2779 	.check_soft_reset = dm_check_soft_reset,
2780 	.soft_reset = dm_soft_reset,
2781 	.set_clockgating_state = dm_set_clockgating_state,
2782 	.set_powergating_state = dm_set_powergating_state,
2783 };
2784 
2785 const struct amdgpu_ip_block_version dm_ip_block =
2786 {
2787 	.type = AMD_IP_BLOCK_TYPE_DCE,
2788 	.major = 1,
2789 	.minor = 0,
2790 	.rev = 0,
2791 	.funcs = &amdgpu_dm_funcs,
2792 };
2793 
2794 
2795 /**
2796  * DOC: atomic
2797  *
2798  * *WIP*
2799  */
2800 
2801 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2802 	.fb_create = amdgpu_display_user_framebuffer_create,
2803 	.get_format_info = amd_get_format_info,
2804 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2805 	.atomic_check = amdgpu_dm_atomic_check,
2806 	.atomic_commit = drm_atomic_helper_commit,
2807 };
2808 
2809 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2810 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2811 };
2812 
2813 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2814 {
2815 	u32 max_avg, min_cll, max, min, q, r;
2816 	struct amdgpu_dm_backlight_caps *caps;
2817 	struct amdgpu_display_manager *dm;
2818 	struct drm_connector *conn_base;
2819 	struct amdgpu_device *adev;
2820 	struct dc_link *link = NULL;
2821 	static const u8 pre_computed_values[] = {
2822 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2823 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2824 	int i;
2825 
2826 	if (!aconnector || !aconnector->dc_link)
2827 		return;
2828 
2829 	link = aconnector->dc_link;
2830 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2831 		return;
2832 
2833 	conn_base = &aconnector->base;
2834 	adev = drm_to_adev(conn_base->dev);
2835 	dm = &adev->dm;
2836 	for (i = 0; i < dm->num_of_edps; i++) {
2837 		if (link == dm->backlight_link[i])
2838 			break;
2839 	}
2840 	if (i >= dm->num_of_edps)
2841 		return;
2842 	caps = &dm->backlight_caps[i];
2843 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2844 	caps->aux_support = false;
2845 	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2846 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2847 
2848 	if (caps->ext_caps->bits.oled == 1 /*||
2849 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2850 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2851 		caps->aux_support = true;
2852 
2853 	if (amdgpu_backlight == 0)
2854 		caps->aux_support = false;
2855 	else if (amdgpu_backlight == 1)
2856 		caps->aux_support = true;
2857 
2858 	/* From the specification (CTA-861-G), for calculating the maximum
2859 	 * luminance we need to use:
2860 	 *	Luminance = 50*2**(CV/32)
2861 	 * Where CV is a one-byte value.
2862 	 * For calculating this expression we may need float point precision;
2863 	 * to avoid this complexity level, we take advantage that CV is divided
2864 	 * by a constant. From the Euclids division algorithm, we know that CV
2865 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2866 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2867 	 * need to pre-compute the value of r/32. For pre-computing the values
2868 	 * We just used the following Ruby line:
2869 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2870 	 * The results of the above expressions can be verified at
2871 	 * pre_computed_values.
2872 	 */
2873 	q = max_avg >> 5;
2874 	r = max_avg % 32;
2875 	max = (1 << q) * pre_computed_values[r];
2876 
2877 	// min luminance: maxLum * (CV/255)^2 / 100
2878 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2879 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2880 
2881 	caps->aux_max_input_signal = max;
2882 	caps->aux_min_input_signal = min;
2883 }
2884 
2885 void amdgpu_dm_update_connector_after_detect(
2886 		struct amdgpu_dm_connector *aconnector)
2887 {
2888 	struct drm_connector *connector = &aconnector->base;
2889 	struct drm_device *dev = connector->dev;
2890 	struct dc_sink *sink;
2891 
2892 	/* MST handled by drm_mst framework */
2893 	if (aconnector->mst_mgr.mst_state == true)
2894 		return;
2895 
2896 	sink = aconnector->dc_link->local_sink;
2897 	if (sink)
2898 		dc_sink_retain(sink);
2899 
2900 	/*
2901 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2902 	 * the connector sink is set to either fake or physical sink depends on link status.
2903 	 * Skip if already done during boot.
2904 	 */
2905 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2906 			&& aconnector->dc_em_sink) {
2907 
2908 		/*
2909 		 * For S3 resume with headless use eml_sink to fake stream
2910 		 * because on resume connector->sink is set to NULL
2911 		 */
2912 		mutex_lock(&dev->mode_config.mutex);
2913 
2914 		if (sink) {
2915 			if (aconnector->dc_sink) {
2916 				amdgpu_dm_update_freesync_caps(connector, NULL);
2917 				/*
2918 				 * retain and release below are used to
2919 				 * bump up refcount for sink because the link doesn't point
2920 				 * to it anymore after disconnect, so on next crtc to connector
2921 				 * reshuffle by UMD we will get into unwanted dc_sink release
2922 				 */
2923 				dc_sink_release(aconnector->dc_sink);
2924 			}
2925 			aconnector->dc_sink = sink;
2926 			dc_sink_retain(aconnector->dc_sink);
2927 			amdgpu_dm_update_freesync_caps(connector,
2928 					aconnector->edid);
2929 		} else {
2930 			amdgpu_dm_update_freesync_caps(connector, NULL);
2931 			if (!aconnector->dc_sink) {
2932 				aconnector->dc_sink = aconnector->dc_em_sink;
2933 				dc_sink_retain(aconnector->dc_sink);
2934 			}
2935 		}
2936 
2937 		mutex_unlock(&dev->mode_config.mutex);
2938 
2939 		if (sink)
2940 			dc_sink_release(sink);
2941 		return;
2942 	}
2943 
2944 	/*
2945 	 * TODO: temporary guard to look for proper fix
2946 	 * if this sink is MST sink, we should not do anything
2947 	 */
2948 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2949 		dc_sink_release(sink);
2950 		return;
2951 	}
2952 
2953 	if (aconnector->dc_sink == sink) {
2954 		/*
2955 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2956 		 * Do nothing!!
2957 		 */
2958 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2959 				aconnector->connector_id);
2960 		if (sink)
2961 			dc_sink_release(sink);
2962 		return;
2963 	}
2964 
2965 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2966 		aconnector->connector_id, aconnector->dc_sink, sink);
2967 
2968 	mutex_lock(&dev->mode_config.mutex);
2969 
2970 	/*
2971 	 * 1. Update status of the drm connector
2972 	 * 2. Send an event and let userspace tell us what to do
2973 	 */
2974 	if (sink) {
2975 		/*
2976 		 * TODO: check if we still need the S3 mode update workaround.
2977 		 * If yes, put it here.
2978 		 */
2979 		if (aconnector->dc_sink) {
2980 			amdgpu_dm_update_freesync_caps(connector, NULL);
2981 			dc_sink_release(aconnector->dc_sink);
2982 		}
2983 
2984 		aconnector->dc_sink = sink;
2985 		dc_sink_retain(aconnector->dc_sink);
2986 		if (sink->dc_edid.length == 0) {
2987 			aconnector->edid = NULL;
2988 			if (aconnector->dc_link->aux_mode) {
2989 				drm_dp_cec_unset_edid(
2990 					&aconnector->dm_dp_aux.aux);
2991 			}
2992 		} else {
2993 			aconnector->edid =
2994 				(struct edid *)sink->dc_edid.raw_edid;
2995 
2996 			if (aconnector->dc_link->aux_mode)
2997 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2998 						    aconnector->edid);
2999 		}
3000 
3001 		drm_connector_update_edid_property(connector, aconnector->edid);
3002 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3003 		update_connector_ext_caps(aconnector);
3004 	} else {
3005 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3006 		amdgpu_dm_update_freesync_caps(connector, NULL);
3007 		drm_connector_update_edid_property(connector, NULL);
3008 		aconnector->num_modes = 0;
3009 		dc_sink_release(aconnector->dc_sink);
3010 		aconnector->dc_sink = NULL;
3011 		aconnector->edid = NULL;
3012 #ifdef CONFIG_DRM_AMD_DC_HDCP
3013 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3014 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3015 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3016 #endif
3017 	}
3018 
3019 	mutex_unlock(&dev->mode_config.mutex);
3020 
3021 	update_subconnector_property(aconnector);
3022 
3023 	if (sink)
3024 		dc_sink_release(sink);
3025 }
3026 
3027 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3028 {
3029 	struct drm_connector *connector = &aconnector->base;
3030 	struct drm_device *dev = connector->dev;
3031 	enum dc_connection_type new_connection_type = dc_connection_none;
3032 	struct amdgpu_device *adev = drm_to_adev(dev);
3033 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3034 	struct dm_crtc_state *dm_crtc_state = NULL;
3035 
3036 	if (adev->dm.disable_hpd_irq)
3037 		return;
3038 
3039 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3040 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3041 					dm_con_state->base.state,
3042 					dm_con_state->base.crtc));
3043 	/*
3044 	 * In case of failure or MST no need to update connector status or notify the OS
3045 	 * since (for MST case) MST does this in its own context.
3046 	 */
3047 	mutex_lock(&aconnector->hpd_lock);
3048 
3049 #ifdef CONFIG_DRM_AMD_DC_HDCP
3050 	if (adev->dm.hdcp_workqueue) {
3051 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3052 		dm_con_state->update_hdcp = true;
3053 	}
3054 #endif
3055 	if (aconnector->fake_enable)
3056 		aconnector->fake_enable = false;
3057 
3058 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3059 		DRM_ERROR("KMS: Failed to detect connector\n");
3060 
3061 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3062 		emulated_link_detect(aconnector->dc_link);
3063 
3064 		drm_modeset_lock_all(dev);
3065 		dm_restore_drm_connector_state(dev, connector);
3066 		drm_modeset_unlock_all(dev);
3067 
3068 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3069 			drm_kms_helper_connector_hotplug_event(connector);
3070 
3071 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3072 		if (new_connection_type == dc_connection_none &&
3073 		    aconnector->dc_link->type == dc_connection_none &&
3074 		    dm_crtc_state)
3075 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3076 
3077 		amdgpu_dm_update_connector_after_detect(aconnector);
3078 
3079 		drm_modeset_lock_all(dev);
3080 		dm_restore_drm_connector_state(dev, connector);
3081 		drm_modeset_unlock_all(dev);
3082 
3083 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3084 			drm_kms_helper_connector_hotplug_event(connector);
3085 	}
3086 	mutex_unlock(&aconnector->hpd_lock);
3087 
3088 }
3089 
3090 static void handle_hpd_irq(void *param)
3091 {
3092 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3093 
3094 	handle_hpd_irq_helper(aconnector);
3095 
3096 }
3097 
3098 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3099 {
3100 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3101 	uint8_t dret;
3102 	bool new_irq_handled = false;
3103 	int dpcd_addr;
3104 	int dpcd_bytes_to_read;
3105 
3106 	const int max_process_count = 30;
3107 	int process_count = 0;
3108 
3109 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3110 
3111 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3112 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3113 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3114 		dpcd_addr = DP_SINK_COUNT;
3115 	} else {
3116 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3117 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3118 		dpcd_addr = DP_SINK_COUNT_ESI;
3119 	}
3120 
3121 	dret = drm_dp_dpcd_read(
3122 		&aconnector->dm_dp_aux.aux,
3123 		dpcd_addr,
3124 		esi,
3125 		dpcd_bytes_to_read);
3126 
3127 	while (dret == dpcd_bytes_to_read &&
3128 		process_count < max_process_count) {
3129 		uint8_t retry;
3130 		dret = 0;
3131 
3132 		process_count++;
3133 
3134 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3135 		/* handle HPD short pulse irq */
3136 		if (aconnector->mst_mgr.mst_state)
3137 			drm_dp_mst_hpd_irq(
3138 				&aconnector->mst_mgr,
3139 				esi,
3140 				&new_irq_handled);
3141 
3142 		if (new_irq_handled) {
3143 			/* ACK at DPCD to notify down stream */
3144 			const int ack_dpcd_bytes_to_write =
3145 				dpcd_bytes_to_read - 1;
3146 
3147 			for (retry = 0; retry < 3; retry++) {
3148 				uint8_t wret;
3149 
3150 				wret = drm_dp_dpcd_write(
3151 					&aconnector->dm_dp_aux.aux,
3152 					dpcd_addr + 1,
3153 					&esi[1],
3154 					ack_dpcd_bytes_to_write);
3155 				if (wret == ack_dpcd_bytes_to_write)
3156 					break;
3157 			}
3158 
3159 			/* check if there is new irq to be handled */
3160 			dret = drm_dp_dpcd_read(
3161 				&aconnector->dm_dp_aux.aux,
3162 				dpcd_addr,
3163 				esi,
3164 				dpcd_bytes_to_read);
3165 
3166 			new_irq_handled = false;
3167 		} else {
3168 			break;
3169 		}
3170 	}
3171 
3172 	if (process_count == max_process_count)
3173 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3174 }
3175 
3176 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3177 							union hpd_irq_data hpd_irq_data)
3178 {
3179 	struct hpd_rx_irq_offload_work *offload_work =
3180 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3181 
3182 	if (!offload_work) {
3183 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3184 		return;
3185 	}
3186 
3187 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3188 	offload_work->data = hpd_irq_data;
3189 	offload_work->offload_wq = offload_wq;
3190 
3191 	queue_work(offload_wq->wq, &offload_work->work);
3192 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3193 }
3194 
3195 static void handle_hpd_rx_irq(void *param)
3196 {
3197 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3198 	struct drm_connector *connector = &aconnector->base;
3199 	struct drm_device *dev = connector->dev;
3200 	struct dc_link *dc_link = aconnector->dc_link;
3201 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3202 	bool result = false;
3203 	enum dc_connection_type new_connection_type = dc_connection_none;
3204 	struct amdgpu_device *adev = drm_to_adev(dev);
3205 	union hpd_irq_data hpd_irq_data;
3206 	bool link_loss = false;
3207 	bool has_left_work = false;
3208 	int idx = aconnector->base.index;
3209 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3210 
3211 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3212 
3213 	if (adev->dm.disable_hpd_irq)
3214 		return;
3215 
3216 	/*
3217 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3218 	 * conflict, after implement i2c helper, this mutex should be
3219 	 * retired.
3220 	 */
3221 	mutex_lock(&aconnector->hpd_lock);
3222 
3223 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3224 						&link_loss, true, &has_left_work);
3225 
3226 	if (!has_left_work)
3227 		goto out;
3228 
3229 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3230 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3231 		goto out;
3232 	}
3233 
3234 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3235 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3236 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3237 			dm_handle_mst_sideband_msg(aconnector);
3238 			goto out;
3239 		}
3240 
3241 		if (link_loss) {
3242 			bool skip = false;
3243 
3244 			spin_lock(&offload_wq->offload_lock);
3245 			skip = offload_wq->is_handling_link_loss;
3246 
3247 			if (!skip)
3248 				offload_wq->is_handling_link_loss = true;
3249 
3250 			spin_unlock(&offload_wq->offload_lock);
3251 
3252 			if (!skip)
3253 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3254 
3255 			goto out;
3256 		}
3257 	}
3258 
3259 out:
3260 	if (result && !is_mst_root_connector) {
3261 		/* Downstream Port status changed. */
3262 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3263 			DRM_ERROR("KMS: Failed to detect connector\n");
3264 
3265 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3266 			emulated_link_detect(dc_link);
3267 
3268 			if (aconnector->fake_enable)
3269 				aconnector->fake_enable = false;
3270 
3271 			amdgpu_dm_update_connector_after_detect(aconnector);
3272 
3273 
3274 			drm_modeset_lock_all(dev);
3275 			dm_restore_drm_connector_state(dev, connector);
3276 			drm_modeset_unlock_all(dev);
3277 
3278 			drm_kms_helper_connector_hotplug_event(connector);
3279 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3280 
3281 			if (aconnector->fake_enable)
3282 				aconnector->fake_enable = false;
3283 
3284 			amdgpu_dm_update_connector_after_detect(aconnector);
3285 
3286 
3287 			drm_modeset_lock_all(dev);
3288 			dm_restore_drm_connector_state(dev, connector);
3289 			drm_modeset_unlock_all(dev);
3290 
3291 			drm_kms_helper_connector_hotplug_event(connector);
3292 		}
3293 	}
3294 #ifdef CONFIG_DRM_AMD_DC_HDCP
3295 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3296 		if (adev->dm.hdcp_workqueue)
3297 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3298 	}
3299 #endif
3300 
3301 	if (dc_link->type != dc_connection_mst_branch)
3302 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3303 
3304 	mutex_unlock(&aconnector->hpd_lock);
3305 }
3306 
3307 static void register_hpd_handlers(struct amdgpu_device *adev)
3308 {
3309 	struct drm_device *dev = adev_to_drm(adev);
3310 	struct drm_connector *connector;
3311 	struct amdgpu_dm_connector *aconnector;
3312 	const struct dc_link *dc_link;
3313 	struct dc_interrupt_params int_params = {0};
3314 
3315 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3316 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3317 
3318 	list_for_each_entry(connector,
3319 			&dev->mode_config.connector_list, head)	{
3320 
3321 		aconnector = to_amdgpu_dm_connector(connector);
3322 		dc_link = aconnector->dc_link;
3323 
3324 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3325 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3326 			int_params.irq_source = dc_link->irq_source_hpd;
3327 
3328 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3329 					handle_hpd_irq,
3330 					(void *) aconnector);
3331 		}
3332 
3333 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3334 
3335 			/* Also register for DP short pulse (hpd_rx). */
3336 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3337 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
3338 
3339 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3340 					handle_hpd_rx_irq,
3341 					(void *) aconnector);
3342 
3343 			if (adev->dm.hpd_rx_offload_wq)
3344 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3345 					aconnector;
3346 		}
3347 	}
3348 }
3349 
3350 #if defined(CONFIG_DRM_AMD_DC_SI)
3351 /* Register IRQ sources and initialize IRQ callbacks */
3352 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3353 {
3354 	struct dc *dc = adev->dm.dc;
3355 	struct common_irq_params *c_irq_params;
3356 	struct dc_interrupt_params int_params = {0};
3357 	int r;
3358 	int i;
3359 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3360 
3361 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3362 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3363 
3364 	/*
3365 	 * Actions of amdgpu_irq_add_id():
3366 	 * 1. Register a set() function with base driver.
3367 	 *    Base driver will call set() function to enable/disable an
3368 	 *    interrupt in DC hardware.
3369 	 * 2. Register amdgpu_dm_irq_handler().
3370 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3371 	 *    coming from DC hardware.
3372 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3373 	 *    for acknowledging and handling. */
3374 
3375 	/* Use VBLANK interrupt */
3376 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3377 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
3378 		if (r) {
3379 			DRM_ERROR("Failed to add crtc irq id!\n");
3380 			return r;
3381 		}
3382 
3383 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3384 		int_params.irq_source =
3385 			dc_interrupt_to_irq_source(dc, i+1 , 0);
3386 
3387 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3388 
3389 		c_irq_params->adev = adev;
3390 		c_irq_params->irq_src = int_params.irq_source;
3391 
3392 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3393 				dm_crtc_high_irq, c_irq_params);
3394 	}
3395 
3396 	/* Use GRPH_PFLIP interrupt */
3397 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3398 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3399 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3400 		if (r) {
3401 			DRM_ERROR("Failed to add page flip irq id!\n");
3402 			return r;
3403 		}
3404 
3405 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3406 		int_params.irq_source =
3407 			dc_interrupt_to_irq_source(dc, i, 0);
3408 
3409 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3410 
3411 		c_irq_params->adev = adev;
3412 		c_irq_params->irq_src = int_params.irq_source;
3413 
3414 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3415 				dm_pflip_high_irq, c_irq_params);
3416 
3417 	}
3418 
3419 	/* HPD */
3420 	r = amdgpu_irq_add_id(adev, client_id,
3421 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3422 	if (r) {
3423 		DRM_ERROR("Failed to add hpd irq id!\n");
3424 		return r;
3425 	}
3426 
3427 	register_hpd_handlers(adev);
3428 
3429 	return 0;
3430 }
3431 #endif
3432 
3433 /* Register IRQ sources and initialize IRQ callbacks */
3434 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3435 {
3436 	struct dc *dc = adev->dm.dc;
3437 	struct common_irq_params *c_irq_params;
3438 	struct dc_interrupt_params int_params = {0};
3439 	int r;
3440 	int i;
3441 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3442 
3443 	if (adev->family >= AMDGPU_FAMILY_AI)
3444 		client_id = SOC15_IH_CLIENTID_DCE;
3445 
3446 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3447 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3448 
3449 	/*
3450 	 * Actions of amdgpu_irq_add_id():
3451 	 * 1. Register a set() function with base driver.
3452 	 *    Base driver will call set() function to enable/disable an
3453 	 *    interrupt in DC hardware.
3454 	 * 2. Register amdgpu_dm_irq_handler().
3455 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3456 	 *    coming from DC hardware.
3457 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3458 	 *    for acknowledging and handling. */
3459 
3460 	/* Use VBLANK interrupt */
3461 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3462 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3463 		if (r) {
3464 			DRM_ERROR("Failed to add crtc irq id!\n");
3465 			return r;
3466 		}
3467 
3468 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3469 		int_params.irq_source =
3470 			dc_interrupt_to_irq_source(dc, i, 0);
3471 
3472 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3473 
3474 		c_irq_params->adev = adev;
3475 		c_irq_params->irq_src = int_params.irq_source;
3476 
3477 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3478 				dm_crtc_high_irq, c_irq_params);
3479 	}
3480 
3481 	/* Use VUPDATE interrupt */
3482 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3483 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3484 		if (r) {
3485 			DRM_ERROR("Failed to add vupdate irq id!\n");
3486 			return r;
3487 		}
3488 
3489 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3490 		int_params.irq_source =
3491 			dc_interrupt_to_irq_source(dc, i, 0);
3492 
3493 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3494 
3495 		c_irq_params->adev = adev;
3496 		c_irq_params->irq_src = int_params.irq_source;
3497 
3498 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3499 				dm_vupdate_high_irq, c_irq_params);
3500 	}
3501 
3502 	/* Use GRPH_PFLIP interrupt */
3503 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3504 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3505 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3506 		if (r) {
3507 			DRM_ERROR("Failed to add page flip irq id!\n");
3508 			return r;
3509 		}
3510 
3511 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3512 		int_params.irq_source =
3513 			dc_interrupt_to_irq_source(dc, i, 0);
3514 
3515 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3516 
3517 		c_irq_params->adev = adev;
3518 		c_irq_params->irq_src = int_params.irq_source;
3519 
3520 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3521 				dm_pflip_high_irq, c_irq_params);
3522 
3523 	}
3524 
3525 	/* HPD */
3526 	r = amdgpu_irq_add_id(adev, client_id,
3527 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3528 	if (r) {
3529 		DRM_ERROR("Failed to add hpd irq id!\n");
3530 		return r;
3531 	}
3532 
3533 	register_hpd_handlers(adev);
3534 
3535 	return 0;
3536 }
3537 
3538 /* Register IRQ sources and initialize IRQ callbacks */
3539 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3540 {
3541 	struct dc *dc = adev->dm.dc;
3542 	struct common_irq_params *c_irq_params;
3543 	struct dc_interrupt_params int_params = {0};
3544 	int r;
3545 	int i;
3546 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3547 	static const unsigned int vrtl_int_srcid[] = {
3548 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3549 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3550 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3551 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3552 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3553 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3554 	};
3555 #endif
3556 
3557 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3558 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3559 
3560 	/*
3561 	 * Actions of amdgpu_irq_add_id():
3562 	 * 1. Register a set() function with base driver.
3563 	 *    Base driver will call set() function to enable/disable an
3564 	 *    interrupt in DC hardware.
3565 	 * 2. Register amdgpu_dm_irq_handler().
3566 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3567 	 *    coming from DC hardware.
3568 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3569 	 *    for acknowledging and handling.
3570 	 */
3571 
3572 	/* Use VSTARTUP interrupt */
3573 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3574 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3575 			i++) {
3576 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3577 
3578 		if (r) {
3579 			DRM_ERROR("Failed to add crtc irq id!\n");
3580 			return r;
3581 		}
3582 
3583 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3584 		int_params.irq_source =
3585 			dc_interrupt_to_irq_source(dc, i, 0);
3586 
3587 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3588 
3589 		c_irq_params->adev = adev;
3590 		c_irq_params->irq_src = int_params.irq_source;
3591 
3592 		amdgpu_dm_irq_register_interrupt(
3593 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3594 	}
3595 
3596 	/* Use otg vertical line interrupt */
3597 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3598 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3599 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3600 				vrtl_int_srcid[i], &adev->vline0_irq);
3601 
3602 		if (r) {
3603 			DRM_ERROR("Failed to add vline0 irq id!\n");
3604 			return r;
3605 		}
3606 
3607 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3608 		int_params.irq_source =
3609 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3610 
3611 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3612 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3613 			break;
3614 		}
3615 
3616 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3617 					- DC_IRQ_SOURCE_DC1_VLINE0];
3618 
3619 		c_irq_params->adev = adev;
3620 		c_irq_params->irq_src = int_params.irq_source;
3621 
3622 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3623 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3624 	}
3625 #endif
3626 
3627 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3628 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3629 	 * to trigger at end of each vblank, regardless of state of the lock,
3630 	 * matching DCE behaviour.
3631 	 */
3632 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3633 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3634 	     i++) {
3635 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3636 
3637 		if (r) {
3638 			DRM_ERROR("Failed to add vupdate irq id!\n");
3639 			return r;
3640 		}
3641 
3642 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3643 		int_params.irq_source =
3644 			dc_interrupt_to_irq_source(dc, i, 0);
3645 
3646 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3647 
3648 		c_irq_params->adev = adev;
3649 		c_irq_params->irq_src = int_params.irq_source;
3650 
3651 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3652 				dm_vupdate_high_irq, c_irq_params);
3653 	}
3654 
3655 	/* Use GRPH_PFLIP interrupt */
3656 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3657 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3658 			i++) {
3659 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3660 		if (r) {
3661 			DRM_ERROR("Failed to add page flip irq id!\n");
3662 			return r;
3663 		}
3664 
3665 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3666 		int_params.irq_source =
3667 			dc_interrupt_to_irq_source(dc, i, 0);
3668 
3669 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3670 
3671 		c_irq_params->adev = adev;
3672 		c_irq_params->irq_src = int_params.irq_source;
3673 
3674 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3675 				dm_pflip_high_irq, c_irq_params);
3676 
3677 	}
3678 
3679 	/* HPD */
3680 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3681 			&adev->hpd_irq);
3682 	if (r) {
3683 		DRM_ERROR("Failed to add hpd irq id!\n");
3684 		return r;
3685 	}
3686 
3687 	register_hpd_handlers(adev);
3688 
3689 	return 0;
3690 }
3691 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3692 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3693 {
3694 	struct dc *dc = adev->dm.dc;
3695 	struct common_irq_params *c_irq_params;
3696 	struct dc_interrupt_params int_params = {0};
3697 	int r, i;
3698 
3699 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3700 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3701 
3702 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3703 			&adev->dmub_outbox_irq);
3704 	if (r) {
3705 		DRM_ERROR("Failed to add outbox irq id!\n");
3706 		return r;
3707 	}
3708 
3709 	if (dc->ctx->dmub_srv) {
3710 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3711 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3712 		int_params.irq_source =
3713 		dc_interrupt_to_irq_source(dc, i, 0);
3714 
3715 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3716 
3717 		c_irq_params->adev = adev;
3718 		c_irq_params->irq_src = int_params.irq_source;
3719 
3720 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3721 				dm_dmub_outbox1_low_irq, c_irq_params);
3722 	}
3723 
3724 	return 0;
3725 }
3726 
3727 /*
3728  * Acquires the lock for the atomic state object and returns
3729  * the new atomic state.
3730  *
3731  * This should only be called during atomic check.
3732  */
3733 int dm_atomic_get_state(struct drm_atomic_state *state,
3734 			struct dm_atomic_state **dm_state)
3735 {
3736 	struct drm_device *dev = state->dev;
3737 	struct amdgpu_device *adev = drm_to_adev(dev);
3738 	struct amdgpu_display_manager *dm = &adev->dm;
3739 	struct drm_private_state *priv_state;
3740 
3741 	if (*dm_state)
3742 		return 0;
3743 
3744 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3745 	if (IS_ERR(priv_state))
3746 		return PTR_ERR(priv_state);
3747 
3748 	*dm_state = to_dm_atomic_state(priv_state);
3749 
3750 	return 0;
3751 }
3752 
3753 static struct dm_atomic_state *
3754 dm_atomic_get_new_state(struct drm_atomic_state *state)
3755 {
3756 	struct drm_device *dev = state->dev;
3757 	struct amdgpu_device *adev = drm_to_adev(dev);
3758 	struct amdgpu_display_manager *dm = &adev->dm;
3759 	struct drm_private_obj *obj;
3760 	struct drm_private_state *new_obj_state;
3761 	int i;
3762 
3763 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3764 		if (obj->funcs == dm->atomic_obj.funcs)
3765 			return to_dm_atomic_state(new_obj_state);
3766 	}
3767 
3768 	return NULL;
3769 }
3770 
3771 static struct drm_private_state *
3772 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3773 {
3774 	struct dm_atomic_state *old_state, *new_state;
3775 
3776 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3777 	if (!new_state)
3778 		return NULL;
3779 
3780 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3781 
3782 	old_state = to_dm_atomic_state(obj->state);
3783 
3784 	if (old_state && old_state->context)
3785 		new_state->context = dc_copy_state(old_state->context);
3786 
3787 	if (!new_state->context) {
3788 		kfree(new_state);
3789 		return NULL;
3790 	}
3791 
3792 	return &new_state->base;
3793 }
3794 
3795 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3796 				    struct drm_private_state *state)
3797 {
3798 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3799 
3800 	if (dm_state && dm_state->context)
3801 		dc_release_state(dm_state->context);
3802 
3803 	kfree(dm_state);
3804 }
3805 
3806 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3807 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3808 	.atomic_destroy_state = dm_atomic_destroy_state,
3809 };
3810 
3811 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3812 {
3813 	struct dm_atomic_state *state;
3814 	int r;
3815 
3816 	adev->mode_info.mode_config_initialized = true;
3817 
3818 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3819 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3820 
3821 	adev_to_drm(adev)->mode_config.max_width = 16384;
3822 	adev_to_drm(adev)->mode_config.max_height = 16384;
3823 
3824 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3825 	/* disable prefer shadow for now due to hibernation issues */
3826 	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3827 	/* indicates support for immediate flip */
3828 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3829 
3830 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3831 
3832 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3833 	if (!state)
3834 		return -ENOMEM;
3835 
3836 	state->context = dc_create_state(adev->dm.dc);
3837 	if (!state->context) {
3838 		kfree(state);
3839 		return -ENOMEM;
3840 	}
3841 
3842 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3843 
3844 	drm_atomic_private_obj_init(adev_to_drm(adev),
3845 				    &adev->dm.atomic_obj,
3846 				    &state->base,
3847 				    &dm_atomic_state_funcs);
3848 
3849 	r = amdgpu_display_modeset_create_props(adev);
3850 	if (r) {
3851 		dc_release_state(state->context);
3852 		kfree(state);
3853 		return r;
3854 	}
3855 
3856 	r = amdgpu_dm_audio_init(adev);
3857 	if (r) {
3858 		dc_release_state(state->context);
3859 		kfree(state);
3860 		return r;
3861 	}
3862 
3863 	return 0;
3864 }
3865 
3866 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3867 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3868 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3869 
3870 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3871 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3872 
3873 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3874 					    int bl_idx)
3875 {
3876 #if defined(CONFIG_ACPI)
3877 	struct amdgpu_dm_backlight_caps caps;
3878 
3879 	memset(&caps, 0, sizeof(caps));
3880 
3881 	if (dm->backlight_caps[bl_idx].caps_valid)
3882 		return;
3883 
3884 	amdgpu_acpi_get_backlight_caps(&caps);
3885 	if (caps.caps_valid) {
3886 		dm->backlight_caps[bl_idx].caps_valid = true;
3887 		if (caps.aux_support)
3888 			return;
3889 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3890 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3891 	} else {
3892 		dm->backlight_caps[bl_idx].min_input_signal =
3893 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3894 		dm->backlight_caps[bl_idx].max_input_signal =
3895 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3896 	}
3897 #else
3898 	if (dm->backlight_caps[bl_idx].aux_support)
3899 		return;
3900 
3901 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3902 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3903 #endif
3904 }
3905 
3906 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3907 				unsigned *min, unsigned *max)
3908 {
3909 	if (!caps)
3910 		return 0;
3911 
3912 	if (caps->aux_support) {
3913 		// Firmware limits are in nits, DC API wants millinits.
3914 		*max = 1000 * caps->aux_max_input_signal;
3915 		*min = 1000 * caps->aux_min_input_signal;
3916 	} else {
3917 		// Firmware limits are 8-bit, PWM control is 16-bit.
3918 		*max = 0x101 * caps->max_input_signal;
3919 		*min = 0x101 * caps->min_input_signal;
3920 	}
3921 	return 1;
3922 }
3923 
3924 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3925 					uint32_t brightness)
3926 {
3927 	unsigned min, max;
3928 
3929 	if (!get_brightness_range(caps, &min, &max))
3930 		return brightness;
3931 
3932 	// Rescale 0..255 to min..max
3933 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3934 				       AMDGPU_MAX_BL_LEVEL);
3935 }
3936 
3937 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3938 				      uint32_t brightness)
3939 {
3940 	unsigned min, max;
3941 
3942 	if (!get_brightness_range(caps, &min, &max))
3943 		return brightness;
3944 
3945 	if (brightness < min)
3946 		return 0;
3947 	// Rescale min..max to 0..255
3948 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3949 				 max - min);
3950 }
3951 
3952 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3953 					 int bl_idx,
3954 					 u32 user_brightness)
3955 {
3956 	struct amdgpu_dm_backlight_caps caps;
3957 	struct dc_link *link;
3958 	u32 brightness;
3959 	bool rc;
3960 
3961 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3962 	caps = dm->backlight_caps[bl_idx];
3963 
3964 	dm->brightness[bl_idx] = user_brightness;
3965 	/* update scratch register */
3966 	if (bl_idx == 0)
3967 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3968 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3969 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3970 
3971 	/* Change brightness based on AUX property */
3972 	if (caps.aux_support) {
3973 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3974 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3975 		if (!rc)
3976 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3977 	} else {
3978 		rc = dc_link_set_backlight_level(link, brightness, 0);
3979 		if (!rc)
3980 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3981 	}
3982 
3983 	if (rc)
3984 		dm->actual_brightness[bl_idx] = user_brightness;
3985 }
3986 
3987 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3988 {
3989 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3990 	int i;
3991 
3992 	for (i = 0; i < dm->num_of_edps; i++) {
3993 		if (bd == dm->backlight_dev[i])
3994 			break;
3995 	}
3996 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3997 		i = 0;
3998 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3999 
4000 	return 0;
4001 }
4002 
4003 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4004 					 int bl_idx)
4005 {
4006 	struct amdgpu_dm_backlight_caps caps;
4007 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4008 
4009 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4010 	caps = dm->backlight_caps[bl_idx];
4011 
4012 	if (caps.aux_support) {
4013 		u32 avg, peak;
4014 		bool rc;
4015 
4016 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4017 		if (!rc)
4018 			return dm->brightness[bl_idx];
4019 		return convert_brightness_to_user(&caps, avg);
4020 	} else {
4021 		int ret = dc_link_get_backlight_level(link);
4022 
4023 		if (ret == DC_ERROR_UNEXPECTED)
4024 			return dm->brightness[bl_idx];
4025 		return convert_brightness_to_user(&caps, ret);
4026 	}
4027 }
4028 
4029 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4030 {
4031 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4032 	int i;
4033 
4034 	for (i = 0; i < dm->num_of_edps; i++) {
4035 		if (bd == dm->backlight_dev[i])
4036 			break;
4037 	}
4038 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4039 		i = 0;
4040 	return amdgpu_dm_backlight_get_level(dm, i);
4041 }
4042 
4043 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4044 	.options = BL_CORE_SUSPENDRESUME,
4045 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4046 	.update_status	= amdgpu_dm_backlight_update_status,
4047 };
4048 
4049 static void
4050 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4051 {
4052 	char bl_name[16];
4053 	struct backlight_properties props = { 0 };
4054 
4055 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4056 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4057 
4058 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4059 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4060 	props.type = BACKLIGHT_RAW;
4061 
4062 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4063 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4064 
4065 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4066 								       adev_to_drm(dm->adev)->dev,
4067 								       dm,
4068 								       &amdgpu_dm_backlight_ops,
4069 								       &props);
4070 
4071 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4072 		DRM_ERROR("DM: Backlight registration failed!\n");
4073 	else
4074 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4075 }
4076 #endif
4077 
4078 static int initialize_plane(struct amdgpu_display_manager *dm,
4079 			    struct amdgpu_mode_info *mode_info, int plane_id,
4080 			    enum drm_plane_type plane_type,
4081 			    const struct dc_plane_cap *plane_cap)
4082 {
4083 	struct drm_plane *plane;
4084 	unsigned long possible_crtcs;
4085 	int ret = 0;
4086 
4087 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4088 	if (!plane) {
4089 		DRM_ERROR("KMS: Failed to allocate plane\n");
4090 		return -ENOMEM;
4091 	}
4092 	plane->type = plane_type;
4093 
4094 	/*
4095 	 * HACK: IGT tests expect that the primary plane for a CRTC
4096 	 * can only have one possible CRTC. Only expose support for
4097 	 * any CRTC if they're not going to be used as a primary plane
4098 	 * for a CRTC - like overlay or underlay planes.
4099 	 */
4100 	possible_crtcs = 1 << plane_id;
4101 	if (plane_id >= dm->dc->caps.max_streams)
4102 		possible_crtcs = 0xff;
4103 
4104 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4105 
4106 	if (ret) {
4107 		DRM_ERROR("KMS: Failed to initialize plane\n");
4108 		kfree(plane);
4109 		return ret;
4110 	}
4111 
4112 	if (mode_info)
4113 		mode_info->planes[plane_id] = plane;
4114 
4115 	return ret;
4116 }
4117 
4118 
4119 static void register_backlight_device(struct amdgpu_display_manager *dm,
4120 				      struct dc_link *link)
4121 {
4122 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4123 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4124 
4125 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4126 	    link->type != dc_connection_none) {
4127 		/*
4128 		 * Event if registration failed, we should continue with
4129 		 * DM initialization because not having a backlight control
4130 		 * is better then a black screen.
4131 		 */
4132 		if (!dm->backlight_dev[dm->num_of_edps])
4133 			amdgpu_dm_register_backlight_device(dm);
4134 
4135 		if (dm->backlight_dev[dm->num_of_edps]) {
4136 			dm->backlight_link[dm->num_of_edps] = link;
4137 			dm->num_of_edps++;
4138 		}
4139 	}
4140 #endif
4141 }
4142 
4143 
4144 /*
4145  * In this architecture, the association
4146  * connector -> encoder -> crtc
4147  * id not really requried. The crtc and connector will hold the
4148  * display_index as an abstraction to use with DAL component
4149  *
4150  * Returns 0 on success
4151  */
4152 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4153 {
4154 	struct amdgpu_display_manager *dm = &adev->dm;
4155 	int32_t i;
4156 	struct amdgpu_dm_connector *aconnector = NULL;
4157 	struct amdgpu_encoder *aencoder = NULL;
4158 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4159 	uint32_t link_cnt;
4160 	int32_t primary_planes;
4161 	enum dc_connection_type new_connection_type = dc_connection_none;
4162 	const struct dc_plane_cap *plane;
4163 	bool psr_feature_enabled = false;
4164 
4165 	dm->display_indexes_num = dm->dc->caps.max_streams;
4166 	/* Update the actual used number of crtc */
4167 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4168 
4169 	link_cnt = dm->dc->caps.max_links;
4170 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4171 		DRM_ERROR("DM: Failed to initialize mode config\n");
4172 		return -EINVAL;
4173 	}
4174 
4175 	/* There is one primary plane per CRTC */
4176 	primary_planes = dm->dc->caps.max_streams;
4177 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4178 
4179 	/*
4180 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4181 	 * Order is reversed to match iteration order in atomic check.
4182 	 */
4183 	for (i = (primary_planes - 1); i >= 0; i--) {
4184 		plane = &dm->dc->caps.planes[i];
4185 
4186 		if (initialize_plane(dm, mode_info, i,
4187 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4188 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4189 			goto fail;
4190 		}
4191 	}
4192 
4193 	/*
4194 	 * Initialize overlay planes, index starting after primary planes.
4195 	 * These planes have a higher DRM index than the primary planes since
4196 	 * they should be considered as having a higher z-order.
4197 	 * Order is reversed to match iteration order in atomic check.
4198 	 *
4199 	 * Only support DCN for now, and only expose one so we don't encourage
4200 	 * userspace to use up all the pipes.
4201 	 */
4202 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4203 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4204 
4205 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4206 			continue;
4207 
4208 		if (!plane->blends_with_above || !plane->blends_with_below)
4209 			continue;
4210 
4211 		if (!plane->pixel_format_support.argb8888)
4212 			continue;
4213 
4214 		if (initialize_plane(dm, NULL, primary_planes + i,
4215 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4216 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4217 			goto fail;
4218 		}
4219 
4220 		/* Only create one overlay plane. */
4221 		break;
4222 	}
4223 
4224 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4225 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4226 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4227 			goto fail;
4228 		}
4229 
4230 	/* Use Outbox interrupt */
4231 	switch (adev->ip_versions[DCE_HWIP][0]) {
4232 	case IP_VERSION(3, 0, 0):
4233 	case IP_VERSION(3, 1, 2):
4234 	case IP_VERSION(3, 1, 3):
4235 	case IP_VERSION(3, 1, 5):
4236 	case IP_VERSION(3, 1, 6):
4237 	case IP_VERSION(2, 1, 0):
4238 		if (register_outbox_irq_handlers(dm->adev)) {
4239 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4240 			goto fail;
4241 		}
4242 		break;
4243 	default:
4244 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4245 			      adev->ip_versions[DCE_HWIP][0]);
4246 	}
4247 
4248 	/* Determine whether to enable PSR support by default. */
4249 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4250 		switch (adev->ip_versions[DCE_HWIP][0]) {
4251 		case IP_VERSION(3, 1, 2):
4252 		case IP_VERSION(3, 1, 3):
4253 		case IP_VERSION(3, 1, 5):
4254 		case IP_VERSION(3, 1, 6):
4255 			psr_feature_enabled = true;
4256 			break;
4257 		default:
4258 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4259 			break;
4260 		}
4261 	}
4262 
4263 	/* loops over all connectors on the board */
4264 	for (i = 0; i < link_cnt; i++) {
4265 		struct dc_link *link = NULL;
4266 
4267 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4268 			DRM_ERROR(
4269 				"KMS: Cannot support more than %d display indexes\n",
4270 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4271 			continue;
4272 		}
4273 
4274 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4275 		if (!aconnector)
4276 			goto fail;
4277 
4278 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4279 		if (!aencoder)
4280 			goto fail;
4281 
4282 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4283 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4284 			goto fail;
4285 		}
4286 
4287 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4288 			DRM_ERROR("KMS: Failed to initialize connector\n");
4289 			goto fail;
4290 		}
4291 
4292 		link = dc_get_link_at_index(dm->dc, i);
4293 
4294 		if (!dc_link_detect_sink(link, &new_connection_type))
4295 			DRM_ERROR("KMS: Failed to detect connector\n");
4296 
4297 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4298 			emulated_link_detect(link);
4299 			amdgpu_dm_update_connector_after_detect(aconnector);
4300 
4301 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4302 			amdgpu_dm_update_connector_after_detect(aconnector);
4303 			register_backlight_device(dm, link);
4304 			if (dm->num_of_edps)
4305 				update_connector_ext_caps(aconnector);
4306 			if (psr_feature_enabled)
4307 				amdgpu_dm_set_psr_caps(link);
4308 
4309 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4310 			 * PSR is also supported.
4311 			 */
4312 			if (link->psr_settings.psr_feature_enabled)
4313 				adev_to_drm(adev)->vblank_disable_immediate = false;
4314 		}
4315 
4316 
4317 	}
4318 
4319 	/* Software is initialized. Now we can register interrupt handlers. */
4320 	switch (adev->asic_type) {
4321 #if defined(CONFIG_DRM_AMD_DC_SI)
4322 	case CHIP_TAHITI:
4323 	case CHIP_PITCAIRN:
4324 	case CHIP_VERDE:
4325 	case CHIP_OLAND:
4326 		if (dce60_register_irq_handlers(dm->adev)) {
4327 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4328 			goto fail;
4329 		}
4330 		break;
4331 #endif
4332 	case CHIP_BONAIRE:
4333 	case CHIP_HAWAII:
4334 	case CHIP_KAVERI:
4335 	case CHIP_KABINI:
4336 	case CHIP_MULLINS:
4337 	case CHIP_TONGA:
4338 	case CHIP_FIJI:
4339 	case CHIP_CARRIZO:
4340 	case CHIP_STONEY:
4341 	case CHIP_POLARIS11:
4342 	case CHIP_POLARIS10:
4343 	case CHIP_POLARIS12:
4344 	case CHIP_VEGAM:
4345 	case CHIP_VEGA10:
4346 	case CHIP_VEGA12:
4347 	case CHIP_VEGA20:
4348 		if (dce110_register_irq_handlers(dm->adev)) {
4349 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4350 			goto fail;
4351 		}
4352 		break;
4353 	default:
4354 		switch (adev->ip_versions[DCE_HWIP][0]) {
4355 		case IP_VERSION(1, 0, 0):
4356 		case IP_VERSION(1, 0, 1):
4357 		case IP_VERSION(2, 0, 2):
4358 		case IP_VERSION(2, 0, 3):
4359 		case IP_VERSION(2, 0, 0):
4360 		case IP_VERSION(2, 1, 0):
4361 		case IP_VERSION(3, 0, 0):
4362 		case IP_VERSION(3, 0, 2):
4363 		case IP_VERSION(3, 0, 3):
4364 		case IP_VERSION(3, 0, 1):
4365 		case IP_VERSION(3, 1, 2):
4366 		case IP_VERSION(3, 1, 3):
4367 		case IP_VERSION(3, 1, 5):
4368 		case IP_VERSION(3, 1, 6):
4369 			if (dcn10_register_irq_handlers(dm->adev)) {
4370 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4371 				goto fail;
4372 			}
4373 			break;
4374 		default:
4375 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4376 					adev->ip_versions[DCE_HWIP][0]);
4377 			goto fail;
4378 		}
4379 		break;
4380 	}
4381 
4382 	return 0;
4383 fail:
4384 	kfree(aencoder);
4385 	kfree(aconnector);
4386 
4387 	return -EINVAL;
4388 }
4389 
4390 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4391 {
4392 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4393 	return;
4394 }
4395 
4396 /******************************************************************************
4397  * amdgpu_display_funcs functions
4398  *****************************************************************************/
4399 
4400 /*
4401  * dm_bandwidth_update - program display watermarks
4402  *
4403  * @adev: amdgpu_device pointer
4404  *
4405  * Calculate and program the display watermarks and line buffer allocation.
4406  */
4407 static void dm_bandwidth_update(struct amdgpu_device *adev)
4408 {
4409 	/* TODO: implement later */
4410 }
4411 
4412 static const struct amdgpu_display_funcs dm_display_funcs = {
4413 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4414 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4415 	.backlight_set_level = NULL, /* never called for DC */
4416 	.backlight_get_level = NULL, /* never called for DC */
4417 	.hpd_sense = NULL,/* called unconditionally */
4418 	.hpd_set_polarity = NULL, /* called unconditionally */
4419 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4420 	.page_flip_get_scanoutpos =
4421 		dm_crtc_get_scanoutpos,/* called unconditionally */
4422 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4423 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4424 };
4425 
4426 #if defined(CONFIG_DEBUG_KERNEL_DC)
4427 
4428 static ssize_t s3_debug_store(struct device *device,
4429 			      struct device_attribute *attr,
4430 			      const char *buf,
4431 			      size_t count)
4432 {
4433 	int ret;
4434 	int s3_state;
4435 	struct drm_device *drm_dev = dev_get_drvdata(device);
4436 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4437 
4438 	ret = kstrtoint(buf, 0, &s3_state);
4439 
4440 	if (ret == 0) {
4441 		if (s3_state) {
4442 			dm_resume(adev);
4443 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4444 		} else
4445 			dm_suspend(adev);
4446 	}
4447 
4448 	return ret == 0 ? count : 0;
4449 }
4450 
4451 DEVICE_ATTR_WO(s3_debug);
4452 
4453 #endif
4454 
4455 static int dm_early_init(void *handle)
4456 {
4457 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4458 
4459 	switch (adev->asic_type) {
4460 #if defined(CONFIG_DRM_AMD_DC_SI)
4461 	case CHIP_TAHITI:
4462 	case CHIP_PITCAIRN:
4463 	case CHIP_VERDE:
4464 		adev->mode_info.num_crtc = 6;
4465 		adev->mode_info.num_hpd = 6;
4466 		adev->mode_info.num_dig = 6;
4467 		break;
4468 	case CHIP_OLAND:
4469 		adev->mode_info.num_crtc = 2;
4470 		adev->mode_info.num_hpd = 2;
4471 		adev->mode_info.num_dig = 2;
4472 		break;
4473 #endif
4474 	case CHIP_BONAIRE:
4475 	case CHIP_HAWAII:
4476 		adev->mode_info.num_crtc = 6;
4477 		adev->mode_info.num_hpd = 6;
4478 		adev->mode_info.num_dig = 6;
4479 		break;
4480 	case CHIP_KAVERI:
4481 		adev->mode_info.num_crtc = 4;
4482 		adev->mode_info.num_hpd = 6;
4483 		adev->mode_info.num_dig = 7;
4484 		break;
4485 	case CHIP_KABINI:
4486 	case CHIP_MULLINS:
4487 		adev->mode_info.num_crtc = 2;
4488 		adev->mode_info.num_hpd = 6;
4489 		adev->mode_info.num_dig = 6;
4490 		break;
4491 	case CHIP_FIJI:
4492 	case CHIP_TONGA:
4493 		adev->mode_info.num_crtc = 6;
4494 		adev->mode_info.num_hpd = 6;
4495 		adev->mode_info.num_dig = 7;
4496 		break;
4497 	case CHIP_CARRIZO:
4498 		adev->mode_info.num_crtc = 3;
4499 		adev->mode_info.num_hpd = 6;
4500 		adev->mode_info.num_dig = 9;
4501 		break;
4502 	case CHIP_STONEY:
4503 		adev->mode_info.num_crtc = 2;
4504 		adev->mode_info.num_hpd = 6;
4505 		adev->mode_info.num_dig = 9;
4506 		break;
4507 	case CHIP_POLARIS11:
4508 	case CHIP_POLARIS12:
4509 		adev->mode_info.num_crtc = 5;
4510 		adev->mode_info.num_hpd = 5;
4511 		adev->mode_info.num_dig = 5;
4512 		break;
4513 	case CHIP_POLARIS10:
4514 	case CHIP_VEGAM:
4515 		adev->mode_info.num_crtc = 6;
4516 		adev->mode_info.num_hpd = 6;
4517 		adev->mode_info.num_dig = 6;
4518 		break;
4519 	case CHIP_VEGA10:
4520 	case CHIP_VEGA12:
4521 	case CHIP_VEGA20:
4522 		adev->mode_info.num_crtc = 6;
4523 		adev->mode_info.num_hpd = 6;
4524 		adev->mode_info.num_dig = 6;
4525 		break;
4526 	default:
4527 
4528 		switch (adev->ip_versions[DCE_HWIP][0]) {
4529 		case IP_VERSION(2, 0, 2):
4530 		case IP_VERSION(3, 0, 0):
4531 			adev->mode_info.num_crtc = 6;
4532 			adev->mode_info.num_hpd = 6;
4533 			adev->mode_info.num_dig = 6;
4534 			break;
4535 		case IP_VERSION(2, 0, 0):
4536 		case IP_VERSION(3, 0, 2):
4537 			adev->mode_info.num_crtc = 5;
4538 			adev->mode_info.num_hpd = 5;
4539 			adev->mode_info.num_dig = 5;
4540 			break;
4541 		case IP_VERSION(2, 0, 3):
4542 		case IP_VERSION(3, 0, 3):
4543 			adev->mode_info.num_crtc = 2;
4544 			adev->mode_info.num_hpd = 2;
4545 			adev->mode_info.num_dig = 2;
4546 			break;
4547 		case IP_VERSION(1, 0, 0):
4548 		case IP_VERSION(1, 0, 1):
4549 		case IP_VERSION(3, 0, 1):
4550 		case IP_VERSION(2, 1, 0):
4551 		case IP_VERSION(3, 1, 2):
4552 		case IP_VERSION(3, 1, 3):
4553 		case IP_VERSION(3, 1, 5):
4554 		case IP_VERSION(3, 1, 6):
4555 			adev->mode_info.num_crtc = 4;
4556 			adev->mode_info.num_hpd = 4;
4557 			adev->mode_info.num_dig = 4;
4558 			break;
4559 		default:
4560 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4561 					adev->ip_versions[DCE_HWIP][0]);
4562 			return -EINVAL;
4563 		}
4564 		break;
4565 	}
4566 
4567 	amdgpu_dm_set_irq_funcs(adev);
4568 
4569 	if (adev->mode_info.funcs == NULL)
4570 		adev->mode_info.funcs = &dm_display_funcs;
4571 
4572 	/*
4573 	 * Note: Do NOT change adev->audio_endpt_rreg and
4574 	 * adev->audio_endpt_wreg because they are initialised in
4575 	 * amdgpu_device_init()
4576 	 */
4577 #if defined(CONFIG_DEBUG_KERNEL_DC)
4578 	device_create_file(
4579 		adev_to_drm(adev)->dev,
4580 		&dev_attr_s3_debug);
4581 #endif
4582 
4583 	return 0;
4584 }
4585 
4586 static bool modeset_required(struct drm_crtc_state *crtc_state,
4587 			     struct dc_stream_state *new_stream,
4588 			     struct dc_stream_state *old_stream)
4589 {
4590 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4591 }
4592 
4593 static bool modereset_required(struct drm_crtc_state *crtc_state)
4594 {
4595 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4596 }
4597 
4598 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4599 {
4600 	drm_encoder_cleanup(encoder);
4601 	kfree(encoder);
4602 }
4603 
4604 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4605 	.destroy = amdgpu_dm_encoder_destroy,
4606 };
4607 
4608 
4609 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4610 					 struct drm_framebuffer *fb,
4611 					 int *min_downscale, int *max_upscale)
4612 {
4613 	struct amdgpu_device *adev = drm_to_adev(dev);
4614 	struct dc *dc = adev->dm.dc;
4615 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4616 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4617 
4618 	switch (fb->format->format) {
4619 	case DRM_FORMAT_P010:
4620 	case DRM_FORMAT_NV12:
4621 	case DRM_FORMAT_NV21:
4622 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4623 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4624 		break;
4625 
4626 	case DRM_FORMAT_XRGB16161616F:
4627 	case DRM_FORMAT_ARGB16161616F:
4628 	case DRM_FORMAT_XBGR16161616F:
4629 	case DRM_FORMAT_ABGR16161616F:
4630 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4631 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4632 		break;
4633 
4634 	default:
4635 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4636 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4637 		break;
4638 	}
4639 
4640 	/*
4641 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4642 	 * scaling factor of 1.0 == 1000 units.
4643 	 */
4644 	if (*max_upscale == 1)
4645 		*max_upscale = 1000;
4646 
4647 	if (*min_downscale == 1)
4648 		*min_downscale = 1000;
4649 }
4650 
4651 
4652 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4653 				const struct drm_plane_state *state,
4654 				struct dc_scaling_info *scaling_info)
4655 {
4656 	int scale_w, scale_h, min_downscale, max_upscale;
4657 
4658 	memset(scaling_info, 0, sizeof(*scaling_info));
4659 
4660 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4661 	scaling_info->src_rect.x = state->src_x >> 16;
4662 	scaling_info->src_rect.y = state->src_y >> 16;
4663 
4664 	/*
4665 	 * For reasons we don't (yet) fully understand a non-zero
4666 	 * src_y coordinate into an NV12 buffer can cause a
4667 	 * system hang on DCN1x.
4668 	 * To avoid hangs (and maybe be overly cautious)
4669 	 * let's reject both non-zero src_x and src_y.
4670 	 *
4671 	 * We currently know of only one use-case to reproduce a
4672 	 * scenario with non-zero src_x and src_y for NV12, which
4673 	 * is to gesture the YouTube Android app into full screen
4674 	 * on ChromeOS.
4675 	 */
4676 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4677 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4678 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4679 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4680 		return -EINVAL;
4681 
4682 	scaling_info->src_rect.width = state->src_w >> 16;
4683 	if (scaling_info->src_rect.width == 0)
4684 		return -EINVAL;
4685 
4686 	scaling_info->src_rect.height = state->src_h >> 16;
4687 	if (scaling_info->src_rect.height == 0)
4688 		return -EINVAL;
4689 
4690 	scaling_info->dst_rect.x = state->crtc_x;
4691 	scaling_info->dst_rect.y = state->crtc_y;
4692 
4693 	if (state->crtc_w == 0)
4694 		return -EINVAL;
4695 
4696 	scaling_info->dst_rect.width = state->crtc_w;
4697 
4698 	if (state->crtc_h == 0)
4699 		return -EINVAL;
4700 
4701 	scaling_info->dst_rect.height = state->crtc_h;
4702 
4703 	/* DRM doesn't specify clipping on destination output. */
4704 	scaling_info->clip_rect = scaling_info->dst_rect;
4705 
4706 	/* Validate scaling per-format with DC plane caps */
4707 	if (state->plane && state->plane->dev && state->fb) {
4708 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4709 					     &min_downscale, &max_upscale);
4710 	} else {
4711 		min_downscale = 250;
4712 		max_upscale = 16000;
4713 	}
4714 
4715 	scale_w = scaling_info->dst_rect.width * 1000 /
4716 		  scaling_info->src_rect.width;
4717 
4718 	if (scale_w < min_downscale || scale_w > max_upscale)
4719 		return -EINVAL;
4720 
4721 	scale_h = scaling_info->dst_rect.height * 1000 /
4722 		  scaling_info->src_rect.height;
4723 
4724 	if (scale_h < min_downscale || scale_h > max_upscale)
4725 		return -EINVAL;
4726 
4727 	/*
4728 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4729 	 * assume reasonable defaults based on the format.
4730 	 */
4731 
4732 	return 0;
4733 }
4734 
4735 static void
4736 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4737 				 uint64_t tiling_flags)
4738 {
4739 	/* Fill GFX8 params */
4740 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4741 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4742 
4743 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4744 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4745 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4746 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4747 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4748 
4749 		/* XXX fix me for VI */
4750 		tiling_info->gfx8.num_banks = num_banks;
4751 		tiling_info->gfx8.array_mode =
4752 				DC_ARRAY_2D_TILED_THIN1;
4753 		tiling_info->gfx8.tile_split = tile_split;
4754 		tiling_info->gfx8.bank_width = bankw;
4755 		tiling_info->gfx8.bank_height = bankh;
4756 		tiling_info->gfx8.tile_aspect = mtaspect;
4757 		tiling_info->gfx8.tile_mode =
4758 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4759 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4760 			== DC_ARRAY_1D_TILED_THIN1) {
4761 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4762 	}
4763 
4764 	tiling_info->gfx8.pipe_config =
4765 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4766 }
4767 
4768 static void
4769 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4770 				  union dc_tiling_info *tiling_info)
4771 {
4772 	tiling_info->gfx9.num_pipes =
4773 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4774 	tiling_info->gfx9.num_banks =
4775 		adev->gfx.config.gb_addr_config_fields.num_banks;
4776 	tiling_info->gfx9.pipe_interleave =
4777 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4778 	tiling_info->gfx9.num_shader_engines =
4779 		adev->gfx.config.gb_addr_config_fields.num_se;
4780 	tiling_info->gfx9.max_compressed_frags =
4781 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4782 	tiling_info->gfx9.num_rb_per_se =
4783 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4784 	tiling_info->gfx9.shaderEnable = 1;
4785 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4786 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4787 }
4788 
4789 static int
4790 validate_dcc(struct amdgpu_device *adev,
4791 	     const enum surface_pixel_format format,
4792 	     const enum dc_rotation_angle rotation,
4793 	     const union dc_tiling_info *tiling_info,
4794 	     const struct dc_plane_dcc_param *dcc,
4795 	     const struct dc_plane_address *address,
4796 	     const struct plane_size *plane_size)
4797 {
4798 	struct dc *dc = adev->dm.dc;
4799 	struct dc_dcc_surface_param input;
4800 	struct dc_surface_dcc_cap output;
4801 
4802 	memset(&input, 0, sizeof(input));
4803 	memset(&output, 0, sizeof(output));
4804 
4805 	if (!dcc->enable)
4806 		return 0;
4807 
4808 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4809 	    !dc->cap_funcs.get_dcc_compression_cap)
4810 		return -EINVAL;
4811 
4812 	input.format = format;
4813 	input.surface_size.width = plane_size->surface_size.width;
4814 	input.surface_size.height = plane_size->surface_size.height;
4815 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4816 
4817 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4818 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4819 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4820 		input.scan = SCAN_DIRECTION_VERTICAL;
4821 
4822 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4823 		return -EINVAL;
4824 
4825 	if (!output.capable)
4826 		return -EINVAL;
4827 
4828 	if (dcc->independent_64b_blks == 0 &&
4829 	    output.grph.rgb.independent_64b_blks != 0)
4830 		return -EINVAL;
4831 
4832 	return 0;
4833 }
4834 
4835 static bool
4836 modifier_has_dcc(uint64_t modifier)
4837 {
4838 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4839 }
4840 
4841 static unsigned
4842 modifier_gfx9_swizzle_mode(uint64_t modifier)
4843 {
4844 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4845 		return 0;
4846 
4847 	return AMD_FMT_MOD_GET(TILE, modifier);
4848 }
4849 
4850 static const struct drm_format_info *
4851 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4852 {
4853 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4854 }
4855 
4856 static void
4857 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4858 				    union dc_tiling_info *tiling_info,
4859 				    uint64_t modifier)
4860 {
4861 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4862 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4863 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4864 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4865 
4866 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4867 
4868 	if (!IS_AMD_FMT_MOD(modifier))
4869 		return;
4870 
4871 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4872 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4873 
4874 	if (adev->family >= AMDGPU_FAMILY_NV) {
4875 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4876 	} else {
4877 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4878 
4879 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4880 	}
4881 }
4882 
4883 enum dm_micro_swizzle {
4884 	MICRO_SWIZZLE_Z = 0,
4885 	MICRO_SWIZZLE_S = 1,
4886 	MICRO_SWIZZLE_D = 2,
4887 	MICRO_SWIZZLE_R = 3
4888 };
4889 
4890 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4891 					  uint32_t format,
4892 					  uint64_t modifier)
4893 {
4894 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4895 	const struct drm_format_info *info = drm_format_info(format);
4896 	int i;
4897 
4898 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4899 
4900 	if (!info)
4901 		return false;
4902 
4903 	/*
4904 	 * We always have to allow these modifiers:
4905 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4906 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4907 	 */
4908 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4909 	    modifier == DRM_FORMAT_MOD_INVALID) {
4910 		return true;
4911 	}
4912 
4913 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4914 	for (i = 0; i < plane->modifier_count; i++) {
4915 		if (modifier == plane->modifiers[i])
4916 			break;
4917 	}
4918 	if (i == plane->modifier_count)
4919 		return false;
4920 
4921 	/*
4922 	 * For D swizzle the canonical modifier depends on the bpp, so check
4923 	 * it here.
4924 	 */
4925 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4926 	    adev->family >= AMDGPU_FAMILY_NV) {
4927 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4928 			return false;
4929 	}
4930 
4931 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4932 	    info->cpp[0] < 8)
4933 		return false;
4934 
4935 	if (modifier_has_dcc(modifier)) {
4936 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4937 		if (info->cpp[0] != 4)
4938 			return false;
4939 		/* We support multi-planar formats, but not when combined with
4940 		 * additional DCC metadata planes. */
4941 		if (info->num_planes > 1)
4942 			return false;
4943 	}
4944 
4945 	return true;
4946 }
4947 
4948 static void
4949 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4950 {
4951 	if (!*mods)
4952 		return;
4953 
4954 	if (*cap - *size < 1) {
4955 		uint64_t new_cap = *cap * 2;
4956 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4957 
4958 		if (!new_mods) {
4959 			kfree(*mods);
4960 			*mods = NULL;
4961 			return;
4962 		}
4963 
4964 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4965 		kfree(*mods);
4966 		*mods = new_mods;
4967 		*cap = new_cap;
4968 	}
4969 
4970 	(*mods)[*size] = mod;
4971 	*size += 1;
4972 }
4973 
4974 static void
4975 add_gfx9_modifiers(const struct amdgpu_device *adev,
4976 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4977 {
4978 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4979 	int pipe_xor_bits = min(8, pipes +
4980 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4981 	int bank_xor_bits = min(8 - pipe_xor_bits,
4982 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4983 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4984 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4985 
4986 
4987 	if (adev->family == AMDGPU_FAMILY_RV) {
4988 		/* Raven2 and later */
4989 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4990 
4991 		/*
4992 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4993 		 * doesn't support _D on DCN
4994 		 */
4995 
4996 		if (has_constant_encode) {
4997 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4998 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4999 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5000 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5001 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5002 				    AMD_FMT_MOD_SET(DCC, 1) |
5003 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5004 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5005 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5006 		}
5007 
5008 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5009 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5010 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5011 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5012 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5013 			    AMD_FMT_MOD_SET(DCC, 1) |
5014 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5015 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5016 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5017 
5018 		if (has_constant_encode) {
5019 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5020 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5021 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5022 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5023 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5024 				    AMD_FMT_MOD_SET(DCC, 1) |
5025 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5026 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5027 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5028 
5029 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5030 				    AMD_FMT_MOD_SET(RB, rb) |
5031 				    AMD_FMT_MOD_SET(PIPE, pipes));
5032 		}
5033 
5034 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5035 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5036 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5037 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5038 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5039 			    AMD_FMT_MOD_SET(DCC, 1) |
5040 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5041 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5042 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5043 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5044 			    AMD_FMT_MOD_SET(RB, rb) |
5045 			    AMD_FMT_MOD_SET(PIPE, pipes));
5046 	}
5047 
5048 	/*
5049 	 * Only supported for 64bpp on Raven, will be filtered on format in
5050 	 * dm_plane_format_mod_supported.
5051 	 */
5052 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5053 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5054 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5055 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5056 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5057 
5058 	if (adev->family == AMDGPU_FAMILY_RV) {
5059 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5060 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5061 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5062 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5063 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5064 	}
5065 
5066 	/*
5067 	 * Only supported for 64bpp on Raven, will be filtered on format in
5068 	 * dm_plane_format_mod_supported.
5069 	 */
5070 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5071 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5072 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5073 
5074 	if (adev->family == AMDGPU_FAMILY_RV) {
5075 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5076 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5077 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5078 	}
5079 }
5080 
5081 static void
5082 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5083 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5084 {
5085 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5086 
5087 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5088 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5089 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5090 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5091 		    AMD_FMT_MOD_SET(DCC, 1) |
5092 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5093 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5094 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5095 
5096 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5097 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5098 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5099 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5100 		    AMD_FMT_MOD_SET(DCC, 1) |
5101 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5102 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5103 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5104 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5105 
5106 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5107 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5108 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5109 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5110 
5111 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5112 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5113 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5114 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5115 
5116 
5117 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5118 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5119 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5120 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5121 
5122 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5123 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5124 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5125 }
5126 
5127 static void
5128 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5129 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5130 {
5131 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5132 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5133 
5134 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5135 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5136 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5137 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5138 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5139 		    AMD_FMT_MOD_SET(DCC, 1) |
5140 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5141 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5142 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5143 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5144 
5145 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5146 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5147 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5148 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5149 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5150 		    AMD_FMT_MOD_SET(DCC, 1) |
5151 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5152 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5153 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5154 
5155 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5156 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5157 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5158 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5159 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5160 		    AMD_FMT_MOD_SET(DCC, 1) |
5161 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5162 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5163 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5164 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5165 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5166 
5167 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5168 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5169 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5170 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5171 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5172 		    AMD_FMT_MOD_SET(DCC, 1) |
5173 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5174 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5175 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5176 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5177 
5178 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5179 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5180 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5181 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5182 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5183 
5184 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5185 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5186 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5187 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5188 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5189 
5190 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5191 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5192 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5193 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5194 
5195 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5196 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5197 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5198 }
5199 
5200 static int
5201 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5202 {
5203 	uint64_t size = 0, capacity = 128;
5204 	*mods = NULL;
5205 
5206 	/* We have not hooked up any pre-GFX9 modifiers. */
5207 	if (adev->family < AMDGPU_FAMILY_AI)
5208 		return 0;
5209 
5210 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5211 
5212 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5213 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5214 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5215 		return *mods ? 0 : -ENOMEM;
5216 	}
5217 
5218 	switch (adev->family) {
5219 	case AMDGPU_FAMILY_AI:
5220 	case AMDGPU_FAMILY_RV:
5221 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5222 		break;
5223 	case AMDGPU_FAMILY_NV:
5224 	case AMDGPU_FAMILY_VGH:
5225 	case AMDGPU_FAMILY_YC:
5226 	case AMDGPU_FAMILY_GC_10_3_6:
5227 	case AMDGPU_FAMILY_GC_10_3_7:
5228 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5229 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5230 		else
5231 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5232 		break;
5233 	}
5234 
5235 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5236 
5237 	/* INVALID marks the end of the list. */
5238 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5239 
5240 	if (!*mods)
5241 		return -ENOMEM;
5242 
5243 	return 0;
5244 }
5245 
5246 static int
5247 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5248 					  const struct amdgpu_framebuffer *afb,
5249 					  const enum surface_pixel_format format,
5250 					  const enum dc_rotation_angle rotation,
5251 					  const struct plane_size *plane_size,
5252 					  union dc_tiling_info *tiling_info,
5253 					  struct dc_plane_dcc_param *dcc,
5254 					  struct dc_plane_address *address,
5255 					  const bool force_disable_dcc)
5256 {
5257 	const uint64_t modifier = afb->base.modifier;
5258 	int ret = 0;
5259 
5260 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5261 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5262 
5263 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5264 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5265 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5266 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5267 
5268 		dcc->enable = 1;
5269 		dcc->meta_pitch = afb->base.pitches[1];
5270 		dcc->independent_64b_blks = independent_64b_blks;
5271 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5272 			if (independent_64b_blks && independent_128b_blks)
5273 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5274 			else if (independent_128b_blks)
5275 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5276 			else if (independent_64b_blks && !independent_128b_blks)
5277 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5278 			else
5279 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5280 		} else {
5281 			if (independent_64b_blks)
5282 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5283 			else
5284 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5285 		}
5286 
5287 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5288 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5289 	}
5290 
5291 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5292 	if (ret)
5293 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5294 
5295 	return ret;
5296 }
5297 
5298 static int
5299 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5300 			     const struct amdgpu_framebuffer *afb,
5301 			     const enum surface_pixel_format format,
5302 			     const enum dc_rotation_angle rotation,
5303 			     const uint64_t tiling_flags,
5304 			     union dc_tiling_info *tiling_info,
5305 			     struct plane_size *plane_size,
5306 			     struct dc_plane_dcc_param *dcc,
5307 			     struct dc_plane_address *address,
5308 			     bool tmz_surface,
5309 			     bool force_disable_dcc)
5310 {
5311 	const struct drm_framebuffer *fb = &afb->base;
5312 	int ret;
5313 
5314 	memset(tiling_info, 0, sizeof(*tiling_info));
5315 	memset(plane_size, 0, sizeof(*plane_size));
5316 	memset(dcc, 0, sizeof(*dcc));
5317 	memset(address, 0, sizeof(*address));
5318 
5319 	address->tmz_surface = tmz_surface;
5320 
5321 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5322 		uint64_t addr = afb->address + fb->offsets[0];
5323 
5324 		plane_size->surface_size.x = 0;
5325 		plane_size->surface_size.y = 0;
5326 		plane_size->surface_size.width = fb->width;
5327 		plane_size->surface_size.height = fb->height;
5328 		plane_size->surface_pitch =
5329 			fb->pitches[0] / fb->format->cpp[0];
5330 
5331 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5332 		address->grph.addr.low_part = lower_32_bits(addr);
5333 		address->grph.addr.high_part = upper_32_bits(addr);
5334 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5335 		uint64_t luma_addr = afb->address + fb->offsets[0];
5336 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5337 
5338 		plane_size->surface_size.x = 0;
5339 		plane_size->surface_size.y = 0;
5340 		plane_size->surface_size.width = fb->width;
5341 		plane_size->surface_size.height = fb->height;
5342 		plane_size->surface_pitch =
5343 			fb->pitches[0] / fb->format->cpp[0];
5344 
5345 		plane_size->chroma_size.x = 0;
5346 		plane_size->chroma_size.y = 0;
5347 		/* TODO: set these based on surface format */
5348 		plane_size->chroma_size.width = fb->width / 2;
5349 		plane_size->chroma_size.height = fb->height / 2;
5350 
5351 		plane_size->chroma_pitch =
5352 			fb->pitches[1] / fb->format->cpp[1];
5353 
5354 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5355 		address->video_progressive.luma_addr.low_part =
5356 			lower_32_bits(luma_addr);
5357 		address->video_progressive.luma_addr.high_part =
5358 			upper_32_bits(luma_addr);
5359 		address->video_progressive.chroma_addr.low_part =
5360 			lower_32_bits(chroma_addr);
5361 		address->video_progressive.chroma_addr.high_part =
5362 			upper_32_bits(chroma_addr);
5363 	}
5364 
5365 	if (adev->family >= AMDGPU_FAMILY_AI) {
5366 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5367 								rotation, plane_size,
5368 								tiling_info, dcc,
5369 								address,
5370 								force_disable_dcc);
5371 		if (ret)
5372 			return ret;
5373 	} else {
5374 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5375 	}
5376 
5377 	return 0;
5378 }
5379 
5380 static void
5381 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5382 			       bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5383 			       bool *global_alpha, int *global_alpha_value)
5384 {
5385 	*per_pixel_alpha = false;
5386 	*pre_multiplied_alpha = true;
5387 	*global_alpha = false;
5388 	*global_alpha_value = 0xff;
5389 
5390 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5391 		return;
5392 
5393 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5394 		plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5395 		static const uint32_t alpha_formats[] = {
5396 			DRM_FORMAT_ARGB8888,
5397 			DRM_FORMAT_RGBA8888,
5398 			DRM_FORMAT_ABGR8888,
5399 		};
5400 		uint32_t format = plane_state->fb->format->format;
5401 		unsigned int i;
5402 
5403 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5404 			if (format == alpha_formats[i]) {
5405 				*per_pixel_alpha = true;
5406 				break;
5407 			}
5408 		}
5409 
5410 		if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5411 			*pre_multiplied_alpha = false;
5412 	}
5413 
5414 	if (plane_state->alpha < 0xffff) {
5415 		*global_alpha = true;
5416 		*global_alpha_value = plane_state->alpha >> 8;
5417 	}
5418 }
5419 
5420 static int
5421 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5422 			    const enum surface_pixel_format format,
5423 			    enum dc_color_space *color_space)
5424 {
5425 	bool full_range;
5426 
5427 	*color_space = COLOR_SPACE_SRGB;
5428 
5429 	/* DRM color properties only affect non-RGB formats. */
5430 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5431 		return 0;
5432 
5433 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5434 
5435 	switch (plane_state->color_encoding) {
5436 	case DRM_COLOR_YCBCR_BT601:
5437 		if (full_range)
5438 			*color_space = COLOR_SPACE_YCBCR601;
5439 		else
5440 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5441 		break;
5442 
5443 	case DRM_COLOR_YCBCR_BT709:
5444 		if (full_range)
5445 			*color_space = COLOR_SPACE_YCBCR709;
5446 		else
5447 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5448 		break;
5449 
5450 	case DRM_COLOR_YCBCR_BT2020:
5451 		if (full_range)
5452 			*color_space = COLOR_SPACE_2020_YCBCR;
5453 		else
5454 			return -EINVAL;
5455 		break;
5456 
5457 	default:
5458 		return -EINVAL;
5459 	}
5460 
5461 	return 0;
5462 }
5463 
5464 static int
5465 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5466 			    const struct drm_plane_state *plane_state,
5467 			    const uint64_t tiling_flags,
5468 			    struct dc_plane_info *plane_info,
5469 			    struct dc_plane_address *address,
5470 			    bool tmz_surface,
5471 			    bool force_disable_dcc)
5472 {
5473 	const struct drm_framebuffer *fb = plane_state->fb;
5474 	const struct amdgpu_framebuffer *afb =
5475 		to_amdgpu_framebuffer(plane_state->fb);
5476 	int ret;
5477 
5478 	memset(plane_info, 0, sizeof(*plane_info));
5479 
5480 	switch (fb->format->format) {
5481 	case DRM_FORMAT_C8:
5482 		plane_info->format =
5483 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5484 		break;
5485 	case DRM_FORMAT_RGB565:
5486 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5487 		break;
5488 	case DRM_FORMAT_XRGB8888:
5489 	case DRM_FORMAT_ARGB8888:
5490 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5491 		break;
5492 	case DRM_FORMAT_XRGB2101010:
5493 	case DRM_FORMAT_ARGB2101010:
5494 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5495 		break;
5496 	case DRM_FORMAT_XBGR2101010:
5497 	case DRM_FORMAT_ABGR2101010:
5498 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5499 		break;
5500 	case DRM_FORMAT_XBGR8888:
5501 	case DRM_FORMAT_ABGR8888:
5502 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5503 		break;
5504 	case DRM_FORMAT_NV21:
5505 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5506 		break;
5507 	case DRM_FORMAT_NV12:
5508 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5509 		break;
5510 	case DRM_FORMAT_P010:
5511 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5512 		break;
5513 	case DRM_FORMAT_XRGB16161616F:
5514 	case DRM_FORMAT_ARGB16161616F:
5515 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5516 		break;
5517 	case DRM_FORMAT_XBGR16161616F:
5518 	case DRM_FORMAT_ABGR16161616F:
5519 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5520 		break;
5521 	case DRM_FORMAT_XRGB16161616:
5522 	case DRM_FORMAT_ARGB16161616:
5523 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5524 		break;
5525 	case DRM_FORMAT_XBGR16161616:
5526 	case DRM_FORMAT_ABGR16161616:
5527 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5528 		break;
5529 	default:
5530 		DRM_ERROR(
5531 			"Unsupported screen format %p4cc\n",
5532 			&fb->format->format);
5533 		return -EINVAL;
5534 	}
5535 
5536 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5537 	case DRM_MODE_ROTATE_0:
5538 		plane_info->rotation = ROTATION_ANGLE_0;
5539 		break;
5540 	case DRM_MODE_ROTATE_90:
5541 		plane_info->rotation = ROTATION_ANGLE_90;
5542 		break;
5543 	case DRM_MODE_ROTATE_180:
5544 		plane_info->rotation = ROTATION_ANGLE_180;
5545 		break;
5546 	case DRM_MODE_ROTATE_270:
5547 		plane_info->rotation = ROTATION_ANGLE_270;
5548 		break;
5549 	default:
5550 		plane_info->rotation = ROTATION_ANGLE_0;
5551 		break;
5552 	}
5553 
5554 	plane_info->visible = true;
5555 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5556 
5557 	plane_info->layer_index = 0;
5558 
5559 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5560 					  &plane_info->color_space);
5561 	if (ret)
5562 		return ret;
5563 
5564 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5565 					   plane_info->rotation, tiling_flags,
5566 					   &plane_info->tiling_info,
5567 					   &plane_info->plane_size,
5568 					   &plane_info->dcc, address, tmz_surface,
5569 					   force_disable_dcc);
5570 	if (ret)
5571 		return ret;
5572 
5573 	fill_blending_from_plane_state(
5574 		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5575 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5576 
5577 	return 0;
5578 }
5579 
5580 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5581 				    struct dc_plane_state *dc_plane_state,
5582 				    struct drm_plane_state *plane_state,
5583 				    struct drm_crtc_state *crtc_state)
5584 {
5585 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5586 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5587 	struct dc_scaling_info scaling_info;
5588 	struct dc_plane_info plane_info;
5589 	int ret;
5590 	bool force_disable_dcc = false;
5591 
5592 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5593 	if (ret)
5594 		return ret;
5595 
5596 	dc_plane_state->src_rect = scaling_info.src_rect;
5597 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5598 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5599 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5600 
5601 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5602 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5603 					  afb->tiling_flags,
5604 					  &plane_info,
5605 					  &dc_plane_state->address,
5606 					  afb->tmz_surface,
5607 					  force_disable_dcc);
5608 	if (ret)
5609 		return ret;
5610 
5611 	dc_plane_state->format = plane_info.format;
5612 	dc_plane_state->color_space = plane_info.color_space;
5613 	dc_plane_state->format = plane_info.format;
5614 	dc_plane_state->plane_size = plane_info.plane_size;
5615 	dc_plane_state->rotation = plane_info.rotation;
5616 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5617 	dc_plane_state->stereo_format = plane_info.stereo_format;
5618 	dc_plane_state->tiling_info = plane_info.tiling_info;
5619 	dc_plane_state->visible = plane_info.visible;
5620 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5621 	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5622 	dc_plane_state->global_alpha = plane_info.global_alpha;
5623 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5624 	dc_plane_state->dcc = plane_info.dcc;
5625 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5626 	dc_plane_state->flip_int_enabled = true;
5627 
5628 	/*
5629 	 * Always set input transfer function, since plane state is refreshed
5630 	 * every time.
5631 	 */
5632 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5633 	if (ret)
5634 		return ret;
5635 
5636 	return 0;
5637 }
5638 
5639 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5640 					   const struct dm_connector_state *dm_state,
5641 					   struct dc_stream_state *stream)
5642 {
5643 	enum amdgpu_rmx_type rmx_type;
5644 
5645 	struct rect src = { 0 }; /* viewport in composition space*/
5646 	struct rect dst = { 0 }; /* stream addressable area */
5647 
5648 	/* no mode. nothing to be done */
5649 	if (!mode)
5650 		return;
5651 
5652 	/* Full screen scaling by default */
5653 	src.width = mode->hdisplay;
5654 	src.height = mode->vdisplay;
5655 	dst.width = stream->timing.h_addressable;
5656 	dst.height = stream->timing.v_addressable;
5657 
5658 	if (dm_state) {
5659 		rmx_type = dm_state->scaling;
5660 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5661 			if (src.width * dst.height <
5662 					src.height * dst.width) {
5663 				/* height needs less upscaling/more downscaling */
5664 				dst.width = src.width *
5665 						dst.height / src.height;
5666 			} else {
5667 				/* width needs less upscaling/more downscaling */
5668 				dst.height = src.height *
5669 						dst.width / src.width;
5670 			}
5671 		} else if (rmx_type == RMX_CENTER) {
5672 			dst = src;
5673 		}
5674 
5675 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5676 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5677 
5678 		if (dm_state->underscan_enable) {
5679 			dst.x += dm_state->underscan_hborder / 2;
5680 			dst.y += dm_state->underscan_vborder / 2;
5681 			dst.width -= dm_state->underscan_hborder;
5682 			dst.height -= dm_state->underscan_vborder;
5683 		}
5684 	}
5685 
5686 	stream->src = src;
5687 	stream->dst = dst;
5688 
5689 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5690 		      dst.x, dst.y, dst.width, dst.height);
5691 
5692 }
5693 
5694 static enum dc_color_depth
5695 convert_color_depth_from_display_info(const struct drm_connector *connector,
5696 				      bool is_y420, int requested_bpc)
5697 {
5698 	uint8_t bpc;
5699 
5700 	if (is_y420) {
5701 		bpc = 8;
5702 
5703 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5704 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5705 			bpc = 16;
5706 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5707 			bpc = 12;
5708 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5709 			bpc = 10;
5710 	} else {
5711 		bpc = (uint8_t)connector->display_info.bpc;
5712 		/* Assume 8 bpc by default if no bpc is specified. */
5713 		bpc = bpc ? bpc : 8;
5714 	}
5715 
5716 	if (requested_bpc > 0) {
5717 		/*
5718 		 * Cap display bpc based on the user requested value.
5719 		 *
5720 		 * The value for state->max_bpc may not correctly updated
5721 		 * depending on when the connector gets added to the state
5722 		 * or if this was called outside of atomic check, so it
5723 		 * can't be used directly.
5724 		 */
5725 		bpc = min_t(u8, bpc, requested_bpc);
5726 
5727 		/* Round down to the nearest even number. */
5728 		bpc = bpc - (bpc & 1);
5729 	}
5730 
5731 	switch (bpc) {
5732 	case 0:
5733 		/*
5734 		 * Temporary Work around, DRM doesn't parse color depth for
5735 		 * EDID revision before 1.4
5736 		 * TODO: Fix edid parsing
5737 		 */
5738 		return COLOR_DEPTH_888;
5739 	case 6:
5740 		return COLOR_DEPTH_666;
5741 	case 8:
5742 		return COLOR_DEPTH_888;
5743 	case 10:
5744 		return COLOR_DEPTH_101010;
5745 	case 12:
5746 		return COLOR_DEPTH_121212;
5747 	case 14:
5748 		return COLOR_DEPTH_141414;
5749 	case 16:
5750 		return COLOR_DEPTH_161616;
5751 	default:
5752 		return COLOR_DEPTH_UNDEFINED;
5753 	}
5754 }
5755 
5756 static enum dc_aspect_ratio
5757 get_aspect_ratio(const struct drm_display_mode *mode_in)
5758 {
5759 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5760 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5761 }
5762 
5763 static enum dc_color_space
5764 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5765 {
5766 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5767 
5768 	switch (dc_crtc_timing->pixel_encoding)	{
5769 	case PIXEL_ENCODING_YCBCR422:
5770 	case PIXEL_ENCODING_YCBCR444:
5771 	case PIXEL_ENCODING_YCBCR420:
5772 	{
5773 		/*
5774 		 * 27030khz is the separation point between HDTV and SDTV
5775 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5776 		 * respectively
5777 		 */
5778 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5779 			if (dc_crtc_timing->flags.Y_ONLY)
5780 				color_space =
5781 					COLOR_SPACE_YCBCR709_LIMITED;
5782 			else
5783 				color_space = COLOR_SPACE_YCBCR709;
5784 		} else {
5785 			if (dc_crtc_timing->flags.Y_ONLY)
5786 				color_space =
5787 					COLOR_SPACE_YCBCR601_LIMITED;
5788 			else
5789 				color_space = COLOR_SPACE_YCBCR601;
5790 		}
5791 
5792 	}
5793 	break;
5794 	case PIXEL_ENCODING_RGB:
5795 		color_space = COLOR_SPACE_SRGB;
5796 		break;
5797 
5798 	default:
5799 		WARN_ON(1);
5800 		break;
5801 	}
5802 
5803 	return color_space;
5804 }
5805 
5806 static bool adjust_colour_depth_from_display_info(
5807 	struct dc_crtc_timing *timing_out,
5808 	const struct drm_display_info *info)
5809 {
5810 	enum dc_color_depth depth = timing_out->display_color_depth;
5811 	int normalized_clk;
5812 	do {
5813 		normalized_clk = timing_out->pix_clk_100hz / 10;
5814 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5815 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5816 			normalized_clk /= 2;
5817 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5818 		switch (depth) {
5819 		case COLOR_DEPTH_888:
5820 			break;
5821 		case COLOR_DEPTH_101010:
5822 			normalized_clk = (normalized_clk * 30) / 24;
5823 			break;
5824 		case COLOR_DEPTH_121212:
5825 			normalized_clk = (normalized_clk * 36) / 24;
5826 			break;
5827 		case COLOR_DEPTH_161616:
5828 			normalized_clk = (normalized_clk * 48) / 24;
5829 			break;
5830 		default:
5831 			/* The above depths are the only ones valid for HDMI. */
5832 			return false;
5833 		}
5834 		if (normalized_clk <= info->max_tmds_clock) {
5835 			timing_out->display_color_depth = depth;
5836 			return true;
5837 		}
5838 	} while (--depth > COLOR_DEPTH_666);
5839 	return false;
5840 }
5841 
5842 static void fill_stream_properties_from_drm_display_mode(
5843 	struct dc_stream_state *stream,
5844 	const struct drm_display_mode *mode_in,
5845 	const struct drm_connector *connector,
5846 	const struct drm_connector_state *connector_state,
5847 	const struct dc_stream_state *old_stream,
5848 	int requested_bpc)
5849 {
5850 	struct dc_crtc_timing *timing_out = &stream->timing;
5851 	const struct drm_display_info *info = &connector->display_info;
5852 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5853 	struct hdmi_vendor_infoframe hv_frame;
5854 	struct hdmi_avi_infoframe avi_frame;
5855 
5856 	memset(&hv_frame, 0, sizeof(hv_frame));
5857 	memset(&avi_frame, 0, sizeof(avi_frame));
5858 
5859 	timing_out->h_border_left = 0;
5860 	timing_out->h_border_right = 0;
5861 	timing_out->v_border_top = 0;
5862 	timing_out->v_border_bottom = 0;
5863 	/* TODO: un-hardcode */
5864 	if (drm_mode_is_420_only(info, mode_in)
5865 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5866 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5867 	else if (drm_mode_is_420_also(info, mode_in)
5868 			&& aconnector->force_yuv420_output)
5869 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5870 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5871 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5872 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5873 	else
5874 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5875 
5876 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5877 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5878 		connector,
5879 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5880 		requested_bpc);
5881 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5882 	timing_out->hdmi_vic = 0;
5883 
5884 	if(old_stream) {
5885 		timing_out->vic = old_stream->timing.vic;
5886 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5887 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5888 	} else {
5889 		timing_out->vic = drm_match_cea_mode(mode_in);
5890 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5891 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5892 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5893 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5894 	}
5895 
5896 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5897 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5898 		timing_out->vic = avi_frame.video_code;
5899 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5900 		timing_out->hdmi_vic = hv_frame.vic;
5901 	}
5902 
5903 	if (is_freesync_video_mode(mode_in, aconnector)) {
5904 		timing_out->h_addressable = mode_in->hdisplay;
5905 		timing_out->h_total = mode_in->htotal;
5906 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5907 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5908 		timing_out->v_total = mode_in->vtotal;
5909 		timing_out->v_addressable = mode_in->vdisplay;
5910 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5911 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5912 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5913 	} else {
5914 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5915 		timing_out->h_total = mode_in->crtc_htotal;
5916 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5917 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5918 		timing_out->v_total = mode_in->crtc_vtotal;
5919 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5920 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5921 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5922 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5923 	}
5924 
5925 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5926 
5927 	stream->output_color_space = get_output_color_space(timing_out);
5928 
5929 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5930 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5931 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5932 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5933 		    drm_mode_is_420_also(info, mode_in) &&
5934 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5935 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5936 			adjust_colour_depth_from_display_info(timing_out, info);
5937 		}
5938 	}
5939 }
5940 
5941 static void fill_audio_info(struct audio_info *audio_info,
5942 			    const struct drm_connector *drm_connector,
5943 			    const struct dc_sink *dc_sink)
5944 {
5945 	int i = 0;
5946 	int cea_revision = 0;
5947 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5948 
5949 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5950 	audio_info->product_id = edid_caps->product_id;
5951 
5952 	cea_revision = drm_connector->display_info.cea_rev;
5953 
5954 	strscpy(audio_info->display_name,
5955 		edid_caps->display_name,
5956 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5957 
5958 	if (cea_revision >= 3) {
5959 		audio_info->mode_count = edid_caps->audio_mode_count;
5960 
5961 		for (i = 0; i < audio_info->mode_count; ++i) {
5962 			audio_info->modes[i].format_code =
5963 					(enum audio_format_code)
5964 					(edid_caps->audio_modes[i].format_code);
5965 			audio_info->modes[i].channel_count =
5966 					edid_caps->audio_modes[i].channel_count;
5967 			audio_info->modes[i].sample_rates.all =
5968 					edid_caps->audio_modes[i].sample_rate;
5969 			audio_info->modes[i].sample_size =
5970 					edid_caps->audio_modes[i].sample_size;
5971 		}
5972 	}
5973 
5974 	audio_info->flags.all = edid_caps->speaker_flags;
5975 
5976 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5977 	if (drm_connector->latency_present[0]) {
5978 		audio_info->video_latency = drm_connector->video_latency[0];
5979 		audio_info->audio_latency = drm_connector->audio_latency[0];
5980 	}
5981 
5982 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5983 
5984 }
5985 
5986 static void
5987 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5988 				      struct drm_display_mode *dst_mode)
5989 {
5990 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5991 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5992 	dst_mode->crtc_clock = src_mode->crtc_clock;
5993 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5994 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5995 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5996 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5997 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5998 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5999 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6000 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6001 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6002 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6003 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6004 }
6005 
6006 static void
6007 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6008 					const struct drm_display_mode *native_mode,
6009 					bool scale_enabled)
6010 {
6011 	if (scale_enabled) {
6012 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6013 	} else if (native_mode->clock == drm_mode->clock &&
6014 			native_mode->htotal == drm_mode->htotal &&
6015 			native_mode->vtotal == drm_mode->vtotal) {
6016 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6017 	} else {
6018 		/* no scaling nor amdgpu inserted, no need to patch */
6019 	}
6020 }
6021 
6022 static struct dc_sink *
6023 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6024 {
6025 	struct dc_sink_init_data sink_init_data = { 0 };
6026 	struct dc_sink *sink = NULL;
6027 	sink_init_data.link = aconnector->dc_link;
6028 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6029 
6030 	sink = dc_sink_create(&sink_init_data);
6031 	if (!sink) {
6032 		DRM_ERROR("Failed to create sink!\n");
6033 		return NULL;
6034 	}
6035 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6036 
6037 	return sink;
6038 }
6039 
6040 static void set_multisync_trigger_params(
6041 		struct dc_stream_state *stream)
6042 {
6043 	struct dc_stream_state *master = NULL;
6044 
6045 	if (stream->triggered_crtc_reset.enabled) {
6046 		master = stream->triggered_crtc_reset.event_source;
6047 		stream->triggered_crtc_reset.event =
6048 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6049 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6050 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6051 	}
6052 }
6053 
6054 static void set_master_stream(struct dc_stream_state *stream_set[],
6055 			      int stream_count)
6056 {
6057 	int j, highest_rfr = 0, master_stream = 0;
6058 
6059 	for (j = 0;  j < stream_count; j++) {
6060 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6061 			int refresh_rate = 0;
6062 
6063 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6064 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6065 			if (refresh_rate > highest_rfr) {
6066 				highest_rfr = refresh_rate;
6067 				master_stream = j;
6068 			}
6069 		}
6070 	}
6071 	for (j = 0;  j < stream_count; j++) {
6072 		if (stream_set[j])
6073 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6074 	}
6075 }
6076 
6077 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6078 {
6079 	int i = 0;
6080 	struct dc_stream_state *stream;
6081 
6082 	if (context->stream_count < 2)
6083 		return;
6084 	for (i = 0; i < context->stream_count ; i++) {
6085 		if (!context->streams[i])
6086 			continue;
6087 		/*
6088 		 * TODO: add a function to read AMD VSDB bits and set
6089 		 * crtc_sync_master.multi_sync_enabled flag
6090 		 * For now it's set to false
6091 		 */
6092 	}
6093 
6094 	set_master_stream(context->streams, context->stream_count);
6095 
6096 	for (i = 0; i < context->stream_count ; i++) {
6097 		stream = context->streams[i];
6098 
6099 		if (!stream)
6100 			continue;
6101 
6102 		set_multisync_trigger_params(stream);
6103 	}
6104 }
6105 
6106 #if defined(CONFIG_DRM_AMD_DC_DCN)
6107 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6108 							struct dc_sink *sink, struct dc_stream_state *stream,
6109 							struct dsc_dec_dpcd_caps *dsc_caps)
6110 {
6111 	stream->timing.flags.DSC = 0;
6112 	dsc_caps->is_dsc_supported = false;
6113 
6114 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6115 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6116 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6117 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6118 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6119 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6120 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6121 				dsc_caps);
6122 	}
6123 }
6124 
6125 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6126 				    struct dc_sink *sink, struct dc_stream_state *stream,
6127 				    struct dsc_dec_dpcd_caps *dsc_caps,
6128 				    uint32_t max_dsc_target_bpp_limit_override)
6129 {
6130 	const struct dc_link_settings *verified_link_cap = NULL;
6131 	uint32_t link_bw_in_kbps;
6132 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6133 	struct dc *dc = sink->ctx->dc;
6134 	struct dc_dsc_bw_range bw_range = {0};
6135 	struct dc_dsc_config dsc_cfg = {0};
6136 
6137 	verified_link_cap = dc_link_get_link_cap(stream->link);
6138 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6139 	edp_min_bpp_x16 = 8 * 16;
6140 	edp_max_bpp_x16 = 8 * 16;
6141 
6142 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6143 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6144 
6145 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6146 		edp_min_bpp_x16 = edp_max_bpp_x16;
6147 
6148 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6149 				dc->debug.dsc_min_slice_height_override,
6150 				edp_min_bpp_x16, edp_max_bpp_x16,
6151 				dsc_caps,
6152 				&stream->timing,
6153 				&bw_range)) {
6154 
6155 		if (bw_range.max_kbps < link_bw_in_kbps) {
6156 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6157 					dsc_caps,
6158 					dc->debug.dsc_min_slice_height_override,
6159 					max_dsc_target_bpp_limit_override,
6160 					0,
6161 					&stream->timing,
6162 					&dsc_cfg)) {
6163 				stream->timing.dsc_cfg = dsc_cfg;
6164 				stream->timing.flags.DSC = 1;
6165 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6166 			}
6167 			return;
6168 		}
6169 	}
6170 
6171 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6172 				dsc_caps,
6173 				dc->debug.dsc_min_slice_height_override,
6174 				max_dsc_target_bpp_limit_override,
6175 				link_bw_in_kbps,
6176 				&stream->timing,
6177 				&dsc_cfg)) {
6178 		stream->timing.dsc_cfg = dsc_cfg;
6179 		stream->timing.flags.DSC = 1;
6180 	}
6181 }
6182 
6183 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6184 										struct dc_sink *sink, struct dc_stream_state *stream,
6185 										struct dsc_dec_dpcd_caps *dsc_caps)
6186 {
6187 	struct drm_connector *drm_connector = &aconnector->base;
6188 	uint32_t link_bandwidth_kbps;
6189 	uint32_t max_dsc_target_bpp_limit_override = 0;
6190 	struct dc *dc = sink->ctx->dc;
6191 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6192 	uint32_t dsc_max_supported_bw_in_kbps;
6193 
6194 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6195 							dc_link_get_link_cap(aconnector->dc_link));
6196 
6197 	if (stream->link && stream->link->local_sink)
6198 		max_dsc_target_bpp_limit_override =
6199 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6200 
6201 	/* Set DSC policy according to dsc_clock_en */
6202 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6203 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6204 
6205 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6206 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6207 
6208 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6209 
6210 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6211 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6212 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6213 						dsc_caps,
6214 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6215 						max_dsc_target_bpp_limit_override,
6216 						link_bandwidth_kbps,
6217 						&stream->timing,
6218 						&stream->timing.dsc_cfg)) {
6219 				stream->timing.flags.DSC = 1;
6220 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6221 								 __func__, drm_connector->name);
6222 			}
6223 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6224 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6225 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6226 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6227 
6228 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6229 					max_supported_bw_in_kbps > 0 &&
6230 					dsc_max_supported_bw_in_kbps > 0)
6231 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6232 						dsc_caps,
6233 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6234 						max_dsc_target_bpp_limit_override,
6235 						dsc_max_supported_bw_in_kbps,
6236 						&stream->timing,
6237 						&stream->timing.dsc_cfg)) {
6238 					stream->timing.flags.DSC = 1;
6239 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6240 									 __func__, drm_connector->name);
6241 				}
6242 		}
6243 	}
6244 
6245 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6246 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6247 		stream->timing.flags.DSC = 1;
6248 
6249 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6250 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6251 
6252 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6253 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6254 
6255 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6256 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6257 }
6258 #endif /* CONFIG_DRM_AMD_DC_DCN */
6259 
6260 /**
6261  * DOC: FreeSync Video
6262  *
6263  * When a userspace application wants to play a video, the content follows a
6264  * standard format definition that usually specifies the FPS for that format.
6265  * The below list illustrates some video format and the expected FPS,
6266  * respectively:
6267  *
6268  * - TV/NTSC (23.976 FPS)
6269  * - Cinema (24 FPS)
6270  * - TV/PAL (25 FPS)
6271  * - TV/NTSC (29.97 FPS)
6272  * - TV/NTSC (30 FPS)
6273  * - Cinema HFR (48 FPS)
6274  * - TV/PAL (50 FPS)
6275  * - Commonly used (60 FPS)
6276  * - Multiples of 24 (48,72,96,120 FPS)
6277  *
6278  * The list of standards video format is not huge and can be added to the
6279  * connector modeset list beforehand. With that, userspace can leverage
6280  * FreeSync to extends the front porch in order to attain the target refresh
6281  * rate. Such a switch will happen seamlessly, without screen blanking or
6282  * reprogramming of the output in any other way. If the userspace requests a
6283  * modesetting change compatible with FreeSync modes that only differ in the
6284  * refresh rate, DC will skip the full update and avoid blink during the
6285  * transition. For example, the video player can change the modesetting from
6286  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6287  * causing any display blink. This same concept can be applied to a mode
6288  * setting change.
6289  */
6290 static struct drm_display_mode *
6291 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6292 			  bool use_probed_modes)
6293 {
6294 	struct drm_display_mode *m, *m_pref = NULL;
6295 	u16 current_refresh, highest_refresh;
6296 	struct list_head *list_head = use_probed_modes ?
6297 						    &aconnector->base.probed_modes :
6298 						    &aconnector->base.modes;
6299 
6300 	if (aconnector->freesync_vid_base.clock != 0)
6301 		return &aconnector->freesync_vid_base;
6302 
6303 	/* Find the preferred mode */
6304 	list_for_each_entry (m, list_head, head) {
6305 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6306 			m_pref = m;
6307 			break;
6308 		}
6309 	}
6310 
6311 	if (!m_pref) {
6312 		/* Probably an EDID with no preferred mode. Fallback to first entry */
6313 		m_pref = list_first_entry_or_null(
6314 			&aconnector->base.modes, struct drm_display_mode, head);
6315 		if (!m_pref) {
6316 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6317 			return NULL;
6318 		}
6319 	}
6320 
6321 	highest_refresh = drm_mode_vrefresh(m_pref);
6322 
6323 	/*
6324 	 * Find the mode with highest refresh rate with same resolution.
6325 	 * For some monitors, preferred mode is not the mode with highest
6326 	 * supported refresh rate.
6327 	 */
6328 	list_for_each_entry (m, list_head, head) {
6329 		current_refresh  = drm_mode_vrefresh(m);
6330 
6331 		if (m->hdisplay == m_pref->hdisplay &&
6332 		    m->vdisplay == m_pref->vdisplay &&
6333 		    highest_refresh < current_refresh) {
6334 			highest_refresh = current_refresh;
6335 			m_pref = m;
6336 		}
6337 	}
6338 
6339 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6340 	return m_pref;
6341 }
6342 
6343 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6344 				   struct amdgpu_dm_connector *aconnector)
6345 {
6346 	struct drm_display_mode *high_mode;
6347 	int timing_diff;
6348 
6349 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6350 	if (!high_mode || !mode)
6351 		return false;
6352 
6353 	timing_diff = high_mode->vtotal - mode->vtotal;
6354 
6355 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6356 	    high_mode->hdisplay != mode->hdisplay ||
6357 	    high_mode->vdisplay != mode->vdisplay ||
6358 	    high_mode->hsync_start != mode->hsync_start ||
6359 	    high_mode->hsync_end != mode->hsync_end ||
6360 	    high_mode->htotal != mode->htotal ||
6361 	    high_mode->hskew != mode->hskew ||
6362 	    high_mode->vscan != mode->vscan ||
6363 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6364 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6365 		return false;
6366 	else
6367 		return true;
6368 }
6369 
6370 static struct dc_stream_state *
6371 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6372 		       const struct drm_display_mode *drm_mode,
6373 		       const struct dm_connector_state *dm_state,
6374 		       const struct dc_stream_state *old_stream,
6375 		       int requested_bpc)
6376 {
6377 	struct drm_display_mode *preferred_mode = NULL;
6378 	struct drm_connector *drm_connector;
6379 	const struct drm_connector_state *con_state =
6380 		dm_state ? &dm_state->base : NULL;
6381 	struct dc_stream_state *stream = NULL;
6382 	struct drm_display_mode mode = *drm_mode;
6383 	struct drm_display_mode saved_mode;
6384 	struct drm_display_mode *freesync_mode = NULL;
6385 	bool native_mode_found = false;
6386 	bool recalculate_timing = false;
6387 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6388 	int mode_refresh;
6389 	int preferred_refresh = 0;
6390 #if defined(CONFIG_DRM_AMD_DC_DCN)
6391 	struct dsc_dec_dpcd_caps dsc_caps;
6392 #endif
6393 	struct dc_sink *sink = NULL;
6394 
6395 	memset(&saved_mode, 0, sizeof(saved_mode));
6396 
6397 	if (aconnector == NULL) {
6398 		DRM_ERROR("aconnector is NULL!\n");
6399 		return stream;
6400 	}
6401 
6402 	drm_connector = &aconnector->base;
6403 
6404 	if (!aconnector->dc_sink) {
6405 		sink = create_fake_sink(aconnector);
6406 		if (!sink)
6407 			return stream;
6408 	} else {
6409 		sink = aconnector->dc_sink;
6410 		dc_sink_retain(sink);
6411 	}
6412 
6413 	stream = dc_create_stream_for_sink(sink);
6414 
6415 	if (stream == NULL) {
6416 		DRM_ERROR("Failed to create stream for sink!\n");
6417 		goto finish;
6418 	}
6419 
6420 	stream->dm_stream_context = aconnector;
6421 
6422 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6423 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6424 
6425 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6426 		/* Search for preferred mode */
6427 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6428 			native_mode_found = true;
6429 			break;
6430 		}
6431 	}
6432 	if (!native_mode_found)
6433 		preferred_mode = list_first_entry_or_null(
6434 				&aconnector->base.modes,
6435 				struct drm_display_mode,
6436 				head);
6437 
6438 	mode_refresh = drm_mode_vrefresh(&mode);
6439 
6440 	if (preferred_mode == NULL) {
6441 		/*
6442 		 * This may not be an error, the use case is when we have no
6443 		 * usermode calls to reset and set mode upon hotplug. In this
6444 		 * case, we call set mode ourselves to restore the previous mode
6445 		 * and the modelist may not be filled in in time.
6446 		 */
6447 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6448 	} else {
6449 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6450 		if (recalculate_timing) {
6451 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6452 			drm_mode_copy(&saved_mode, &mode);
6453 			drm_mode_copy(&mode, freesync_mode);
6454 		} else {
6455 			decide_crtc_timing_for_drm_display_mode(
6456 				&mode, preferred_mode, scale);
6457 
6458 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6459 		}
6460 	}
6461 
6462 	if (recalculate_timing)
6463 		drm_mode_set_crtcinfo(&saved_mode, 0);
6464 	else if (!dm_state)
6465 		drm_mode_set_crtcinfo(&mode, 0);
6466 
6467        /*
6468 	* If scaling is enabled and refresh rate didn't change
6469 	* we copy the vic and polarities of the old timings
6470 	*/
6471 	if (!scale || mode_refresh != preferred_refresh)
6472 		fill_stream_properties_from_drm_display_mode(
6473 			stream, &mode, &aconnector->base, con_state, NULL,
6474 			requested_bpc);
6475 	else
6476 		fill_stream_properties_from_drm_display_mode(
6477 			stream, &mode, &aconnector->base, con_state, old_stream,
6478 			requested_bpc);
6479 
6480 #if defined(CONFIG_DRM_AMD_DC_DCN)
6481 	/* SST DSC determination policy */
6482 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6483 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6484 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6485 #endif
6486 
6487 	update_stream_scaling_settings(&mode, dm_state, stream);
6488 
6489 	fill_audio_info(
6490 		&stream->audio_info,
6491 		drm_connector,
6492 		sink);
6493 
6494 	update_stream_signal(stream, sink);
6495 
6496 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6497 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6498 
6499 	if (stream->link->psr_settings.psr_feature_enabled) {
6500 		//
6501 		// should decide stream support vsc sdp colorimetry capability
6502 		// before building vsc info packet
6503 		//
6504 		stream->use_vsc_sdp_for_colorimetry = false;
6505 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6506 			stream->use_vsc_sdp_for_colorimetry =
6507 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6508 		} else {
6509 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6510 				stream->use_vsc_sdp_for_colorimetry = true;
6511 		}
6512 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6513 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6514 
6515 	}
6516 finish:
6517 	dc_sink_release(sink);
6518 
6519 	return stream;
6520 }
6521 
6522 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6523 {
6524 	drm_crtc_cleanup(crtc);
6525 	kfree(crtc);
6526 }
6527 
6528 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6529 				  struct drm_crtc_state *state)
6530 {
6531 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6532 
6533 	/* TODO Destroy dc_stream objects are stream object is flattened */
6534 	if (cur->stream)
6535 		dc_stream_release(cur->stream);
6536 
6537 
6538 	__drm_atomic_helper_crtc_destroy_state(state);
6539 
6540 
6541 	kfree(state);
6542 }
6543 
6544 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6545 {
6546 	struct dm_crtc_state *state;
6547 
6548 	if (crtc->state)
6549 		dm_crtc_destroy_state(crtc, crtc->state);
6550 
6551 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6552 	if (WARN_ON(!state))
6553 		return;
6554 
6555 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6556 }
6557 
6558 static struct drm_crtc_state *
6559 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6560 {
6561 	struct dm_crtc_state *state, *cur;
6562 
6563 	cur = to_dm_crtc_state(crtc->state);
6564 
6565 	if (WARN_ON(!crtc->state))
6566 		return NULL;
6567 
6568 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6569 	if (!state)
6570 		return NULL;
6571 
6572 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6573 
6574 	if (cur->stream) {
6575 		state->stream = cur->stream;
6576 		dc_stream_retain(state->stream);
6577 	}
6578 
6579 	state->active_planes = cur->active_planes;
6580 	state->vrr_infopacket = cur->vrr_infopacket;
6581 	state->abm_level = cur->abm_level;
6582 	state->vrr_supported = cur->vrr_supported;
6583 	state->freesync_config = cur->freesync_config;
6584 	state->cm_has_degamma = cur->cm_has_degamma;
6585 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6586 	state->force_dpms_off = cur->force_dpms_off;
6587 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
6588 
6589 	return &state->base;
6590 }
6591 
6592 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6593 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6594 {
6595 	crtc_debugfs_init(crtc);
6596 
6597 	return 0;
6598 }
6599 #endif
6600 
6601 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6602 {
6603 	enum dc_irq_source irq_source;
6604 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6605 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6606 	int rc;
6607 
6608 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6609 
6610 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6611 
6612 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6613 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6614 	return rc;
6615 }
6616 
6617 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6618 {
6619 	enum dc_irq_source irq_source;
6620 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6621 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6622 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6623 	struct amdgpu_display_manager *dm = &adev->dm;
6624 	struct vblank_control_work *work;
6625 	int rc = 0;
6626 
6627 	if (enable) {
6628 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6629 		if (amdgpu_dm_vrr_active(acrtc_state))
6630 			rc = dm_set_vupdate_irq(crtc, true);
6631 	} else {
6632 		/* vblank irq off -> vupdate irq off */
6633 		rc = dm_set_vupdate_irq(crtc, false);
6634 	}
6635 
6636 	if (rc)
6637 		return rc;
6638 
6639 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6640 
6641 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6642 		return -EBUSY;
6643 
6644 	if (amdgpu_in_reset(adev))
6645 		return 0;
6646 
6647 	if (dm->vblank_control_workqueue) {
6648 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6649 		if (!work)
6650 			return -ENOMEM;
6651 
6652 		INIT_WORK(&work->work, vblank_control_worker);
6653 		work->dm = dm;
6654 		work->acrtc = acrtc;
6655 		work->enable = enable;
6656 
6657 		if (acrtc_state->stream) {
6658 			dc_stream_retain(acrtc_state->stream);
6659 			work->stream = acrtc_state->stream;
6660 		}
6661 
6662 		queue_work(dm->vblank_control_workqueue, &work->work);
6663 	}
6664 
6665 	return 0;
6666 }
6667 
6668 static int dm_enable_vblank(struct drm_crtc *crtc)
6669 {
6670 	return dm_set_vblank(crtc, true);
6671 }
6672 
6673 static void dm_disable_vblank(struct drm_crtc *crtc)
6674 {
6675 	dm_set_vblank(crtc, false);
6676 }
6677 
6678 /* Implemented only the options currently availible for the driver */
6679 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6680 	.reset = dm_crtc_reset_state,
6681 	.destroy = amdgpu_dm_crtc_destroy,
6682 	.set_config = drm_atomic_helper_set_config,
6683 	.page_flip = drm_atomic_helper_page_flip,
6684 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6685 	.atomic_destroy_state = dm_crtc_destroy_state,
6686 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6687 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6688 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6689 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6690 	.enable_vblank = dm_enable_vblank,
6691 	.disable_vblank = dm_disable_vblank,
6692 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6693 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6694 	.late_register = amdgpu_dm_crtc_late_register,
6695 #endif
6696 };
6697 
6698 static enum drm_connector_status
6699 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6700 {
6701 	bool connected;
6702 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6703 
6704 	/*
6705 	 * Notes:
6706 	 * 1. This interface is NOT called in context of HPD irq.
6707 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6708 	 * makes it a bad place for *any* MST-related activity.
6709 	 */
6710 
6711 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6712 	    !aconnector->fake_enable)
6713 		connected = (aconnector->dc_sink != NULL);
6714 	else
6715 		connected = (aconnector->base.force == DRM_FORCE_ON);
6716 
6717 	update_subconnector_property(aconnector);
6718 
6719 	return (connected ? connector_status_connected :
6720 			connector_status_disconnected);
6721 }
6722 
6723 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6724 					    struct drm_connector_state *connector_state,
6725 					    struct drm_property *property,
6726 					    uint64_t val)
6727 {
6728 	struct drm_device *dev = connector->dev;
6729 	struct amdgpu_device *adev = drm_to_adev(dev);
6730 	struct dm_connector_state *dm_old_state =
6731 		to_dm_connector_state(connector->state);
6732 	struct dm_connector_state *dm_new_state =
6733 		to_dm_connector_state(connector_state);
6734 
6735 	int ret = -EINVAL;
6736 
6737 	if (property == dev->mode_config.scaling_mode_property) {
6738 		enum amdgpu_rmx_type rmx_type;
6739 
6740 		switch (val) {
6741 		case DRM_MODE_SCALE_CENTER:
6742 			rmx_type = RMX_CENTER;
6743 			break;
6744 		case DRM_MODE_SCALE_ASPECT:
6745 			rmx_type = RMX_ASPECT;
6746 			break;
6747 		case DRM_MODE_SCALE_FULLSCREEN:
6748 			rmx_type = RMX_FULL;
6749 			break;
6750 		case DRM_MODE_SCALE_NONE:
6751 		default:
6752 			rmx_type = RMX_OFF;
6753 			break;
6754 		}
6755 
6756 		if (dm_old_state->scaling == rmx_type)
6757 			return 0;
6758 
6759 		dm_new_state->scaling = rmx_type;
6760 		ret = 0;
6761 	} else if (property == adev->mode_info.underscan_hborder_property) {
6762 		dm_new_state->underscan_hborder = val;
6763 		ret = 0;
6764 	} else if (property == adev->mode_info.underscan_vborder_property) {
6765 		dm_new_state->underscan_vborder = val;
6766 		ret = 0;
6767 	} else if (property == adev->mode_info.underscan_property) {
6768 		dm_new_state->underscan_enable = val;
6769 		ret = 0;
6770 	} else if (property == adev->mode_info.abm_level_property) {
6771 		dm_new_state->abm_level = val;
6772 		ret = 0;
6773 	}
6774 
6775 	return ret;
6776 }
6777 
6778 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6779 					    const struct drm_connector_state *state,
6780 					    struct drm_property *property,
6781 					    uint64_t *val)
6782 {
6783 	struct drm_device *dev = connector->dev;
6784 	struct amdgpu_device *adev = drm_to_adev(dev);
6785 	struct dm_connector_state *dm_state =
6786 		to_dm_connector_state(state);
6787 	int ret = -EINVAL;
6788 
6789 	if (property == dev->mode_config.scaling_mode_property) {
6790 		switch (dm_state->scaling) {
6791 		case RMX_CENTER:
6792 			*val = DRM_MODE_SCALE_CENTER;
6793 			break;
6794 		case RMX_ASPECT:
6795 			*val = DRM_MODE_SCALE_ASPECT;
6796 			break;
6797 		case RMX_FULL:
6798 			*val = DRM_MODE_SCALE_FULLSCREEN;
6799 			break;
6800 		case RMX_OFF:
6801 		default:
6802 			*val = DRM_MODE_SCALE_NONE;
6803 			break;
6804 		}
6805 		ret = 0;
6806 	} else if (property == adev->mode_info.underscan_hborder_property) {
6807 		*val = dm_state->underscan_hborder;
6808 		ret = 0;
6809 	} else if (property == adev->mode_info.underscan_vborder_property) {
6810 		*val = dm_state->underscan_vborder;
6811 		ret = 0;
6812 	} else if (property == adev->mode_info.underscan_property) {
6813 		*val = dm_state->underscan_enable;
6814 		ret = 0;
6815 	} else if (property == adev->mode_info.abm_level_property) {
6816 		*val = dm_state->abm_level;
6817 		ret = 0;
6818 	}
6819 
6820 	return ret;
6821 }
6822 
6823 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6824 {
6825 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6826 
6827 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6828 }
6829 
6830 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6831 {
6832 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6833 	const struct dc_link *link = aconnector->dc_link;
6834 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6835 	struct amdgpu_display_manager *dm = &adev->dm;
6836 	int i;
6837 
6838 	/*
6839 	 * Call only if mst_mgr was iniitalized before since it's not done
6840 	 * for all connector types.
6841 	 */
6842 	if (aconnector->mst_mgr.dev)
6843 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6844 
6845 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6846 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6847 	for (i = 0; i < dm->num_of_edps; i++) {
6848 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6849 			backlight_device_unregister(dm->backlight_dev[i]);
6850 			dm->backlight_dev[i] = NULL;
6851 		}
6852 	}
6853 #endif
6854 
6855 	if (aconnector->dc_em_sink)
6856 		dc_sink_release(aconnector->dc_em_sink);
6857 	aconnector->dc_em_sink = NULL;
6858 	if (aconnector->dc_sink)
6859 		dc_sink_release(aconnector->dc_sink);
6860 	aconnector->dc_sink = NULL;
6861 
6862 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6863 	drm_connector_unregister(connector);
6864 	drm_connector_cleanup(connector);
6865 	if (aconnector->i2c) {
6866 		i2c_del_adapter(&aconnector->i2c->base);
6867 		kfree(aconnector->i2c);
6868 	}
6869 	kfree(aconnector->dm_dp_aux.aux.name);
6870 
6871 	kfree(connector);
6872 }
6873 
6874 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6875 {
6876 	struct dm_connector_state *state =
6877 		to_dm_connector_state(connector->state);
6878 
6879 	if (connector->state)
6880 		__drm_atomic_helper_connector_destroy_state(connector->state);
6881 
6882 	kfree(state);
6883 
6884 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6885 
6886 	if (state) {
6887 		state->scaling = RMX_OFF;
6888 		state->underscan_enable = false;
6889 		state->underscan_hborder = 0;
6890 		state->underscan_vborder = 0;
6891 		state->base.max_requested_bpc = 8;
6892 		state->vcpi_slots = 0;
6893 		state->pbn = 0;
6894 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6895 			state->abm_level = amdgpu_dm_abm_level;
6896 
6897 		__drm_atomic_helper_connector_reset(connector, &state->base);
6898 	}
6899 }
6900 
6901 struct drm_connector_state *
6902 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6903 {
6904 	struct dm_connector_state *state =
6905 		to_dm_connector_state(connector->state);
6906 
6907 	struct dm_connector_state *new_state =
6908 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6909 
6910 	if (!new_state)
6911 		return NULL;
6912 
6913 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6914 
6915 	new_state->freesync_capable = state->freesync_capable;
6916 	new_state->abm_level = state->abm_level;
6917 	new_state->scaling = state->scaling;
6918 	new_state->underscan_enable = state->underscan_enable;
6919 	new_state->underscan_hborder = state->underscan_hborder;
6920 	new_state->underscan_vborder = state->underscan_vborder;
6921 	new_state->vcpi_slots = state->vcpi_slots;
6922 	new_state->pbn = state->pbn;
6923 	return &new_state->base;
6924 }
6925 
6926 static int
6927 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6928 {
6929 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6930 		to_amdgpu_dm_connector(connector);
6931 	int r;
6932 
6933 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6934 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6935 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6936 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6937 		if (r)
6938 			return r;
6939 	}
6940 
6941 #if defined(CONFIG_DEBUG_FS)
6942 	connector_debugfs_init(amdgpu_dm_connector);
6943 #endif
6944 
6945 	return 0;
6946 }
6947 
6948 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6949 	.reset = amdgpu_dm_connector_funcs_reset,
6950 	.detect = amdgpu_dm_connector_detect,
6951 	.fill_modes = drm_helper_probe_single_connector_modes,
6952 	.destroy = amdgpu_dm_connector_destroy,
6953 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6954 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6955 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6956 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6957 	.late_register = amdgpu_dm_connector_late_register,
6958 	.early_unregister = amdgpu_dm_connector_unregister
6959 };
6960 
6961 static int get_modes(struct drm_connector *connector)
6962 {
6963 	return amdgpu_dm_connector_get_modes(connector);
6964 }
6965 
6966 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6967 {
6968 	struct dc_sink_init_data init_params = {
6969 			.link = aconnector->dc_link,
6970 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6971 	};
6972 	struct edid *edid;
6973 
6974 	if (!aconnector->base.edid_blob_ptr) {
6975 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6976 				aconnector->base.name);
6977 
6978 		aconnector->base.force = DRM_FORCE_OFF;
6979 		aconnector->base.override_edid = false;
6980 		return;
6981 	}
6982 
6983 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6984 
6985 	aconnector->edid = edid;
6986 
6987 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6988 		aconnector->dc_link,
6989 		(uint8_t *)edid,
6990 		(edid->extensions + 1) * EDID_LENGTH,
6991 		&init_params);
6992 
6993 	if (aconnector->base.force == DRM_FORCE_ON) {
6994 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6995 		aconnector->dc_link->local_sink :
6996 		aconnector->dc_em_sink;
6997 		dc_sink_retain(aconnector->dc_sink);
6998 	}
6999 }
7000 
7001 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7002 {
7003 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7004 
7005 	/*
7006 	 * In case of headless boot with force on for DP managed connector
7007 	 * Those settings have to be != 0 to get initial modeset
7008 	 */
7009 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7010 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7011 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7012 	}
7013 
7014 
7015 	aconnector->base.override_edid = true;
7016 	create_eml_sink(aconnector);
7017 }
7018 
7019 struct dc_stream_state *
7020 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7021 				const struct drm_display_mode *drm_mode,
7022 				const struct dm_connector_state *dm_state,
7023 				const struct dc_stream_state *old_stream)
7024 {
7025 	struct drm_connector *connector = &aconnector->base;
7026 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7027 	struct dc_stream_state *stream;
7028 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7029 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7030 	enum dc_status dc_result = DC_OK;
7031 
7032 	do {
7033 		stream = create_stream_for_sink(aconnector, drm_mode,
7034 						dm_state, old_stream,
7035 						requested_bpc);
7036 		if (stream == NULL) {
7037 			DRM_ERROR("Failed to create stream for sink!\n");
7038 			break;
7039 		}
7040 
7041 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7042 
7043 		if (dc_result != DC_OK) {
7044 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7045 				      drm_mode->hdisplay,
7046 				      drm_mode->vdisplay,
7047 				      drm_mode->clock,
7048 				      dc_result,
7049 				      dc_status_to_str(dc_result));
7050 
7051 			dc_stream_release(stream);
7052 			stream = NULL;
7053 			requested_bpc -= 2; /* lower bpc to retry validation */
7054 		}
7055 
7056 	} while (stream == NULL && requested_bpc >= 6);
7057 
7058 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7059 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7060 
7061 		aconnector->force_yuv420_output = true;
7062 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7063 						dm_state, old_stream);
7064 		aconnector->force_yuv420_output = false;
7065 	}
7066 
7067 	return stream;
7068 }
7069 
7070 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7071 				   struct drm_display_mode *mode)
7072 {
7073 	int result = MODE_ERROR;
7074 	struct dc_sink *dc_sink;
7075 	/* TODO: Unhardcode stream count */
7076 	struct dc_stream_state *stream;
7077 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7078 
7079 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7080 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7081 		return result;
7082 
7083 	/*
7084 	 * Only run this the first time mode_valid is called to initilialize
7085 	 * EDID mgmt
7086 	 */
7087 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7088 		!aconnector->dc_em_sink)
7089 		handle_edid_mgmt(aconnector);
7090 
7091 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7092 
7093 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7094 				aconnector->base.force != DRM_FORCE_ON) {
7095 		DRM_ERROR("dc_sink is NULL!\n");
7096 		goto fail;
7097 	}
7098 
7099 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7100 	if (stream) {
7101 		dc_stream_release(stream);
7102 		result = MODE_OK;
7103 	}
7104 
7105 fail:
7106 	/* TODO: error handling*/
7107 	return result;
7108 }
7109 
7110 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7111 				struct dc_info_packet *out)
7112 {
7113 	struct hdmi_drm_infoframe frame;
7114 	unsigned char buf[30]; /* 26 + 4 */
7115 	ssize_t len;
7116 	int ret, i;
7117 
7118 	memset(out, 0, sizeof(*out));
7119 
7120 	if (!state->hdr_output_metadata)
7121 		return 0;
7122 
7123 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7124 	if (ret)
7125 		return ret;
7126 
7127 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7128 	if (len < 0)
7129 		return (int)len;
7130 
7131 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7132 	if (len != 30)
7133 		return -EINVAL;
7134 
7135 	/* Prepare the infopacket for DC. */
7136 	switch (state->connector->connector_type) {
7137 	case DRM_MODE_CONNECTOR_HDMIA:
7138 		out->hb0 = 0x87; /* type */
7139 		out->hb1 = 0x01; /* version */
7140 		out->hb2 = 0x1A; /* length */
7141 		out->sb[0] = buf[3]; /* checksum */
7142 		i = 1;
7143 		break;
7144 
7145 	case DRM_MODE_CONNECTOR_DisplayPort:
7146 	case DRM_MODE_CONNECTOR_eDP:
7147 		out->hb0 = 0x00; /* sdp id, zero */
7148 		out->hb1 = 0x87; /* type */
7149 		out->hb2 = 0x1D; /* payload len - 1 */
7150 		out->hb3 = (0x13 << 2); /* sdp version */
7151 		out->sb[0] = 0x01; /* version */
7152 		out->sb[1] = 0x1A; /* length */
7153 		i = 2;
7154 		break;
7155 
7156 	default:
7157 		return -EINVAL;
7158 	}
7159 
7160 	memcpy(&out->sb[i], &buf[4], 26);
7161 	out->valid = true;
7162 
7163 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7164 		       sizeof(out->sb), false);
7165 
7166 	return 0;
7167 }
7168 
7169 static int
7170 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7171 				 struct drm_atomic_state *state)
7172 {
7173 	struct drm_connector_state *new_con_state =
7174 		drm_atomic_get_new_connector_state(state, conn);
7175 	struct drm_connector_state *old_con_state =
7176 		drm_atomic_get_old_connector_state(state, conn);
7177 	struct drm_crtc *crtc = new_con_state->crtc;
7178 	struct drm_crtc_state *new_crtc_state;
7179 	int ret;
7180 
7181 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7182 
7183 	if (!crtc)
7184 		return 0;
7185 
7186 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7187 		struct dc_info_packet hdr_infopacket;
7188 
7189 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7190 		if (ret)
7191 			return ret;
7192 
7193 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7194 		if (IS_ERR(new_crtc_state))
7195 			return PTR_ERR(new_crtc_state);
7196 
7197 		/*
7198 		 * DC considers the stream backends changed if the
7199 		 * static metadata changes. Forcing the modeset also
7200 		 * gives a simple way for userspace to switch from
7201 		 * 8bpc to 10bpc when setting the metadata to enter
7202 		 * or exit HDR.
7203 		 *
7204 		 * Changing the static metadata after it's been
7205 		 * set is permissible, however. So only force a
7206 		 * modeset if we're entering or exiting HDR.
7207 		 */
7208 		new_crtc_state->mode_changed =
7209 			!old_con_state->hdr_output_metadata ||
7210 			!new_con_state->hdr_output_metadata;
7211 	}
7212 
7213 	return 0;
7214 }
7215 
7216 static const struct drm_connector_helper_funcs
7217 amdgpu_dm_connector_helper_funcs = {
7218 	/*
7219 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
7220 	 * modes will be filtered by drm_mode_validate_size(), and those modes
7221 	 * are missing after user start lightdm. So we need to renew modes list.
7222 	 * in get_modes call back, not just return the modes count
7223 	 */
7224 	.get_modes = get_modes,
7225 	.mode_valid = amdgpu_dm_connector_mode_valid,
7226 	.atomic_check = amdgpu_dm_connector_atomic_check,
7227 };
7228 
7229 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7230 {
7231 }
7232 
7233 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7234 {
7235 	struct drm_atomic_state *state = new_crtc_state->state;
7236 	struct drm_plane *plane;
7237 	int num_active = 0;
7238 
7239 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7240 		struct drm_plane_state *new_plane_state;
7241 
7242 		/* Cursor planes are "fake". */
7243 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7244 			continue;
7245 
7246 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7247 
7248 		if (!new_plane_state) {
7249 			/*
7250 			 * The plane is enable on the CRTC and hasn't changed
7251 			 * state. This means that it previously passed
7252 			 * validation and is therefore enabled.
7253 			 */
7254 			num_active += 1;
7255 			continue;
7256 		}
7257 
7258 		/* We need a framebuffer to be considered enabled. */
7259 		num_active += (new_plane_state->fb != NULL);
7260 	}
7261 
7262 	return num_active;
7263 }
7264 
7265 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7266 					 struct drm_crtc_state *new_crtc_state)
7267 {
7268 	struct dm_crtc_state *dm_new_crtc_state =
7269 		to_dm_crtc_state(new_crtc_state);
7270 
7271 	dm_new_crtc_state->active_planes = 0;
7272 
7273 	if (!dm_new_crtc_state->stream)
7274 		return;
7275 
7276 	dm_new_crtc_state->active_planes =
7277 		count_crtc_active_planes(new_crtc_state);
7278 }
7279 
7280 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7281 				       struct drm_atomic_state *state)
7282 {
7283 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7284 									  crtc);
7285 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7286 	struct dc *dc = adev->dm.dc;
7287 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7288 	int ret = -EINVAL;
7289 
7290 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7291 
7292 	dm_update_crtc_active_planes(crtc, crtc_state);
7293 
7294 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7295 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7296 		return ret;
7297 	}
7298 
7299 	/*
7300 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7301 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7302 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7303 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7304 	 */
7305 	if (crtc_state->enable &&
7306 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7307 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7308 		return -EINVAL;
7309 	}
7310 
7311 	/* In some use cases, like reset, no stream is attached */
7312 	if (!dm_crtc_state->stream)
7313 		return 0;
7314 
7315 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7316 		return 0;
7317 
7318 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7319 	return ret;
7320 }
7321 
7322 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7323 				      const struct drm_display_mode *mode,
7324 				      struct drm_display_mode *adjusted_mode)
7325 {
7326 	return true;
7327 }
7328 
7329 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7330 	.disable = dm_crtc_helper_disable,
7331 	.atomic_check = dm_crtc_helper_atomic_check,
7332 	.mode_fixup = dm_crtc_helper_mode_fixup,
7333 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7334 };
7335 
7336 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7337 {
7338 
7339 }
7340 
7341 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
7342 {
7343 	switch (display_color_depth) {
7344 		case COLOR_DEPTH_666:
7345 			return 6;
7346 		case COLOR_DEPTH_888:
7347 			return 8;
7348 		case COLOR_DEPTH_101010:
7349 			return 10;
7350 		case COLOR_DEPTH_121212:
7351 			return 12;
7352 		case COLOR_DEPTH_141414:
7353 			return 14;
7354 		case COLOR_DEPTH_161616:
7355 			return 16;
7356 		default:
7357 			break;
7358 		}
7359 	return 0;
7360 }
7361 
7362 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7363 					  struct drm_crtc_state *crtc_state,
7364 					  struct drm_connector_state *conn_state)
7365 {
7366 	struct drm_atomic_state *state = crtc_state->state;
7367 	struct drm_connector *connector = conn_state->connector;
7368 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7369 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7370 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7371 	struct drm_dp_mst_topology_mgr *mst_mgr;
7372 	struct drm_dp_mst_port *mst_port;
7373 	enum dc_color_depth color_depth;
7374 	int clock, bpp = 0;
7375 	bool is_y420 = false;
7376 
7377 	if (!aconnector->port || !aconnector->dc_sink)
7378 		return 0;
7379 
7380 	mst_port = aconnector->port;
7381 	mst_mgr = &aconnector->mst_port->mst_mgr;
7382 
7383 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7384 		return 0;
7385 
7386 	if (!state->duplicated) {
7387 		int max_bpc = conn_state->max_requested_bpc;
7388 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7389 				aconnector->force_yuv420_output;
7390 		color_depth = convert_color_depth_from_display_info(connector,
7391 								    is_y420,
7392 								    max_bpc);
7393 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7394 		clock = adjusted_mode->clock;
7395 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7396 	}
7397 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7398 									   mst_mgr,
7399 									   mst_port,
7400 									   dm_new_connector_state->pbn,
7401 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7402 	if (dm_new_connector_state->vcpi_slots < 0) {
7403 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7404 		return dm_new_connector_state->vcpi_slots;
7405 	}
7406 	return 0;
7407 }
7408 
7409 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7410 	.disable = dm_encoder_helper_disable,
7411 	.atomic_check = dm_encoder_helper_atomic_check
7412 };
7413 
7414 #if defined(CONFIG_DRM_AMD_DC_DCN)
7415 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7416 					    struct dc_state *dc_state,
7417 					    struct dsc_mst_fairness_vars *vars)
7418 {
7419 	struct dc_stream_state *stream = NULL;
7420 	struct drm_connector *connector;
7421 	struct drm_connector_state *new_con_state;
7422 	struct amdgpu_dm_connector *aconnector;
7423 	struct dm_connector_state *dm_conn_state;
7424 	int i, j;
7425 	int vcpi, pbn_div, pbn, slot_num = 0;
7426 
7427 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7428 
7429 		aconnector = to_amdgpu_dm_connector(connector);
7430 
7431 		if (!aconnector->port)
7432 			continue;
7433 
7434 		if (!new_con_state || !new_con_state->crtc)
7435 			continue;
7436 
7437 		dm_conn_state = to_dm_connector_state(new_con_state);
7438 
7439 		for (j = 0; j < dc_state->stream_count; j++) {
7440 			stream = dc_state->streams[j];
7441 			if (!stream)
7442 				continue;
7443 
7444 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7445 				break;
7446 
7447 			stream = NULL;
7448 		}
7449 
7450 		if (!stream)
7451 			continue;
7452 
7453 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7454 		/* pbn is calculated by compute_mst_dsc_configs_for_state*/
7455 		for (j = 0; j < dc_state->stream_count; j++) {
7456 			if (vars[j].aconnector == aconnector) {
7457 				pbn = vars[j].pbn;
7458 				break;
7459 			}
7460 		}
7461 
7462 		if (j == dc_state->stream_count)
7463 			continue;
7464 
7465 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7466 
7467 		if (stream->timing.flags.DSC != 1) {
7468 			dm_conn_state->pbn = pbn;
7469 			dm_conn_state->vcpi_slots = slot_num;
7470 
7471 			drm_dp_mst_atomic_enable_dsc(state,
7472 						     aconnector->port,
7473 						     dm_conn_state->pbn,
7474 						     0,
7475 						     false);
7476 			continue;
7477 		}
7478 
7479 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7480 						    aconnector->port,
7481 						    pbn, pbn_div,
7482 						    true);
7483 		if (vcpi < 0)
7484 			return vcpi;
7485 
7486 		dm_conn_state->pbn = pbn;
7487 		dm_conn_state->vcpi_slots = vcpi;
7488 	}
7489 	return 0;
7490 }
7491 #endif
7492 
7493 static void dm_drm_plane_reset(struct drm_plane *plane)
7494 {
7495 	struct dm_plane_state *amdgpu_state = NULL;
7496 
7497 	if (plane->state)
7498 		plane->funcs->atomic_destroy_state(plane, plane->state);
7499 
7500 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7501 	WARN_ON(amdgpu_state == NULL);
7502 
7503 	if (amdgpu_state)
7504 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7505 }
7506 
7507 static struct drm_plane_state *
7508 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7509 {
7510 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7511 
7512 	old_dm_plane_state = to_dm_plane_state(plane->state);
7513 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7514 	if (!dm_plane_state)
7515 		return NULL;
7516 
7517 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7518 
7519 	if (old_dm_plane_state->dc_state) {
7520 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7521 		dc_plane_state_retain(dm_plane_state->dc_state);
7522 	}
7523 
7524 	return &dm_plane_state->base;
7525 }
7526 
7527 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7528 				struct drm_plane_state *state)
7529 {
7530 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7531 
7532 	if (dm_plane_state->dc_state)
7533 		dc_plane_state_release(dm_plane_state->dc_state);
7534 
7535 	drm_atomic_helper_plane_destroy_state(plane, state);
7536 }
7537 
7538 static const struct drm_plane_funcs dm_plane_funcs = {
7539 	.update_plane	= drm_atomic_helper_update_plane,
7540 	.disable_plane	= drm_atomic_helper_disable_plane,
7541 	.destroy	= drm_primary_helper_destroy,
7542 	.reset = dm_drm_plane_reset,
7543 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7544 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7545 	.format_mod_supported = dm_plane_format_mod_supported,
7546 };
7547 
7548 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7549 				      struct drm_plane_state *new_state)
7550 {
7551 	struct amdgpu_framebuffer *afb;
7552 	struct drm_gem_object *obj;
7553 	struct amdgpu_device *adev;
7554 	struct amdgpu_bo *rbo;
7555 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7556 	uint32_t domain;
7557 	int r;
7558 
7559 	if (!new_state->fb) {
7560 		DRM_DEBUG_KMS("No FB bound\n");
7561 		return 0;
7562 	}
7563 
7564 	afb = to_amdgpu_framebuffer(new_state->fb);
7565 	obj = new_state->fb->obj[0];
7566 	rbo = gem_to_amdgpu_bo(obj);
7567 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7568 
7569 	r = amdgpu_bo_reserve(rbo, true);
7570 	if (r) {
7571 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7572 		return r;
7573 	}
7574 
7575 	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7576 	if (r) {
7577 		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7578 		goto error_unlock;
7579 	}
7580 
7581 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7582 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7583 	else
7584 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7585 
7586 	r = amdgpu_bo_pin(rbo, domain);
7587 	if (unlikely(r != 0)) {
7588 		if (r != -ERESTARTSYS)
7589 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7590 		goto error_unlock;
7591 	}
7592 
7593 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7594 	if (unlikely(r != 0)) {
7595 		DRM_ERROR("%p bind failed\n", rbo);
7596 		goto error_unpin;
7597 	}
7598 
7599 	amdgpu_bo_unreserve(rbo);
7600 
7601 	afb->address = amdgpu_bo_gpu_offset(rbo);
7602 
7603 	amdgpu_bo_ref(rbo);
7604 
7605 	/**
7606 	 * We don't do surface updates on planes that have been newly created,
7607 	 * but we also don't have the afb->address during atomic check.
7608 	 *
7609 	 * Fill in buffer attributes depending on the address here, but only on
7610 	 * newly created planes since they're not being used by DC yet and this
7611 	 * won't modify global state.
7612 	 */
7613 	dm_plane_state_old = to_dm_plane_state(plane->state);
7614 	dm_plane_state_new = to_dm_plane_state(new_state);
7615 
7616 	if (dm_plane_state_new->dc_state &&
7617 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7618 		struct dc_plane_state *plane_state =
7619 			dm_plane_state_new->dc_state;
7620 		bool force_disable_dcc = !plane_state->dcc.enable;
7621 
7622 		fill_plane_buffer_attributes(
7623 			adev, afb, plane_state->format, plane_state->rotation,
7624 			afb->tiling_flags,
7625 			&plane_state->tiling_info, &plane_state->plane_size,
7626 			&plane_state->dcc, &plane_state->address,
7627 			afb->tmz_surface, force_disable_dcc);
7628 	}
7629 
7630 	return 0;
7631 
7632 error_unpin:
7633 	amdgpu_bo_unpin(rbo);
7634 
7635 error_unlock:
7636 	amdgpu_bo_unreserve(rbo);
7637 	return r;
7638 }
7639 
7640 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7641 				       struct drm_plane_state *old_state)
7642 {
7643 	struct amdgpu_bo *rbo;
7644 	int r;
7645 
7646 	if (!old_state->fb)
7647 		return;
7648 
7649 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7650 	r = amdgpu_bo_reserve(rbo, false);
7651 	if (unlikely(r)) {
7652 		DRM_ERROR("failed to reserve rbo before unpin\n");
7653 		return;
7654 	}
7655 
7656 	amdgpu_bo_unpin(rbo);
7657 	amdgpu_bo_unreserve(rbo);
7658 	amdgpu_bo_unref(&rbo);
7659 }
7660 
7661 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7662 				       struct drm_crtc_state *new_crtc_state)
7663 {
7664 	struct drm_framebuffer *fb = state->fb;
7665 	int min_downscale, max_upscale;
7666 	int min_scale = 0;
7667 	int max_scale = INT_MAX;
7668 
7669 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7670 	if (fb && state->crtc) {
7671 		/* Validate viewport to cover the case when only the position changes */
7672 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7673 			int viewport_width = state->crtc_w;
7674 			int viewport_height = state->crtc_h;
7675 
7676 			if (state->crtc_x < 0)
7677 				viewport_width += state->crtc_x;
7678 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7679 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7680 
7681 			if (state->crtc_y < 0)
7682 				viewport_height += state->crtc_y;
7683 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7684 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7685 
7686 			if (viewport_width < 0 || viewport_height < 0) {
7687 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7688 				return -EINVAL;
7689 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7690 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7691 				return -EINVAL;
7692 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7693 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7694 				return -EINVAL;
7695 			}
7696 
7697 		}
7698 
7699 		/* Get min/max allowed scaling factors from plane caps. */
7700 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7701 					     &min_downscale, &max_upscale);
7702 		/*
7703 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7704 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7705 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7706 		 */
7707 		min_scale = (1000 << 16) / max_upscale;
7708 		max_scale = (1000 << 16) / min_downscale;
7709 	}
7710 
7711 	return drm_atomic_helper_check_plane_state(
7712 		state, new_crtc_state, min_scale, max_scale, true, true);
7713 }
7714 
7715 static int dm_plane_atomic_check(struct drm_plane *plane,
7716 				 struct drm_atomic_state *state)
7717 {
7718 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7719 										 plane);
7720 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7721 	struct dc *dc = adev->dm.dc;
7722 	struct dm_plane_state *dm_plane_state;
7723 	struct dc_scaling_info scaling_info;
7724 	struct drm_crtc_state *new_crtc_state;
7725 	int ret;
7726 
7727 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7728 
7729 	dm_plane_state = to_dm_plane_state(new_plane_state);
7730 
7731 	if (!dm_plane_state->dc_state)
7732 		return 0;
7733 
7734 	new_crtc_state =
7735 		drm_atomic_get_new_crtc_state(state,
7736 					      new_plane_state->crtc);
7737 	if (!new_crtc_state)
7738 		return -EINVAL;
7739 
7740 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7741 	if (ret)
7742 		return ret;
7743 
7744 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7745 	if (ret)
7746 		return ret;
7747 
7748 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7749 		return 0;
7750 
7751 	return -EINVAL;
7752 }
7753 
7754 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7755 				       struct drm_atomic_state *state)
7756 {
7757 	/* Only support async updates on cursor planes. */
7758 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7759 		return -EINVAL;
7760 
7761 	return 0;
7762 }
7763 
7764 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7765 					 struct drm_atomic_state *state)
7766 {
7767 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7768 									   plane);
7769 	struct drm_plane_state *old_state =
7770 		drm_atomic_get_old_plane_state(state, plane);
7771 
7772 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7773 
7774 	swap(plane->state->fb, new_state->fb);
7775 
7776 	plane->state->src_x = new_state->src_x;
7777 	plane->state->src_y = new_state->src_y;
7778 	plane->state->src_w = new_state->src_w;
7779 	plane->state->src_h = new_state->src_h;
7780 	plane->state->crtc_x = new_state->crtc_x;
7781 	plane->state->crtc_y = new_state->crtc_y;
7782 	plane->state->crtc_w = new_state->crtc_w;
7783 	plane->state->crtc_h = new_state->crtc_h;
7784 
7785 	handle_cursor_update(plane, old_state);
7786 }
7787 
7788 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7789 	.prepare_fb = dm_plane_helper_prepare_fb,
7790 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7791 	.atomic_check = dm_plane_atomic_check,
7792 	.atomic_async_check = dm_plane_atomic_async_check,
7793 	.atomic_async_update = dm_plane_atomic_async_update
7794 };
7795 
7796 /*
7797  * TODO: these are currently initialized to rgb formats only.
7798  * For future use cases we should either initialize them dynamically based on
7799  * plane capabilities, or initialize this array to all formats, so internal drm
7800  * check will succeed, and let DC implement proper check
7801  */
7802 static const uint32_t rgb_formats[] = {
7803 	DRM_FORMAT_XRGB8888,
7804 	DRM_FORMAT_ARGB8888,
7805 	DRM_FORMAT_RGBA8888,
7806 	DRM_FORMAT_XRGB2101010,
7807 	DRM_FORMAT_XBGR2101010,
7808 	DRM_FORMAT_ARGB2101010,
7809 	DRM_FORMAT_ABGR2101010,
7810 	DRM_FORMAT_XRGB16161616,
7811 	DRM_FORMAT_XBGR16161616,
7812 	DRM_FORMAT_ARGB16161616,
7813 	DRM_FORMAT_ABGR16161616,
7814 	DRM_FORMAT_XBGR8888,
7815 	DRM_FORMAT_ABGR8888,
7816 	DRM_FORMAT_RGB565,
7817 };
7818 
7819 static const uint32_t overlay_formats[] = {
7820 	DRM_FORMAT_XRGB8888,
7821 	DRM_FORMAT_ARGB8888,
7822 	DRM_FORMAT_RGBA8888,
7823 	DRM_FORMAT_XBGR8888,
7824 	DRM_FORMAT_ABGR8888,
7825 	DRM_FORMAT_RGB565
7826 };
7827 
7828 static const u32 cursor_formats[] = {
7829 	DRM_FORMAT_ARGB8888
7830 };
7831 
7832 static int get_plane_formats(const struct drm_plane *plane,
7833 			     const struct dc_plane_cap *plane_cap,
7834 			     uint32_t *formats, int max_formats)
7835 {
7836 	int i, num_formats = 0;
7837 
7838 	/*
7839 	 * TODO: Query support for each group of formats directly from
7840 	 * DC plane caps. This will require adding more formats to the
7841 	 * caps list.
7842 	 */
7843 
7844 	switch (plane->type) {
7845 	case DRM_PLANE_TYPE_PRIMARY:
7846 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7847 			if (num_formats >= max_formats)
7848 				break;
7849 
7850 			formats[num_formats++] = rgb_formats[i];
7851 		}
7852 
7853 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7854 			formats[num_formats++] = DRM_FORMAT_NV12;
7855 		if (plane_cap && plane_cap->pixel_format_support.p010)
7856 			formats[num_formats++] = DRM_FORMAT_P010;
7857 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7858 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7859 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7860 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7861 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7862 		}
7863 		break;
7864 
7865 	case DRM_PLANE_TYPE_OVERLAY:
7866 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7867 			if (num_formats >= max_formats)
7868 				break;
7869 
7870 			formats[num_formats++] = overlay_formats[i];
7871 		}
7872 		break;
7873 
7874 	case DRM_PLANE_TYPE_CURSOR:
7875 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7876 			if (num_formats >= max_formats)
7877 				break;
7878 
7879 			formats[num_formats++] = cursor_formats[i];
7880 		}
7881 		break;
7882 	}
7883 
7884 	return num_formats;
7885 }
7886 
7887 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7888 				struct drm_plane *plane,
7889 				unsigned long possible_crtcs,
7890 				const struct dc_plane_cap *plane_cap)
7891 {
7892 	uint32_t formats[32];
7893 	int num_formats;
7894 	int res = -EPERM;
7895 	unsigned int supported_rotations;
7896 	uint64_t *modifiers = NULL;
7897 
7898 	num_formats = get_plane_formats(plane, plane_cap, formats,
7899 					ARRAY_SIZE(formats));
7900 
7901 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7902 	if (res)
7903 		return res;
7904 
7905 	if (modifiers == NULL)
7906 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7907 
7908 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7909 				       &dm_plane_funcs, formats, num_formats,
7910 				       modifiers, plane->type, NULL);
7911 	kfree(modifiers);
7912 	if (res)
7913 		return res;
7914 
7915 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7916 	    plane_cap && plane_cap->per_pixel_alpha) {
7917 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7918 					  BIT(DRM_MODE_BLEND_PREMULTI) |
7919 					  BIT(DRM_MODE_BLEND_COVERAGE);
7920 
7921 		drm_plane_create_alpha_property(plane);
7922 		drm_plane_create_blend_mode_property(plane, blend_caps);
7923 	}
7924 
7925 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7926 	    plane_cap &&
7927 	    (plane_cap->pixel_format_support.nv12 ||
7928 	     plane_cap->pixel_format_support.p010)) {
7929 		/* This only affects YUV formats. */
7930 		drm_plane_create_color_properties(
7931 			plane,
7932 			BIT(DRM_COLOR_YCBCR_BT601) |
7933 			BIT(DRM_COLOR_YCBCR_BT709) |
7934 			BIT(DRM_COLOR_YCBCR_BT2020),
7935 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7936 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7937 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7938 	}
7939 
7940 	supported_rotations =
7941 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7942 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7943 
7944 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7945 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7946 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7947 						   supported_rotations);
7948 
7949 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7950 
7951 	/* Create (reset) the plane state */
7952 	if (plane->funcs->reset)
7953 		plane->funcs->reset(plane);
7954 
7955 	return 0;
7956 }
7957 
7958 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7959 			       struct drm_plane *plane,
7960 			       uint32_t crtc_index)
7961 {
7962 	struct amdgpu_crtc *acrtc = NULL;
7963 	struct drm_plane *cursor_plane;
7964 
7965 	int res = -ENOMEM;
7966 
7967 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7968 	if (!cursor_plane)
7969 		goto fail;
7970 
7971 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7972 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7973 
7974 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7975 	if (!acrtc)
7976 		goto fail;
7977 
7978 	res = drm_crtc_init_with_planes(
7979 			dm->ddev,
7980 			&acrtc->base,
7981 			plane,
7982 			cursor_plane,
7983 			&amdgpu_dm_crtc_funcs, NULL);
7984 
7985 	if (res)
7986 		goto fail;
7987 
7988 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7989 
7990 	/* Create (reset) the plane state */
7991 	if (acrtc->base.funcs->reset)
7992 		acrtc->base.funcs->reset(&acrtc->base);
7993 
7994 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7995 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7996 
7997 	acrtc->crtc_id = crtc_index;
7998 	acrtc->base.enabled = false;
7999 	acrtc->otg_inst = -1;
8000 
8001 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8002 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8003 				   true, MAX_COLOR_LUT_ENTRIES);
8004 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8005 
8006 	return 0;
8007 
8008 fail:
8009 	kfree(acrtc);
8010 	kfree(cursor_plane);
8011 	return res;
8012 }
8013 
8014 
8015 static int to_drm_connector_type(enum signal_type st)
8016 {
8017 	switch (st) {
8018 	case SIGNAL_TYPE_HDMI_TYPE_A:
8019 		return DRM_MODE_CONNECTOR_HDMIA;
8020 	case SIGNAL_TYPE_EDP:
8021 		return DRM_MODE_CONNECTOR_eDP;
8022 	case SIGNAL_TYPE_LVDS:
8023 		return DRM_MODE_CONNECTOR_LVDS;
8024 	case SIGNAL_TYPE_RGB:
8025 		return DRM_MODE_CONNECTOR_VGA;
8026 	case SIGNAL_TYPE_DISPLAY_PORT:
8027 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8028 		return DRM_MODE_CONNECTOR_DisplayPort;
8029 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8030 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8031 		return DRM_MODE_CONNECTOR_DVID;
8032 	case SIGNAL_TYPE_VIRTUAL:
8033 		return DRM_MODE_CONNECTOR_VIRTUAL;
8034 
8035 	default:
8036 		return DRM_MODE_CONNECTOR_Unknown;
8037 	}
8038 }
8039 
8040 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8041 {
8042 	struct drm_encoder *encoder;
8043 
8044 	/* There is only one encoder per connector */
8045 	drm_connector_for_each_possible_encoder(connector, encoder)
8046 		return encoder;
8047 
8048 	return NULL;
8049 }
8050 
8051 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8052 {
8053 	struct drm_encoder *encoder;
8054 	struct amdgpu_encoder *amdgpu_encoder;
8055 
8056 	encoder = amdgpu_dm_connector_to_encoder(connector);
8057 
8058 	if (encoder == NULL)
8059 		return;
8060 
8061 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8062 
8063 	amdgpu_encoder->native_mode.clock = 0;
8064 
8065 	if (!list_empty(&connector->probed_modes)) {
8066 		struct drm_display_mode *preferred_mode = NULL;
8067 
8068 		list_for_each_entry(preferred_mode,
8069 				    &connector->probed_modes,
8070 				    head) {
8071 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8072 				amdgpu_encoder->native_mode = *preferred_mode;
8073 
8074 			break;
8075 		}
8076 
8077 	}
8078 }
8079 
8080 static struct drm_display_mode *
8081 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8082 			     char *name,
8083 			     int hdisplay, int vdisplay)
8084 {
8085 	struct drm_device *dev = encoder->dev;
8086 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8087 	struct drm_display_mode *mode = NULL;
8088 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8089 
8090 	mode = drm_mode_duplicate(dev, native_mode);
8091 
8092 	if (mode == NULL)
8093 		return NULL;
8094 
8095 	mode->hdisplay = hdisplay;
8096 	mode->vdisplay = vdisplay;
8097 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8098 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8099 
8100 	return mode;
8101 
8102 }
8103 
8104 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8105 						 struct drm_connector *connector)
8106 {
8107 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8108 	struct drm_display_mode *mode = NULL;
8109 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8110 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8111 				to_amdgpu_dm_connector(connector);
8112 	int i;
8113 	int n;
8114 	struct mode_size {
8115 		char name[DRM_DISPLAY_MODE_LEN];
8116 		int w;
8117 		int h;
8118 	} common_modes[] = {
8119 		{  "640x480",  640,  480},
8120 		{  "800x600",  800,  600},
8121 		{ "1024x768", 1024,  768},
8122 		{ "1280x720", 1280,  720},
8123 		{ "1280x800", 1280,  800},
8124 		{"1280x1024", 1280, 1024},
8125 		{ "1440x900", 1440,  900},
8126 		{"1680x1050", 1680, 1050},
8127 		{"1600x1200", 1600, 1200},
8128 		{"1920x1080", 1920, 1080},
8129 		{"1920x1200", 1920, 1200}
8130 	};
8131 
8132 	n = ARRAY_SIZE(common_modes);
8133 
8134 	for (i = 0; i < n; i++) {
8135 		struct drm_display_mode *curmode = NULL;
8136 		bool mode_existed = false;
8137 
8138 		if (common_modes[i].w > native_mode->hdisplay ||
8139 		    common_modes[i].h > native_mode->vdisplay ||
8140 		   (common_modes[i].w == native_mode->hdisplay &&
8141 		    common_modes[i].h == native_mode->vdisplay))
8142 			continue;
8143 
8144 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8145 			if (common_modes[i].w == curmode->hdisplay &&
8146 			    common_modes[i].h == curmode->vdisplay) {
8147 				mode_existed = true;
8148 				break;
8149 			}
8150 		}
8151 
8152 		if (mode_existed)
8153 			continue;
8154 
8155 		mode = amdgpu_dm_create_common_mode(encoder,
8156 				common_modes[i].name, common_modes[i].w,
8157 				common_modes[i].h);
8158 		if (!mode)
8159 			continue;
8160 
8161 		drm_mode_probed_add(connector, mode);
8162 		amdgpu_dm_connector->num_modes++;
8163 	}
8164 }
8165 
8166 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8167 {
8168 	struct drm_encoder *encoder;
8169 	struct amdgpu_encoder *amdgpu_encoder;
8170 	const struct drm_display_mode *native_mode;
8171 
8172 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8173 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8174 		return;
8175 
8176 	encoder = amdgpu_dm_connector_to_encoder(connector);
8177 	if (!encoder)
8178 		return;
8179 
8180 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8181 
8182 	native_mode = &amdgpu_encoder->native_mode;
8183 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8184 		return;
8185 
8186 	drm_connector_set_panel_orientation_with_quirk(connector,
8187 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8188 						       native_mode->hdisplay,
8189 						       native_mode->vdisplay);
8190 }
8191 
8192 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8193 					      struct edid *edid)
8194 {
8195 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8196 			to_amdgpu_dm_connector(connector);
8197 
8198 	if (edid) {
8199 		/* empty probed_modes */
8200 		INIT_LIST_HEAD(&connector->probed_modes);
8201 		amdgpu_dm_connector->num_modes =
8202 				drm_add_edid_modes(connector, edid);
8203 
8204 		/* sorting the probed modes before calling function
8205 		 * amdgpu_dm_get_native_mode() since EDID can have
8206 		 * more than one preferred mode. The modes that are
8207 		 * later in the probed mode list could be of higher
8208 		 * and preferred resolution. For example, 3840x2160
8209 		 * resolution in base EDID preferred timing and 4096x2160
8210 		 * preferred resolution in DID extension block later.
8211 		 */
8212 		drm_mode_sort(&connector->probed_modes);
8213 		amdgpu_dm_get_native_mode(connector);
8214 
8215 		/* Freesync capabilities are reset by calling
8216 		 * drm_add_edid_modes() and need to be
8217 		 * restored here.
8218 		 */
8219 		amdgpu_dm_update_freesync_caps(connector, edid);
8220 
8221 		amdgpu_set_panel_orientation(connector);
8222 	} else {
8223 		amdgpu_dm_connector->num_modes = 0;
8224 	}
8225 }
8226 
8227 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8228 			      struct drm_display_mode *mode)
8229 {
8230 	struct drm_display_mode *m;
8231 
8232 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8233 		if (drm_mode_equal(m, mode))
8234 			return true;
8235 	}
8236 
8237 	return false;
8238 }
8239 
8240 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8241 {
8242 	const struct drm_display_mode *m;
8243 	struct drm_display_mode *new_mode;
8244 	uint i;
8245 	uint32_t new_modes_count = 0;
8246 
8247 	/* Standard FPS values
8248 	 *
8249 	 * 23.976       - TV/NTSC
8250 	 * 24 	        - Cinema
8251 	 * 25 	        - TV/PAL
8252 	 * 29.97        - TV/NTSC
8253 	 * 30 	        - TV/NTSC
8254 	 * 48 	        - Cinema HFR
8255 	 * 50 	        - TV/PAL
8256 	 * 60 	        - Commonly used
8257 	 * 48,72,96,120 - Multiples of 24
8258 	 */
8259 	static const uint32_t common_rates[] = {
8260 		23976, 24000, 25000, 29970, 30000,
8261 		48000, 50000, 60000, 72000, 96000, 120000
8262 	};
8263 
8264 	/*
8265 	 * Find mode with highest refresh rate with the same resolution
8266 	 * as the preferred mode. Some monitors report a preferred mode
8267 	 * with lower resolution than the highest refresh rate supported.
8268 	 */
8269 
8270 	m = get_highest_refresh_rate_mode(aconnector, true);
8271 	if (!m)
8272 		return 0;
8273 
8274 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8275 		uint64_t target_vtotal, target_vtotal_diff;
8276 		uint64_t num, den;
8277 
8278 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8279 			continue;
8280 
8281 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8282 		    common_rates[i] > aconnector->max_vfreq * 1000)
8283 			continue;
8284 
8285 		num = (unsigned long long)m->clock * 1000 * 1000;
8286 		den = common_rates[i] * (unsigned long long)m->htotal;
8287 		target_vtotal = div_u64(num, den);
8288 		target_vtotal_diff = target_vtotal - m->vtotal;
8289 
8290 		/* Check for illegal modes */
8291 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8292 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8293 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8294 			continue;
8295 
8296 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8297 		if (!new_mode)
8298 			goto out;
8299 
8300 		new_mode->vtotal += (u16)target_vtotal_diff;
8301 		new_mode->vsync_start += (u16)target_vtotal_diff;
8302 		new_mode->vsync_end += (u16)target_vtotal_diff;
8303 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8304 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8305 
8306 		if (!is_duplicate_mode(aconnector, new_mode)) {
8307 			drm_mode_probed_add(&aconnector->base, new_mode);
8308 			new_modes_count += 1;
8309 		} else
8310 			drm_mode_destroy(aconnector->base.dev, new_mode);
8311 	}
8312  out:
8313 	return new_modes_count;
8314 }
8315 
8316 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8317 						   struct edid *edid)
8318 {
8319 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8320 		to_amdgpu_dm_connector(connector);
8321 
8322 	if (!edid)
8323 		return;
8324 
8325 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8326 		amdgpu_dm_connector->num_modes +=
8327 			add_fs_modes(amdgpu_dm_connector);
8328 }
8329 
8330 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8331 {
8332 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8333 			to_amdgpu_dm_connector(connector);
8334 	struct drm_encoder *encoder;
8335 	struct edid *edid = amdgpu_dm_connector->edid;
8336 
8337 	encoder = amdgpu_dm_connector_to_encoder(connector);
8338 
8339 	if (!drm_edid_is_valid(edid)) {
8340 		amdgpu_dm_connector->num_modes =
8341 				drm_add_modes_noedid(connector, 640, 480);
8342 	} else {
8343 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8344 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8345 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8346 	}
8347 	amdgpu_dm_fbc_init(connector);
8348 
8349 	return amdgpu_dm_connector->num_modes;
8350 }
8351 
8352 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8353 				     struct amdgpu_dm_connector *aconnector,
8354 				     int connector_type,
8355 				     struct dc_link *link,
8356 				     int link_index)
8357 {
8358 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8359 
8360 	/*
8361 	 * Some of the properties below require access to state, like bpc.
8362 	 * Allocate some default initial connector state with our reset helper.
8363 	 */
8364 	if (aconnector->base.funcs->reset)
8365 		aconnector->base.funcs->reset(&aconnector->base);
8366 
8367 	aconnector->connector_id = link_index;
8368 	aconnector->dc_link = link;
8369 	aconnector->base.interlace_allowed = false;
8370 	aconnector->base.doublescan_allowed = false;
8371 	aconnector->base.stereo_allowed = false;
8372 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8373 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8374 	aconnector->audio_inst = -1;
8375 	mutex_init(&aconnector->hpd_lock);
8376 
8377 	/*
8378 	 * configure support HPD hot plug connector_>polled default value is 0
8379 	 * which means HPD hot plug not supported
8380 	 */
8381 	switch (connector_type) {
8382 	case DRM_MODE_CONNECTOR_HDMIA:
8383 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8384 		aconnector->base.ycbcr_420_allowed =
8385 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8386 		break;
8387 	case DRM_MODE_CONNECTOR_DisplayPort:
8388 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8389 		link->link_enc = link_enc_cfg_get_link_enc(link);
8390 		ASSERT(link->link_enc);
8391 		if (link->link_enc)
8392 			aconnector->base.ycbcr_420_allowed =
8393 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8394 		break;
8395 	case DRM_MODE_CONNECTOR_DVID:
8396 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8397 		break;
8398 	default:
8399 		break;
8400 	}
8401 
8402 	drm_object_attach_property(&aconnector->base.base,
8403 				dm->ddev->mode_config.scaling_mode_property,
8404 				DRM_MODE_SCALE_NONE);
8405 
8406 	drm_object_attach_property(&aconnector->base.base,
8407 				adev->mode_info.underscan_property,
8408 				UNDERSCAN_OFF);
8409 	drm_object_attach_property(&aconnector->base.base,
8410 				adev->mode_info.underscan_hborder_property,
8411 				0);
8412 	drm_object_attach_property(&aconnector->base.base,
8413 				adev->mode_info.underscan_vborder_property,
8414 				0);
8415 
8416 	if (!aconnector->mst_port)
8417 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8418 
8419 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8420 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8421 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8422 
8423 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8424 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8425 		drm_object_attach_property(&aconnector->base.base,
8426 				adev->mode_info.abm_level_property, 0);
8427 	}
8428 
8429 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8430 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8431 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8432 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8433 
8434 		if (!aconnector->mst_port)
8435 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8436 
8437 #ifdef CONFIG_DRM_AMD_DC_HDCP
8438 		if (adev->dm.hdcp_workqueue)
8439 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8440 #endif
8441 	}
8442 }
8443 
8444 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8445 			      struct i2c_msg *msgs, int num)
8446 {
8447 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8448 	struct ddc_service *ddc_service = i2c->ddc_service;
8449 	struct i2c_command cmd;
8450 	int i;
8451 	int result = -EIO;
8452 
8453 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8454 
8455 	if (!cmd.payloads)
8456 		return result;
8457 
8458 	cmd.number_of_payloads = num;
8459 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8460 	cmd.speed = 100;
8461 
8462 	for (i = 0; i < num; i++) {
8463 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8464 		cmd.payloads[i].address = msgs[i].addr;
8465 		cmd.payloads[i].length = msgs[i].len;
8466 		cmd.payloads[i].data = msgs[i].buf;
8467 	}
8468 
8469 	if (dc_submit_i2c(
8470 			ddc_service->ctx->dc,
8471 			ddc_service->ddc_pin->hw_info.ddc_channel,
8472 			&cmd))
8473 		result = num;
8474 
8475 	kfree(cmd.payloads);
8476 	return result;
8477 }
8478 
8479 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8480 {
8481 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8482 }
8483 
8484 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8485 	.master_xfer = amdgpu_dm_i2c_xfer,
8486 	.functionality = amdgpu_dm_i2c_func,
8487 };
8488 
8489 static struct amdgpu_i2c_adapter *
8490 create_i2c(struct ddc_service *ddc_service,
8491 	   int link_index,
8492 	   int *res)
8493 {
8494 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8495 	struct amdgpu_i2c_adapter *i2c;
8496 
8497 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8498 	if (!i2c)
8499 		return NULL;
8500 	i2c->base.owner = THIS_MODULE;
8501 	i2c->base.class = I2C_CLASS_DDC;
8502 	i2c->base.dev.parent = &adev->pdev->dev;
8503 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8504 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8505 	i2c_set_adapdata(&i2c->base, i2c);
8506 	i2c->ddc_service = ddc_service;
8507 	if (i2c->ddc_service->ddc_pin)
8508 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8509 
8510 	return i2c;
8511 }
8512 
8513 
8514 /*
8515  * Note: this function assumes that dc_link_detect() was called for the
8516  * dc_link which will be represented by this aconnector.
8517  */
8518 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8519 				    struct amdgpu_dm_connector *aconnector,
8520 				    uint32_t link_index,
8521 				    struct amdgpu_encoder *aencoder)
8522 {
8523 	int res = 0;
8524 	int connector_type;
8525 	struct dc *dc = dm->dc;
8526 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8527 	struct amdgpu_i2c_adapter *i2c;
8528 
8529 	link->priv = aconnector;
8530 
8531 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8532 
8533 	i2c = create_i2c(link->ddc, link->link_index, &res);
8534 	if (!i2c) {
8535 		DRM_ERROR("Failed to create i2c adapter data\n");
8536 		return -ENOMEM;
8537 	}
8538 
8539 	aconnector->i2c = i2c;
8540 	res = i2c_add_adapter(&i2c->base);
8541 
8542 	if (res) {
8543 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8544 		goto out_free;
8545 	}
8546 
8547 	connector_type = to_drm_connector_type(link->connector_signal);
8548 
8549 	res = drm_connector_init_with_ddc(
8550 			dm->ddev,
8551 			&aconnector->base,
8552 			&amdgpu_dm_connector_funcs,
8553 			connector_type,
8554 			&i2c->base);
8555 
8556 	if (res) {
8557 		DRM_ERROR("connector_init failed\n");
8558 		aconnector->connector_id = -1;
8559 		goto out_free;
8560 	}
8561 
8562 	drm_connector_helper_add(
8563 			&aconnector->base,
8564 			&amdgpu_dm_connector_helper_funcs);
8565 
8566 	amdgpu_dm_connector_init_helper(
8567 		dm,
8568 		aconnector,
8569 		connector_type,
8570 		link,
8571 		link_index);
8572 
8573 	drm_connector_attach_encoder(
8574 		&aconnector->base, &aencoder->base);
8575 
8576 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8577 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8578 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8579 
8580 out_free:
8581 	if (res) {
8582 		kfree(i2c);
8583 		aconnector->i2c = NULL;
8584 	}
8585 	return res;
8586 }
8587 
8588 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8589 {
8590 	switch (adev->mode_info.num_crtc) {
8591 	case 1:
8592 		return 0x1;
8593 	case 2:
8594 		return 0x3;
8595 	case 3:
8596 		return 0x7;
8597 	case 4:
8598 		return 0xf;
8599 	case 5:
8600 		return 0x1f;
8601 	case 6:
8602 	default:
8603 		return 0x3f;
8604 	}
8605 }
8606 
8607 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8608 				  struct amdgpu_encoder *aencoder,
8609 				  uint32_t link_index)
8610 {
8611 	struct amdgpu_device *adev = drm_to_adev(dev);
8612 
8613 	int res = drm_encoder_init(dev,
8614 				   &aencoder->base,
8615 				   &amdgpu_dm_encoder_funcs,
8616 				   DRM_MODE_ENCODER_TMDS,
8617 				   NULL);
8618 
8619 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8620 
8621 	if (!res)
8622 		aencoder->encoder_id = link_index;
8623 	else
8624 		aencoder->encoder_id = -1;
8625 
8626 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8627 
8628 	return res;
8629 }
8630 
8631 static void manage_dm_interrupts(struct amdgpu_device *adev,
8632 				 struct amdgpu_crtc *acrtc,
8633 				 bool enable)
8634 {
8635 	/*
8636 	 * We have no guarantee that the frontend index maps to the same
8637 	 * backend index - some even map to more than one.
8638 	 *
8639 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8640 	 */
8641 	int irq_type =
8642 		amdgpu_display_crtc_idx_to_irq_type(
8643 			adev,
8644 			acrtc->crtc_id);
8645 
8646 	if (enable) {
8647 		drm_crtc_vblank_on(&acrtc->base);
8648 		amdgpu_irq_get(
8649 			adev,
8650 			&adev->pageflip_irq,
8651 			irq_type);
8652 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8653 		amdgpu_irq_get(
8654 			adev,
8655 			&adev->vline0_irq,
8656 			irq_type);
8657 #endif
8658 	} else {
8659 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8660 		amdgpu_irq_put(
8661 			adev,
8662 			&adev->vline0_irq,
8663 			irq_type);
8664 #endif
8665 		amdgpu_irq_put(
8666 			adev,
8667 			&adev->pageflip_irq,
8668 			irq_type);
8669 		drm_crtc_vblank_off(&acrtc->base);
8670 	}
8671 }
8672 
8673 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8674 				      struct amdgpu_crtc *acrtc)
8675 {
8676 	int irq_type =
8677 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8678 
8679 	/**
8680 	 * This reads the current state for the IRQ and force reapplies
8681 	 * the setting to hardware.
8682 	 */
8683 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8684 }
8685 
8686 static bool
8687 is_scaling_state_different(const struct dm_connector_state *dm_state,
8688 			   const struct dm_connector_state *old_dm_state)
8689 {
8690 	if (dm_state->scaling != old_dm_state->scaling)
8691 		return true;
8692 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8693 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8694 			return true;
8695 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8696 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8697 			return true;
8698 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8699 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8700 		return true;
8701 	return false;
8702 }
8703 
8704 #ifdef CONFIG_DRM_AMD_DC_HDCP
8705 static bool is_content_protection_different(struct drm_connector_state *state,
8706 					    const struct drm_connector_state *old_state,
8707 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8708 {
8709 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8710 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8711 
8712 	/* Handle: Type0/1 change */
8713 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8714 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8715 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8716 		return true;
8717 	}
8718 
8719 	/* CP is being re enabled, ignore this
8720 	 *
8721 	 * Handles:	ENABLED -> DESIRED
8722 	 */
8723 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8724 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8725 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8726 		return false;
8727 	}
8728 
8729 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8730 	 *
8731 	 * Handles:	UNDESIRED -> ENABLED
8732 	 */
8733 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8734 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8735 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8736 
8737 	/* Stream removed and re-enabled
8738 	 *
8739 	 * Can sometimes overlap with the HPD case,
8740 	 * thus set update_hdcp to false to avoid
8741 	 * setting HDCP multiple times.
8742 	 *
8743 	 * Handles:	DESIRED -> DESIRED (Special case)
8744 	 */
8745 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8746 		state->crtc && state->crtc->enabled &&
8747 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8748 		dm_con_state->update_hdcp = false;
8749 		return true;
8750 	}
8751 
8752 	/* Hot-plug, headless s3, dpms
8753 	 *
8754 	 * Only start HDCP if the display is connected/enabled.
8755 	 * update_hdcp flag will be set to false until the next
8756 	 * HPD comes in.
8757 	 *
8758 	 * Handles:	DESIRED -> DESIRED (Special case)
8759 	 */
8760 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8761 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8762 		dm_con_state->update_hdcp = false;
8763 		return true;
8764 	}
8765 
8766 	/*
8767 	 * Handles:	UNDESIRED -> UNDESIRED
8768 	 *		DESIRED -> DESIRED
8769 	 *		ENABLED -> ENABLED
8770 	 */
8771 	if (old_state->content_protection == state->content_protection)
8772 		return false;
8773 
8774 	/*
8775 	 * Handles:	UNDESIRED -> DESIRED
8776 	 *		DESIRED -> UNDESIRED
8777 	 *		ENABLED -> UNDESIRED
8778 	 */
8779 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8780 		return true;
8781 
8782 	/*
8783 	 * Handles:	DESIRED -> ENABLED
8784 	 */
8785 	return false;
8786 }
8787 
8788 #endif
8789 static void remove_stream(struct amdgpu_device *adev,
8790 			  struct amdgpu_crtc *acrtc,
8791 			  struct dc_stream_state *stream)
8792 {
8793 	/* this is the update mode case */
8794 
8795 	acrtc->otg_inst = -1;
8796 	acrtc->enabled = false;
8797 }
8798 
8799 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8800 			       struct dc_cursor_position *position)
8801 {
8802 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8803 	int x, y;
8804 	int xorigin = 0, yorigin = 0;
8805 
8806 	if (!crtc || !plane->state->fb)
8807 		return 0;
8808 
8809 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8810 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8811 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8812 			  __func__,
8813 			  plane->state->crtc_w,
8814 			  plane->state->crtc_h);
8815 		return -EINVAL;
8816 	}
8817 
8818 	x = plane->state->crtc_x;
8819 	y = plane->state->crtc_y;
8820 
8821 	if (x <= -amdgpu_crtc->max_cursor_width ||
8822 	    y <= -amdgpu_crtc->max_cursor_height)
8823 		return 0;
8824 
8825 	if (x < 0) {
8826 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8827 		x = 0;
8828 	}
8829 	if (y < 0) {
8830 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8831 		y = 0;
8832 	}
8833 	position->enable = true;
8834 	position->translate_by_source = true;
8835 	position->x = x;
8836 	position->y = y;
8837 	position->x_hotspot = xorigin;
8838 	position->y_hotspot = yorigin;
8839 
8840 	return 0;
8841 }
8842 
8843 static void handle_cursor_update(struct drm_plane *plane,
8844 				 struct drm_plane_state *old_plane_state)
8845 {
8846 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8847 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8848 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8849 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8850 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8851 	uint64_t address = afb ? afb->address : 0;
8852 	struct dc_cursor_position position = {0};
8853 	struct dc_cursor_attributes attributes;
8854 	int ret;
8855 
8856 	if (!plane->state->fb && !old_plane_state->fb)
8857 		return;
8858 
8859 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8860 		      __func__,
8861 		      amdgpu_crtc->crtc_id,
8862 		      plane->state->crtc_w,
8863 		      plane->state->crtc_h);
8864 
8865 	ret = get_cursor_position(plane, crtc, &position);
8866 	if (ret)
8867 		return;
8868 
8869 	if (!position.enable) {
8870 		/* turn off cursor */
8871 		if (crtc_state && crtc_state->stream) {
8872 			mutex_lock(&adev->dm.dc_lock);
8873 			dc_stream_set_cursor_position(crtc_state->stream,
8874 						      &position);
8875 			mutex_unlock(&adev->dm.dc_lock);
8876 		}
8877 		return;
8878 	}
8879 
8880 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8881 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8882 
8883 	memset(&attributes, 0, sizeof(attributes));
8884 	attributes.address.high_part = upper_32_bits(address);
8885 	attributes.address.low_part  = lower_32_bits(address);
8886 	attributes.width             = plane->state->crtc_w;
8887 	attributes.height            = plane->state->crtc_h;
8888 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8889 	attributes.rotation_angle    = 0;
8890 	attributes.attribute_flags.value = 0;
8891 
8892 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8893 
8894 	if (crtc_state->stream) {
8895 		mutex_lock(&adev->dm.dc_lock);
8896 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8897 							 &attributes))
8898 			DRM_ERROR("DC failed to set cursor attributes\n");
8899 
8900 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8901 						   &position))
8902 			DRM_ERROR("DC failed to set cursor position\n");
8903 		mutex_unlock(&adev->dm.dc_lock);
8904 	}
8905 }
8906 
8907 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8908 {
8909 
8910 	assert_spin_locked(&acrtc->base.dev->event_lock);
8911 	WARN_ON(acrtc->event);
8912 
8913 	acrtc->event = acrtc->base.state->event;
8914 
8915 	/* Set the flip status */
8916 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8917 
8918 	/* Mark this event as consumed */
8919 	acrtc->base.state->event = NULL;
8920 
8921 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8922 		     acrtc->crtc_id);
8923 }
8924 
8925 static void update_freesync_state_on_stream(
8926 	struct amdgpu_display_manager *dm,
8927 	struct dm_crtc_state *new_crtc_state,
8928 	struct dc_stream_state *new_stream,
8929 	struct dc_plane_state *surface,
8930 	u32 flip_timestamp_in_us)
8931 {
8932 	struct mod_vrr_params vrr_params;
8933 	struct dc_info_packet vrr_infopacket = {0};
8934 	struct amdgpu_device *adev = dm->adev;
8935 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8936 	unsigned long flags;
8937 	bool pack_sdp_v1_3 = false;
8938 
8939 	if (!new_stream)
8940 		return;
8941 
8942 	/*
8943 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8944 	 * For now it's sufficient to just guard against these conditions.
8945 	 */
8946 
8947 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8948 		return;
8949 
8950 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8951         vrr_params = acrtc->dm_irq_params.vrr_params;
8952 
8953 	if (surface) {
8954 		mod_freesync_handle_preflip(
8955 			dm->freesync_module,
8956 			surface,
8957 			new_stream,
8958 			flip_timestamp_in_us,
8959 			&vrr_params);
8960 
8961 		if (adev->family < AMDGPU_FAMILY_AI &&
8962 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8963 			mod_freesync_handle_v_update(dm->freesync_module,
8964 						     new_stream, &vrr_params);
8965 
8966 			/* Need to call this before the frame ends. */
8967 			dc_stream_adjust_vmin_vmax(dm->dc,
8968 						   new_crtc_state->stream,
8969 						   &vrr_params.adjust);
8970 		}
8971 	}
8972 
8973 	mod_freesync_build_vrr_infopacket(
8974 		dm->freesync_module,
8975 		new_stream,
8976 		&vrr_params,
8977 		PACKET_TYPE_VRR,
8978 		TRANSFER_FUNC_UNKNOWN,
8979 		&vrr_infopacket,
8980 		pack_sdp_v1_3);
8981 
8982 	new_crtc_state->freesync_timing_changed |=
8983 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8984 			&vrr_params.adjust,
8985 			sizeof(vrr_params.adjust)) != 0);
8986 
8987 	new_crtc_state->freesync_vrr_info_changed |=
8988 		(memcmp(&new_crtc_state->vrr_infopacket,
8989 			&vrr_infopacket,
8990 			sizeof(vrr_infopacket)) != 0);
8991 
8992 	acrtc->dm_irq_params.vrr_params = vrr_params;
8993 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8994 
8995 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8996 	new_stream->vrr_infopacket = vrr_infopacket;
8997 
8998 	if (new_crtc_state->freesync_vrr_info_changed)
8999 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9000 			      new_crtc_state->base.crtc->base.id,
9001 			      (int)new_crtc_state->base.vrr_enabled,
9002 			      (int)vrr_params.state);
9003 
9004 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9005 }
9006 
9007 static void update_stream_irq_parameters(
9008 	struct amdgpu_display_manager *dm,
9009 	struct dm_crtc_state *new_crtc_state)
9010 {
9011 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9012 	struct mod_vrr_params vrr_params;
9013 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9014 	struct amdgpu_device *adev = dm->adev;
9015 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9016 	unsigned long flags;
9017 
9018 	if (!new_stream)
9019 		return;
9020 
9021 	/*
9022 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9023 	 * For now it's sufficient to just guard against these conditions.
9024 	 */
9025 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9026 		return;
9027 
9028 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9029 	vrr_params = acrtc->dm_irq_params.vrr_params;
9030 
9031 	if (new_crtc_state->vrr_supported &&
9032 	    config.min_refresh_in_uhz &&
9033 	    config.max_refresh_in_uhz) {
9034 		/*
9035 		 * if freesync compatible mode was set, config.state will be set
9036 		 * in atomic check
9037 		 */
9038 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9039 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9040 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9041 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9042 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9043 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9044 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9045 		} else {
9046 			config.state = new_crtc_state->base.vrr_enabled ?
9047 						     VRR_STATE_ACTIVE_VARIABLE :
9048 						     VRR_STATE_INACTIVE;
9049 		}
9050 	} else {
9051 		config.state = VRR_STATE_UNSUPPORTED;
9052 	}
9053 
9054 	mod_freesync_build_vrr_params(dm->freesync_module,
9055 				      new_stream,
9056 				      &config, &vrr_params);
9057 
9058 	new_crtc_state->freesync_timing_changed |=
9059 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9060 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9061 
9062 	new_crtc_state->freesync_config = config;
9063 	/* Copy state for access from DM IRQ handler */
9064 	acrtc->dm_irq_params.freesync_config = config;
9065 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9066 	acrtc->dm_irq_params.vrr_params = vrr_params;
9067 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9068 }
9069 
9070 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9071 					    struct dm_crtc_state *new_state)
9072 {
9073 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9074 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9075 
9076 	if (!old_vrr_active && new_vrr_active) {
9077 		/* Transition VRR inactive -> active:
9078 		 * While VRR is active, we must not disable vblank irq, as a
9079 		 * reenable after disable would compute bogus vblank/pflip
9080 		 * timestamps if it likely happened inside display front-porch.
9081 		 *
9082 		 * We also need vupdate irq for the actual core vblank handling
9083 		 * at end of vblank.
9084 		 */
9085 		dm_set_vupdate_irq(new_state->base.crtc, true);
9086 		drm_crtc_vblank_get(new_state->base.crtc);
9087 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9088 				 __func__, new_state->base.crtc->base.id);
9089 	} else if (old_vrr_active && !new_vrr_active) {
9090 		/* Transition VRR active -> inactive:
9091 		 * Allow vblank irq disable again for fixed refresh rate.
9092 		 */
9093 		dm_set_vupdate_irq(new_state->base.crtc, false);
9094 		drm_crtc_vblank_put(new_state->base.crtc);
9095 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9096 				 __func__, new_state->base.crtc->base.id);
9097 	}
9098 }
9099 
9100 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9101 {
9102 	struct drm_plane *plane;
9103 	struct drm_plane_state *old_plane_state;
9104 	int i;
9105 
9106 	/*
9107 	 * TODO: Make this per-stream so we don't issue redundant updates for
9108 	 * commits with multiple streams.
9109 	 */
9110 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9111 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9112 			handle_cursor_update(plane, old_plane_state);
9113 }
9114 
9115 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9116 				    struct dc_state *dc_state,
9117 				    struct drm_device *dev,
9118 				    struct amdgpu_display_manager *dm,
9119 				    struct drm_crtc *pcrtc,
9120 				    bool wait_for_vblank)
9121 {
9122 	uint32_t i;
9123 	uint64_t timestamp_ns;
9124 	struct drm_plane *plane;
9125 	struct drm_plane_state *old_plane_state, *new_plane_state;
9126 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9127 	struct drm_crtc_state *new_pcrtc_state =
9128 			drm_atomic_get_new_crtc_state(state, pcrtc);
9129 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9130 	struct dm_crtc_state *dm_old_crtc_state =
9131 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9132 	int planes_count = 0, vpos, hpos;
9133 	long r;
9134 	unsigned long flags;
9135 	struct amdgpu_bo *abo;
9136 	uint32_t target_vblank, last_flip_vblank;
9137 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9138 	bool pflip_present = false;
9139 	struct {
9140 		struct dc_surface_update surface_updates[MAX_SURFACES];
9141 		struct dc_plane_info plane_infos[MAX_SURFACES];
9142 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9143 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9144 		struct dc_stream_update stream_update;
9145 	} *bundle;
9146 
9147 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9148 
9149 	if (!bundle) {
9150 		dm_error("Failed to allocate update bundle\n");
9151 		goto cleanup;
9152 	}
9153 
9154 	/*
9155 	 * Disable the cursor first if we're disabling all the planes.
9156 	 * It'll remain on the screen after the planes are re-enabled
9157 	 * if we don't.
9158 	 */
9159 	if (acrtc_state->active_planes == 0)
9160 		amdgpu_dm_commit_cursors(state);
9161 
9162 	/* update planes when needed */
9163 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9164 		struct drm_crtc *crtc = new_plane_state->crtc;
9165 		struct drm_crtc_state *new_crtc_state;
9166 		struct drm_framebuffer *fb = new_plane_state->fb;
9167 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9168 		bool plane_needs_flip;
9169 		struct dc_plane_state *dc_plane;
9170 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9171 
9172 		/* Cursor plane is handled after stream updates */
9173 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9174 			continue;
9175 
9176 		if (!fb || !crtc || pcrtc != crtc)
9177 			continue;
9178 
9179 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9180 		if (!new_crtc_state->active)
9181 			continue;
9182 
9183 		dc_plane = dm_new_plane_state->dc_state;
9184 
9185 		bundle->surface_updates[planes_count].surface = dc_plane;
9186 		if (new_pcrtc_state->color_mgmt_changed) {
9187 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9188 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9189 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9190 		}
9191 
9192 		fill_dc_scaling_info(dm->adev, new_plane_state,
9193 				     &bundle->scaling_infos[planes_count]);
9194 
9195 		bundle->surface_updates[planes_count].scaling_info =
9196 			&bundle->scaling_infos[planes_count];
9197 
9198 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9199 
9200 		pflip_present = pflip_present || plane_needs_flip;
9201 
9202 		if (!plane_needs_flip) {
9203 			planes_count += 1;
9204 			continue;
9205 		}
9206 
9207 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9208 
9209 		/*
9210 		 * Wait for all fences on this FB. Do limited wait to avoid
9211 		 * deadlock during GPU reset when this fence will not signal
9212 		 * but we hold reservation lock for the BO.
9213 		 */
9214 		r = dma_resv_wait_timeout(abo->tbo.base.resv,
9215 					  DMA_RESV_USAGE_WRITE, false,
9216 					  msecs_to_jiffies(5000));
9217 		if (unlikely(r <= 0))
9218 			DRM_ERROR("Waiting for fences timed out!");
9219 
9220 		fill_dc_plane_info_and_addr(
9221 			dm->adev, new_plane_state,
9222 			afb->tiling_flags,
9223 			&bundle->plane_infos[planes_count],
9224 			&bundle->flip_addrs[planes_count].address,
9225 			afb->tmz_surface, false);
9226 
9227 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9228 				 new_plane_state->plane->index,
9229 				 bundle->plane_infos[planes_count].dcc.enable);
9230 
9231 		bundle->surface_updates[planes_count].plane_info =
9232 			&bundle->plane_infos[planes_count];
9233 
9234 		/*
9235 		 * Only allow immediate flips for fast updates that don't
9236 		 * change FB pitch, DCC state, rotation or mirroing.
9237 		 */
9238 		bundle->flip_addrs[planes_count].flip_immediate =
9239 			crtc->state->async_flip &&
9240 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9241 
9242 		timestamp_ns = ktime_get_ns();
9243 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9244 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9245 		bundle->surface_updates[planes_count].surface = dc_plane;
9246 
9247 		if (!bundle->surface_updates[planes_count].surface) {
9248 			DRM_ERROR("No surface for CRTC: id=%d\n",
9249 					acrtc_attach->crtc_id);
9250 			continue;
9251 		}
9252 
9253 		if (plane == pcrtc->primary)
9254 			update_freesync_state_on_stream(
9255 				dm,
9256 				acrtc_state,
9257 				acrtc_state->stream,
9258 				dc_plane,
9259 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9260 
9261 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9262 				 __func__,
9263 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9264 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9265 
9266 		planes_count += 1;
9267 
9268 	}
9269 
9270 	if (pflip_present) {
9271 		if (!vrr_active) {
9272 			/* Use old throttling in non-vrr fixed refresh rate mode
9273 			 * to keep flip scheduling based on target vblank counts
9274 			 * working in a backwards compatible way, e.g., for
9275 			 * clients using the GLX_OML_sync_control extension or
9276 			 * DRI3/Present extension with defined target_msc.
9277 			 */
9278 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9279 		}
9280 		else {
9281 			/* For variable refresh rate mode only:
9282 			 * Get vblank of last completed flip to avoid > 1 vrr
9283 			 * flips per video frame by use of throttling, but allow
9284 			 * flip programming anywhere in the possibly large
9285 			 * variable vrr vblank interval for fine-grained flip
9286 			 * timing control and more opportunity to avoid stutter
9287 			 * on late submission of flips.
9288 			 */
9289 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9290 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9291 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9292 		}
9293 
9294 		target_vblank = last_flip_vblank + wait_for_vblank;
9295 
9296 		/*
9297 		 * Wait until we're out of the vertical blank period before the one
9298 		 * targeted by the flip
9299 		 */
9300 		while ((acrtc_attach->enabled &&
9301 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9302 							    0, &vpos, &hpos, NULL,
9303 							    NULL, &pcrtc->hwmode)
9304 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9305 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9306 			(int)(target_vblank -
9307 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9308 			usleep_range(1000, 1100);
9309 		}
9310 
9311 		/**
9312 		 * Prepare the flip event for the pageflip interrupt to handle.
9313 		 *
9314 		 * This only works in the case where we've already turned on the
9315 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9316 		 * from 0 -> n planes we have to skip a hardware generated event
9317 		 * and rely on sending it from software.
9318 		 */
9319 		if (acrtc_attach->base.state->event &&
9320 		    acrtc_state->active_planes > 0 &&
9321 		    !acrtc_state->force_dpms_off) {
9322 			drm_crtc_vblank_get(pcrtc);
9323 
9324 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9325 
9326 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9327 			prepare_flip_isr(acrtc_attach);
9328 
9329 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9330 		}
9331 
9332 		if (acrtc_state->stream) {
9333 			if (acrtc_state->freesync_vrr_info_changed)
9334 				bundle->stream_update.vrr_infopacket =
9335 					&acrtc_state->stream->vrr_infopacket;
9336 		}
9337 	}
9338 
9339 	/* Update the planes if changed or disable if we don't have any. */
9340 	if ((planes_count || acrtc_state->active_planes == 0) &&
9341 		acrtc_state->stream) {
9342 		/*
9343 		 * If PSR or idle optimizations are enabled then flush out
9344 		 * any pending work before hardware programming.
9345 		 */
9346 		if (dm->vblank_control_workqueue)
9347 			flush_workqueue(dm->vblank_control_workqueue);
9348 
9349 		bundle->stream_update.stream = acrtc_state->stream;
9350 		if (new_pcrtc_state->mode_changed) {
9351 			bundle->stream_update.src = acrtc_state->stream->src;
9352 			bundle->stream_update.dst = acrtc_state->stream->dst;
9353 		}
9354 
9355 		if (new_pcrtc_state->color_mgmt_changed) {
9356 			/*
9357 			 * TODO: This isn't fully correct since we've actually
9358 			 * already modified the stream in place.
9359 			 */
9360 			bundle->stream_update.gamut_remap =
9361 				&acrtc_state->stream->gamut_remap_matrix;
9362 			bundle->stream_update.output_csc_transform =
9363 				&acrtc_state->stream->csc_color_matrix;
9364 			bundle->stream_update.out_transfer_func =
9365 				acrtc_state->stream->out_transfer_func;
9366 		}
9367 
9368 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9369 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9370 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9371 
9372 		/*
9373 		 * If FreeSync state on the stream has changed then we need to
9374 		 * re-adjust the min/max bounds now that DC doesn't handle this
9375 		 * as part of commit.
9376 		 */
9377 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9378 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9379 			dc_stream_adjust_vmin_vmax(
9380 				dm->dc, acrtc_state->stream,
9381 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9382 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9383 		}
9384 		mutex_lock(&dm->dc_lock);
9385 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9386 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9387 			amdgpu_dm_psr_disable(acrtc_state->stream);
9388 
9389 		dc_commit_updates_for_stream(dm->dc,
9390 						     bundle->surface_updates,
9391 						     planes_count,
9392 						     acrtc_state->stream,
9393 						     &bundle->stream_update,
9394 						     dc_state);
9395 
9396 		/**
9397 		 * Enable or disable the interrupts on the backend.
9398 		 *
9399 		 * Most pipes are put into power gating when unused.
9400 		 *
9401 		 * When power gating is enabled on a pipe we lose the
9402 		 * interrupt enablement state when power gating is disabled.
9403 		 *
9404 		 * So we need to update the IRQ control state in hardware
9405 		 * whenever the pipe turns on (since it could be previously
9406 		 * power gated) or off (since some pipes can't be power gated
9407 		 * on some ASICs).
9408 		 */
9409 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9410 			dm_update_pflip_irq_state(drm_to_adev(dev),
9411 						  acrtc_attach);
9412 
9413 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9414 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9415 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9416 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9417 
9418 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9419 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9420 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9421 			struct amdgpu_dm_connector *aconn =
9422 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9423 
9424 			if (aconn->psr_skip_count > 0)
9425 				aconn->psr_skip_count--;
9426 
9427 			/* Allow PSR when skip count is 0. */
9428 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9429 		} else {
9430 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9431 		}
9432 
9433 		mutex_unlock(&dm->dc_lock);
9434 	}
9435 
9436 	/*
9437 	 * Update cursor state *after* programming all the planes.
9438 	 * This avoids redundant programming in the case where we're going
9439 	 * to be disabling a single plane - those pipes are being disabled.
9440 	 */
9441 	if (acrtc_state->active_planes)
9442 		amdgpu_dm_commit_cursors(state);
9443 
9444 cleanup:
9445 	kfree(bundle);
9446 }
9447 
9448 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9449 				   struct drm_atomic_state *state)
9450 {
9451 	struct amdgpu_device *adev = drm_to_adev(dev);
9452 	struct amdgpu_dm_connector *aconnector;
9453 	struct drm_connector *connector;
9454 	struct drm_connector_state *old_con_state, *new_con_state;
9455 	struct drm_crtc_state *new_crtc_state;
9456 	struct dm_crtc_state *new_dm_crtc_state;
9457 	const struct dc_stream_status *status;
9458 	int i, inst;
9459 
9460 	/* Notify device removals. */
9461 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9462 		if (old_con_state->crtc != new_con_state->crtc) {
9463 			/* CRTC changes require notification. */
9464 			goto notify;
9465 		}
9466 
9467 		if (!new_con_state->crtc)
9468 			continue;
9469 
9470 		new_crtc_state = drm_atomic_get_new_crtc_state(
9471 			state, new_con_state->crtc);
9472 
9473 		if (!new_crtc_state)
9474 			continue;
9475 
9476 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9477 			continue;
9478 
9479 	notify:
9480 		aconnector = to_amdgpu_dm_connector(connector);
9481 
9482 		mutex_lock(&adev->dm.audio_lock);
9483 		inst = aconnector->audio_inst;
9484 		aconnector->audio_inst = -1;
9485 		mutex_unlock(&adev->dm.audio_lock);
9486 
9487 		amdgpu_dm_audio_eld_notify(adev, inst);
9488 	}
9489 
9490 	/* Notify audio device additions. */
9491 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9492 		if (!new_con_state->crtc)
9493 			continue;
9494 
9495 		new_crtc_state = drm_atomic_get_new_crtc_state(
9496 			state, new_con_state->crtc);
9497 
9498 		if (!new_crtc_state)
9499 			continue;
9500 
9501 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9502 			continue;
9503 
9504 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9505 		if (!new_dm_crtc_state->stream)
9506 			continue;
9507 
9508 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9509 		if (!status)
9510 			continue;
9511 
9512 		aconnector = to_amdgpu_dm_connector(connector);
9513 
9514 		mutex_lock(&adev->dm.audio_lock);
9515 		inst = status->audio_inst;
9516 		aconnector->audio_inst = inst;
9517 		mutex_unlock(&adev->dm.audio_lock);
9518 
9519 		amdgpu_dm_audio_eld_notify(adev, inst);
9520 	}
9521 }
9522 
9523 /*
9524  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9525  * @crtc_state: the DRM CRTC state
9526  * @stream_state: the DC stream state.
9527  *
9528  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9529  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9530  */
9531 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9532 						struct dc_stream_state *stream_state)
9533 {
9534 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9535 }
9536 
9537 /**
9538  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9539  * @state: The atomic state to commit
9540  *
9541  * This will tell DC to commit the constructed DC state from atomic_check,
9542  * programming the hardware. Any failures here implies a hardware failure, since
9543  * atomic check should have filtered anything non-kosher.
9544  */
9545 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9546 {
9547 	struct drm_device *dev = state->dev;
9548 	struct amdgpu_device *adev = drm_to_adev(dev);
9549 	struct amdgpu_display_manager *dm = &adev->dm;
9550 	struct dm_atomic_state *dm_state;
9551 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9552 	uint32_t i, j;
9553 	struct drm_crtc *crtc;
9554 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9555 	unsigned long flags;
9556 	bool wait_for_vblank = true;
9557 	struct drm_connector *connector;
9558 	struct drm_connector_state *old_con_state, *new_con_state;
9559 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9560 	int crtc_disable_count = 0;
9561 	bool mode_set_reset_required = false;
9562 
9563 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9564 
9565 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9566 
9567 	dm_state = dm_atomic_get_new_state(state);
9568 	if (dm_state && dm_state->context) {
9569 		dc_state = dm_state->context;
9570 	} else {
9571 		/* No state changes, retain current state. */
9572 		dc_state_temp = dc_create_state(dm->dc);
9573 		ASSERT(dc_state_temp);
9574 		dc_state = dc_state_temp;
9575 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9576 	}
9577 
9578 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9579 				       new_crtc_state, i) {
9580 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9581 
9582 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9583 
9584 		if (old_crtc_state->active &&
9585 		    (!new_crtc_state->active ||
9586 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9587 			manage_dm_interrupts(adev, acrtc, false);
9588 			dc_stream_release(dm_old_crtc_state->stream);
9589 		}
9590 	}
9591 
9592 	drm_atomic_helper_calc_timestamping_constants(state);
9593 
9594 	/* update changed items */
9595 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9596 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9597 
9598 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9599 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9600 
9601 		drm_dbg_state(state->dev,
9602 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9603 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9604 			"connectors_changed:%d\n",
9605 			acrtc->crtc_id,
9606 			new_crtc_state->enable,
9607 			new_crtc_state->active,
9608 			new_crtc_state->planes_changed,
9609 			new_crtc_state->mode_changed,
9610 			new_crtc_state->active_changed,
9611 			new_crtc_state->connectors_changed);
9612 
9613 		/* Disable cursor if disabling crtc */
9614 		if (old_crtc_state->active && !new_crtc_state->active) {
9615 			struct dc_cursor_position position;
9616 
9617 			memset(&position, 0, sizeof(position));
9618 			mutex_lock(&dm->dc_lock);
9619 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9620 			mutex_unlock(&dm->dc_lock);
9621 		}
9622 
9623 		/* Copy all transient state flags into dc state */
9624 		if (dm_new_crtc_state->stream) {
9625 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9626 							    dm_new_crtc_state->stream);
9627 		}
9628 
9629 		/* handles headless hotplug case, updating new_state and
9630 		 * aconnector as needed
9631 		 */
9632 
9633 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9634 
9635 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9636 
9637 			if (!dm_new_crtc_state->stream) {
9638 				/*
9639 				 * this could happen because of issues with
9640 				 * userspace notifications delivery.
9641 				 * In this case userspace tries to set mode on
9642 				 * display which is disconnected in fact.
9643 				 * dc_sink is NULL in this case on aconnector.
9644 				 * We expect reset mode will come soon.
9645 				 *
9646 				 * This can also happen when unplug is done
9647 				 * during resume sequence ended
9648 				 *
9649 				 * In this case, we want to pretend we still
9650 				 * have a sink to keep the pipe running so that
9651 				 * hw state is consistent with the sw state
9652 				 */
9653 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9654 						__func__, acrtc->base.base.id);
9655 				continue;
9656 			}
9657 
9658 			if (dm_old_crtc_state->stream)
9659 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9660 
9661 			pm_runtime_get_noresume(dev->dev);
9662 
9663 			acrtc->enabled = true;
9664 			acrtc->hw_mode = new_crtc_state->mode;
9665 			crtc->hwmode = new_crtc_state->mode;
9666 			mode_set_reset_required = true;
9667 		} else if (modereset_required(new_crtc_state)) {
9668 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9669 			/* i.e. reset mode */
9670 			if (dm_old_crtc_state->stream)
9671 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9672 
9673 			mode_set_reset_required = true;
9674 		}
9675 	} /* for_each_crtc_in_state() */
9676 
9677 	if (dc_state) {
9678 		/* if there mode set or reset, disable eDP PSR */
9679 		if (mode_set_reset_required) {
9680 			if (dm->vblank_control_workqueue)
9681 				flush_workqueue(dm->vblank_control_workqueue);
9682 
9683 			amdgpu_dm_psr_disable_all(dm);
9684 		}
9685 
9686 		dm_enable_per_frame_crtc_master_sync(dc_state);
9687 		mutex_lock(&dm->dc_lock);
9688 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9689 
9690 		/* Allow idle optimization when vblank count is 0 for display off */
9691 		if (dm->active_vblank_irq_count == 0)
9692 			dc_allow_idle_optimizations(dm->dc, true);
9693 		mutex_unlock(&dm->dc_lock);
9694 	}
9695 
9696 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9697 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9698 
9699 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9700 
9701 		if (dm_new_crtc_state->stream != NULL) {
9702 			const struct dc_stream_status *status =
9703 					dc_stream_get_status(dm_new_crtc_state->stream);
9704 
9705 			if (!status)
9706 				status = dc_stream_get_status_from_state(dc_state,
9707 									 dm_new_crtc_state->stream);
9708 			if (!status)
9709 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9710 			else
9711 				acrtc->otg_inst = status->primary_otg_inst;
9712 		}
9713 	}
9714 #ifdef CONFIG_DRM_AMD_DC_HDCP
9715 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9716 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9717 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9718 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9719 
9720 		new_crtc_state = NULL;
9721 
9722 		if (acrtc)
9723 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9724 
9725 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9726 
9727 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9728 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9729 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9730 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9731 			dm_new_con_state->update_hdcp = true;
9732 			continue;
9733 		}
9734 
9735 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9736 			hdcp_update_display(
9737 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9738 				new_con_state->hdcp_content_type,
9739 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9740 	}
9741 #endif
9742 
9743 	/* Handle connector state changes */
9744 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9745 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9746 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9747 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9748 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9749 		struct dc_stream_update stream_update;
9750 		struct dc_info_packet hdr_packet;
9751 		struct dc_stream_status *status = NULL;
9752 		bool abm_changed, hdr_changed, scaling_changed;
9753 
9754 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9755 		memset(&stream_update, 0, sizeof(stream_update));
9756 
9757 		if (acrtc) {
9758 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9759 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9760 		}
9761 
9762 		/* Skip any modesets/resets */
9763 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9764 			continue;
9765 
9766 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9767 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9768 
9769 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9770 							     dm_old_con_state);
9771 
9772 		abm_changed = dm_new_crtc_state->abm_level !=
9773 			      dm_old_crtc_state->abm_level;
9774 
9775 		hdr_changed =
9776 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9777 
9778 		if (!scaling_changed && !abm_changed && !hdr_changed)
9779 			continue;
9780 
9781 		stream_update.stream = dm_new_crtc_state->stream;
9782 		if (scaling_changed) {
9783 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9784 					dm_new_con_state, dm_new_crtc_state->stream);
9785 
9786 			stream_update.src = dm_new_crtc_state->stream->src;
9787 			stream_update.dst = dm_new_crtc_state->stream->dst;
9788 		}
9789 
9790 		if (abm_changed) {
9791 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9792 
9793 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9794 		}
9795 
9796 		if (hdr_changed) {
9797 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9798 			stream_update.hdr_static_metadata = &hdr_packet;
9799 		}
9800 
9801 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9802 
9803 		if (WARN_ON(!status))
9804 			continue;
9805 
9806 		WARN_ON(!status->plane_count);
9807 
9808 		/*
9809 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9810 		 * Here we create an empty update on each plane.
9811 		 * To fix this, DC should permit updating only stream properties.
9812 		 */
9813 		for (j = 0; j < status->plane_count; j++)
9814 			dummy_updates[j].surface = status->plane_states[0];
9815 
9816 
9817 		mutex_lock(&dm->dc_lock);
9818 		dc_commit_updates_for_stream(dm->dc,
9819 						     dummy_updates,
9820 						     status->plane_count,
9821 						     dm_new_crtc_state->stream,
9822 						     &stream_update,
9823 						     dc_state);
9824 		mutex_unlock(&dm->dc_lock);
9825 	}
9826 
9827 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9828 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9829 				      new_crtc_state, i) {
9830 		if (old_crtc_state->active && !new_crtc_state->active)
9831 			crtc_disable_count++;
9832 
9833 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9834 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9835 
9836 		/* For freesync config update on crtc state and params for irq */
9837 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9838 
9839 		/* Handle vrr on->off / off->on transitions */
9840 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9841 						dm_new_crtc_state);
9842 	}
9843 
9844 	/**
9845 	 * Enable interrupts for CRTCs that are newly enabled or went through
9846 	 * a modeset. It was intentionally deferred until after the front end
9847 	 * state was modified to wait until the OTG was on and so the IRQ
9848 	 * handlers didn't access stale or invalid state.
9849 	 */
9850 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9851 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9852 #ifdef CONFIG_DEBUG_FS
9853 		bool configure_crc = false;
9854 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9855 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9856 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9857 #endif
9858 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9859 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9860 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9861 #endif
9862 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9863 
9864 		if (new_crtc_state->active &&
9865 		    (!old_crtc_state->active ||
9866 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9867 			dc_stream_retain(dm_new_crtc_state->stream);
9868 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9869 			manage_dm_interrupts(adev, acrtc, true);
9870 
9871 #ifdef CONFIG_DEBUG_FS
9872 			/**
9873 			 * Frontend may have changed so reapply the CRC capture
9874 			 * settings for the stream.
9875 			 */
9876 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9877 
9878 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9879 				configure_crc = true;
9880 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9881 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9882 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9883 					acrtc->dm_irq_params.crc_window.update_win = true;
9884 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9885 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9886 					crc_rd_wrk->crtc = crtc;
9887 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9888 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9889 				}
9890 #endif
9891 			}
9892 
9893 			if (configure_crc)
9894 				if (amdgpu_dm_crtc_configure_crc_source(
9895 					crtc, dm_new_crtc_state, cur_crc_src))
9896 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9897 #endif
9898 		}
9899 	}
9900 
9901 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9902 		if (new_crtc_state->async_flip)
9903 			wait_for_vblank = false;
9904 
9905 	/* update planes when needed per crtc*/
9906 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9907 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9908 
9909 		if (dm_new_crtc_state->stream)
9910 			amdgpu_dm_commit_planes(state, dc_state, dev,
9911 						dm, crtc, wait_for_vblank);
9912 	}
9913 
9914 	/* Update audio instances for each connector. */
9915 	amdgpu_dm_commit_audio(dev, state);
9916 
9917 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9918 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9919 	/* restore the backlight level */
9920 	for (i = 0; i < dm->num_of_edps; i++) {
9921 		if (dm->backlight_dev[i] &&
9922 		    (dm->actual_brightness[i] != dm->brightness[i]))
9923 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9924 	}
9925 #endif
9926 	/*
9927 	 * send vblank event on all events not handled in flip and
9928 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9929 	 */
9930 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9931 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9932 
9933 		if (new_crtc_state->event)
9934 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9935 
9936 		new_crtc_state->event = NULL;
9937 	}
9938 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9939 
9940 	/* Signal HW programming completion */
9941 	drm_atomic_helper_commit_hw_done(state);
9942 
9943 	if (wait_for_vblank)
9944 		drm_atomic_helper_wait_for_flip_done(dev, state);
9945 
9946 	drm_atomic_helper_cleanup_planes(dev, state);
9947 
9948 	/* return the stolen vga memory back to VRAM */
9949 	if (!adev->mman.keep_stolen_vga_memory)
9950 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9951 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9952 
9953 	/*
9954 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9955 	 * so we can put the GPU into runtime suspend if we're not driving any
9956 	 * displays anymore
9957 	 */
9958 	for (i = 0; i < crtc_disable_count; i++)
9959 		pm_runtime_put_autosuspend(dev->dev);
9960 	pm_runtime_mark_last_busy(dev->dev);
9961 
9962 	if (dc_state_temp)
9963 		dc_release_state(dc_state_temp);
9964 }
9965 
9966 
9967 static int dm_force_atomic_commit(struct drm_connector *connector)
9968 {
9969 	int ret = 0;
9970 	struct drm_device *ddev = connector->dev;
9971 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9972 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9973 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9974 	struct drm_connector_state *conn_state;
9975 	struct drm_crtc_state *crtc_state;
9976 	struct drm_plane_state *plane_state;
9977 
9978 	if (!state)
9979 		return -ENOMEM;
9980 
9981 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9982 
9983 	/* Construct an atomic state to restore previous display setting */
9984 
9985 	/*
9986 	 * Attach connectors to drm_atomic_state
9987 	 */
9988 	conn_state = drm_atomic_get_connector_state(state, connector);
9989 
9990 	ret = PTR_ERR_OR_ZERO(conn_state);
9991 	if (ret)
9992 		goto out;
9993 
9994 	/* Attach crtc to drm_atomic_state*/
9995 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9996 
9997 	ret = PTR_ERR_OR_ZERO(crtc_state);
9998 	if (ret)
9999 		goto out;
10000 
10001 	/* force a restore */
10002 	crtc_state->mode_changed = true;
10003 
10004 	/* Attach plane to drm_atomic_state */
10005 	plane_state = drm_atomic_get_plane_state(state, plane);
10006 
10007 	ret = PTR_ERR_OR_ZERO(plane_state);
10008 	if (ret)
10009 		goto out;
10010 
10011 	/* Call commit internally with the state we just constructed */
10012 	ret = drm_atomic_commit(state);
10013 
10014 out:
10015 	drm_atomic_state_put(state);
10016 	if (ret)
10017 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10018 
10019 	return ret;
10020 }
10021 
10022 /*
10023  * This function handles all cases when set mode does not come upon hotplug.
10024  * This includes when a display is unplugged then plugged back into the
10025  * same port and when running without usermode desktop manager supprot
10026  */
10027 void dm_restore_drm_connector_state(struct drm_device *dev,
10028 				    struct drm_connector *connector)
10029 {
10030 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10031 	struct amdgpu_crtc *disconnected_acrtc;
10032 	struct dm_crtc_state *acrtc_state;
10033 
10034 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10035 		return;
10036 
10037 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10038 	if (!disconnected_acrtc)
10039 		return;
10040 
10041 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10042 	if (!acrtc_state->stream)
10043 		return;
10044 
10045 	/*
10046 	 * If the previous sink is not released and different from the current,
10047 	 * we deduce we are in a state where we can not rely on usermode call
10048 	 * to turn on the display, so we do it here
10049 	 */
10050 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10051 		dm_force_atomic_commit(&aconnector->base);
10052 }
10053 
10054 /*
10055  * Grabs all modesetting locks to serialize against any blocking commits,
10056  * Waits for completion of all non blocking commits.
10057  */
10058 static int do_aquire_global_lock(struct drm_device *dev,
10059 				 struct drm_atomic_state *state)
10060 {
10061 	struct drm_crtc *crtc;
10062 	struct drm_crtc_commit *commit;
10063 	long ret;
10064 
10065 	/*
10066 	 * Adding all modeset locks to aquire_ctx will
10067 	 * ensure that when the framework release it the
10068 	 * extra locks we are locking here will get released to
10069 	 */
10070 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10071 	if (ret)
10072 		return ret;
10073 
10074 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10075 		spin_lock(&crtc->commit_lock);
10076 		commit = list_first_entry_or_null(&crtc->commit_list,
10077 				struct drm_crtc_commit, commit_entry);
10078 		if (commit)
10079 			drm_crtc_commit_get(commit);
10080 		spin_unlock(&crtc->commit_lock);
10081 
10082 		if (!commit)
10083 			continue;
10084 
10085 		/*
10086 		 * Make sure all pending HW programming completed and
10087 		 * page flips done
10088 		 */
10089 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10090 
10091 		if (ret > 0)
10092 			ret = wait_for_completion_interruptible_timeout(
10093 					&commit->flip_done, 10*HZ);
10094 
10095 		if (ret == 0)
10096 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10097 				  "timed out\n", crtc->base.id, crtc->name);
10098 
10099 		drm_crtc_commit_put(commit);
10100 	}
10101 
10102 	return ret < 0 ? ret : 0;
10103 }
10104 
10105 static void get_freesync_config_for_crtc(
10106 	struct dm_crtc_state *new_crtc_state,
10107 	struct dm_connector_state *new_con_state)
10108 {
10109 	struct mod_freesync_config config = {0};
10110 	struct amdgpu_dm_connector *aconnector =
10111 			to_amdgpu_dm_connector(new_con_state->base.connector);
10112 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10113 	int vrefresh = drm_mode_vrefresh(mode);
10114 	bool fs_vid_mode = false;
10115 
10116 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10117 					vrefresh >= aconnector->min_vfreq &&
10118 					vrefresh <= aconnector->max_vfreq;
10119 
10120 	if (new_crtc_state->vrr_supported) {
10121 		new_crtc_state->stream->ignore_msa_timing_param = true;
10122 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10123 
10124 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10125 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10126 		config.vsif_supported = true;
10127 		config.btr = true;
10128 
10129 		if (fs_vid_mode) {
10130 			config.state = VRR_STATE_ACTIVE_FIXED;
10131 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10132 			goto out;
10133 		} else if (new_crtc_state->base.vrr_enabled) {
10134 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10135 		} else {
10136 			config.state = VRR_STATE_INACTIVE;
10137 		}
10138 	}
10139 out:
10140 	new_crtc_state->freesync_config = config;
10141 }
10142 
10143 static void reset_freesync_config_for_crtc(
10144 	struct dm_crtc_state *new_crtc_state)
10145 {
10146 	new_crtc_state->vrr_supported = false;
10147 
10148 	memset(&new_crtc_state->vrr_infopacket, 0,
10149 	       sizeof(new_crtc_state->vrr_infopacket));
10150 }
10151 
10152 static bool
10153 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10154 				 struct drm_crtc_state *new_crtc_state)
10155 {
10156 	const struct drm_display_mode *old_mode, *new_mode;
10157 
10158 	if (!old_crtc_state || !new_crtc_state)
10159 		return false;
10160 
10161 	old_mode = &old_crtc_state->mode;
10162 	new_mode = &new_crtc_state->mode;
10163 
10164 	if (old_mode->clock       == new_mode->clock &&
10165 	    old_mode->hdisplay    == new_mode->hdisplay &&
10166 	    old_mode->vdisplay    == new_mode->vdisplay &&
10167 	    old_mode->htotal      == new_mode->htotal &&
10168 	    old_mode->vtotal      != new_mode->vtotal &&
10169 	    old_mode->hsync_start == new_mode->hsync_start &&
10170 	    old_mode->vsync_start != new_mode->vsync_start &&
10171 	    old_mode->hsync_end   == new_mode->hsync_end &&
10172 	    old_mode->vsync_end   != new_mode->vsync_end &&
10173 	    old_mode->hskew       == new_mode->hskew &&
10174 	    old_mode->vscan       == new_mode->vscan &&
10175 	    (old_mode->vsync_end - old_mode->vsync_start) ==
10176 	    (new_mode->vsync_end - new_mode->vsync_start))
10177 		return true;
10178 
10179 	return false;
10180 }
10181 
10182 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10183 	uint64_t num, den, res;
10184 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10185 
10186 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10187 
10188 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10189 	den = (unsigned long long)new_crtc_state->mode.htotal *
10190 	      (unsigned long long)new_crtc_state->mode.vtotal;
10191 
10192 	res = div_u64(num, den);
10193 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10194 }
10195 
10196 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10197 			 struct drm_atomic_state *state,
10198 			 struct drm_crtc *crtc,
10199 			 struct drm_crtc_state *old_crtc_state,
10200 			 struct drm_crtc_state *new_crtc_state,
10201 			 bool enable,
10202 			 bool *lock_and_validation_needed)
10203 {
10204 	struct dm_atomic_state *dm_state = NULL;
10205 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10206 	struct dc_stream_state *new_stream;
10207 	int ret = 0;
10208 
10209 	/*
10210 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10211 	 * update changed items
10212 	 */
10213 	struct amdgpu_crtc *acrtc = NULL;
10214 	struct amdgpu_dm_connector *aconnector = NULL;
10215 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10216 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10217 
10218 	new_stream = NULL;
10219 
10220 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10221 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10222 	acrtc = to_amdgpu_crtc(crtc);
10223 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10224 
10225 	/* TODO This hack should go away */
10226 	if (aconnector && enable) {
10227 		/* Make sure fake sink is created in plug-in scenario */
10228 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10229 							    &aconnector->base);
10230 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10231 							    &aconnector->base);
10232 
10233 		if (IS_ERR(drm_new_conn_state)) {
10234 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10235 			goto fail;
10236 		}
10237 
10238 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10239 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10240 
10241 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10242 			goto skip_modeset;
10243 
10244 		new_stream = create_validate_stream_for_sink(aconnector,
10245 							     &new_crtc_state->mode,
10246 							     dm_new_conn_state,
10247 							     dm_old_crtc_state->stream);
10248 
10249 		/*
10250 		 * we can have no stream on ACTION_SET if a display
10251 		 * was disconnected during S3, in this case it is not an
10252 		 * error, the OS will be updated after detection, and
10253 		 * will do the right thing on next atomic commit
10254 		 */
10255 
10256 		if (!new_stream) {
10257 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10258 					__func__, acrtc->base.base.id);
10259 			ret = -ENOMEM;
10260 			goto fail;
10261 		}
10262 
10263 		/*
10264 		 * TODO: Check VSDB bits to decide whether this should
10265 		 * be enabled or not.
10266 		 */
10267 		new_stream->triggered_crtc_reset.enabled =
10268 			dm->force_timing_sync;
10269 
10270 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10271 
10272 		ret = fill_hdr_info_packet(drm_new_conn_state,
10273 					   &new_stream->hdr_static_metadata);
10274 		if (ret)
10275 			goto fail;
10276 
10277 		/*
10278 		 * If we already removed the old stream from the context
10279 		 * (and set the new stream to NULL) then we can't reuse
10280 		 * the old stream even if the stream and scaling are unchanged.
10281 		 * We'll hit the BUG_ON and black screen.
10282 		 *
10283 		 * TODO: Refactor this function to allow this check to work
10284 		 * in all conditions.
10285 		 */
10286 		if (dm_new_crtc_state->stream &&
10287 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10288 			goto skip_modeset;
10289 
10290 		if (dm_new_crtc_state->stream &&
10291 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10292 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10293 			new_crtc_state->mode_changed = false;
10294 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10295 					 new_crtc_state->mode_changed);
10296 		}
10297 	}
10298 
10299 	/* mode_changed flag may get updated above, need to check again */
10300 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10301 		goto skip_modeset;
10302 
10303 	drm_dbg_state(state->dev,
10304 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10305 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10306 		"connectors_changed:%d\n",
10307 		acrtc->crtc_id,
10308 		new_crtc_state->enable,
10309 		new_crtc_state->active,
10310 		new_crtc_state->planes_changed,
10311 		new_crtc_state->mode_changed,
10312 		new_crtc_state->active_changed,
10313 		new_crtc_state->connectors_changed);
10314 
10315 	/* Remove stream for any changed/disabled CRTC */
10316 	if (!enable) {
10317 
10318 		if (!dm_old_crtc_state->stream)
10319 			goto skip_modeset;
10320 
10321 		if (dm_new_crtc_state->stream &&
10322 		    is_timing_unchanged_for_freesync(new_crtc_state,
10323 						     old_crtc_state)) {
10324 			new_crtc_state->mode_changed = false;
10325 			DRM_DEBUG_DRIVER(
10326 				"Mode change not required for front porch change, "
10327 				"setting mode_changed to %d",
10328 				new_crtc_state->mode_changed);
10329 
10330 			set_freesync_fixed_config(dm_new_crtc_state);
10331 
10332 			goto skip_modeset;
10333 		} else if (aconnector &&
10334 			   is_freesync_video_mode(&new_crtc_state->mode,
10335 						  aconnector)) {
10336 			struct drm_display_mode *high_mode;
10337 
10338 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10339 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10340 				set_freesync_fixed_config(dm_new_crtc_state);
10341 			}
10342 		}
10343 
10344 		ret = dm_atomic_get_state(state, &dm_state);
10345 		if (ret)
10346 			goto fail;
10347 
10348 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10349 				crtc->base.id);
10350 
10351 		/* i.e. reset mode */
10352 		if (dc_remove_stream_from_ctx(
10353 				dm->dc,
10354 				dm_state->context,
10355 				dm_old_crtc_state->stream) != DC_OK) {
10356 			ret = -EINVAL;
10357 			goto fail;
10358 		}
10359 
10360 		dc_stream_release(dm_old_crtc_state->stream);
10361 		dm_new_crtc_state->stream = NULL;
10362 
10363 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10364 
10365 		*lock_and_validation_needed = true;
10366 
10367 	} else {/* Add stream for any updated/enabled CRTC */
10368 		/*
10369 		 * Quick fix to prevent NULL pointer on new_stream when
10370 		 * added MST connectors not found in existing crtc_state in the chained mode
10371 		 * TODO: need to dig out the root cause of that
10372 		 */
10373 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10374 			goto skip_modeset;
10375 
10376 		if (modereset_required(new_crtc_state))
10377 			goto skip_modeset;
10378 
10379 		if (modeset_required(new_crtc_state, new_stream,
10380 				     dm_old_crtc_state->stream)) {
10381 
10382 			WARN_ON(dm_new_crtc_state->stream);
10383 
10384 			ret = dm_atomic_get_state(state, &dm_state);
10385 			if (ret)
10386 				goto fail;
10387 
10388 			dm_new_crtc_state->stream = new_stream;
10389 
10390 			dc_stream_retain(new_stream);
10391 
10392 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10393 					 crtc->base.id);
10394 
10395 			if (dc_add_stream_to_ctx(
10396 					dm->dc,
10397 					dm_state->context,
10398 					dm_new_crtc_state->stream) != DC_OK) {
10399 				ret = -EINVAL;
10400 				goto fail;
10401 			}
10402 
10403 			*lock_and_validation_needed = true;
10404 		}
10405 	}
10406 
10407 skip_modeset:
10408 	/* Release extra reference */
10409 	if (new_stream)
10410 		 dc_stream_release(new_stream);
10411 
10412 	/*
10413 	 * We want to do dc stream updates that do not require a
10414 	 * full modeset below.
10415 	 */
10416 	if (!(enable && aconnector && new_crtc_state->active))
10417 		return 0;
10418 	/*
10419 	 * Given above conditions, the dc state cannot be NULL because:
10420 	 * 1. We're in the process of enabling CRTCs (just been added
10421 	 *    to the dc context, or already is on the context)
10422 	 * 2. Has a valid connector attached, and
10423 	 * 3. Is currently active and enabled.
10424 	 * => The dc stream state currently exists.
10425 	 */
10426 	BUG_ON(dm_new_crtc_state->stream == NULL);
10427 
10428 	/* Scaling or underscan settings */
10429 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10430 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10431 		update_stream_scaling_settings(
10432 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10433 
10434 	/* ABM settings */
10435 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10436 
10437 	/*
10438 	 * Color management settings. We also update color properties
10439 	 * when a modeset is needed, to ensure it gets reprogrammed.
10440 	 */
10441 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10442 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10443 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10444 		if (ret)
10445 			goto fail;
10446 	}
10447 
10448 	/* Update Freesync settings. */
10449 	get_freesync_config_for_crtc(dm_new_crtc_state,
10450 				     dm_new_conn_state);
10451 
10452 	return ret;
10453 
10454 fail:
10455 	if (new_stream)
10456 		dc_stream_release(new_stream);
10457 	return ret;
10458 }
10459 
10460 static bool should_reset_plane(struct drm_atomic_state *state,
10461 			       struct drm_plane *plane,
10462 			       struct drm_plane_state *old_plane_state,
10463 			       struct drm_plane_state *new_plane_state)
10464 {
10465 	struct drm_plane *other;
10466 	struct drm_plane_state *old_other_state, *new_other_state;
10467 	struct drm_crtc_state *new_crtc_state;
10468 	int i;
10469 
10470 	/*
10471 	 * TODO: Remove this hack once the checks below are sufficient
10472 	 * enough to determine when we need to reset all the planes on
10473 	 * the stream.
10474 	 */
10475 	if (state->allow_modeset)
10476 		return true;
10477 
10478 	/* Exit early if we know that we're adding or removing the plane. */
10479 	if (old_plane_state->crtc != new_plane_state->crtc)
10480 		return true;
10481 
10482 	/* old crtc == new_crtc == NULL, plane not in context. */
10483 	if (!new_plane_state->crtc)
10484 		return false;
10485 
10486 	new_crtc_state =
10487 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10488 
10489 	if (!new_crtc_state)
10490 		return true;
10491 
10492 	/* CRTC Degamma changes currently require us to recreate planes. */
10493 	if (new_crtc_state->color_mgmt_changed)
10494 		return true;
10495 
10496 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10497 		return true;
10498 
10499 	/*
10500 	 * If there are any new primary or overlay planes being added or
10501 	 * removed then the z-order can potentially change. To ensure
10502 	 * correct z-order and pipe acquisition the current DC architecture
10503 	 * requires us to remove and recreate all existing planes.
10504 	 *
10505 	 * TODO: Come up with a more elegant solution for this.
10506 	 */
10507 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10508 		struct amdgpu_framebuffer *old_afb, *new_afb;
10509 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10510 			continue;
10511 
10512 		if (old_other_state->crtc != new_plane_state->crtc &&
10513 		    new_other_state->crtc != new_plane_state->crtc)
10514 			continue;
10515 
10516 		if (old_other_state->crtc != new_other_state->crtc)
10517 			return true;
10518 
10519 		/* Src/dst size and scaling updates. */
10520 		if (old_other_state->src_w != new_other_state->src_w ||
10521 		    old_other_state->src_h != new_other_state->src_h ||
10522 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10523 		    old_other_state->crtc_h != new_other_state->crtc_h)
10524 			return true;
10525 
10526 		/* Rotation / mirroring updates. */
10527 		if (old_other_state->rotation != new_other_state->rotation)
10528 			return true;
10529 
10530 		/* Blending updates. */
10531 		if (old_other_state->pixel_blend_mode !=
10532 		    new_other_state->pixel_blend_mode)
10533 			return true;
10534 
10535 		/* Alpha updates. */
10536 		if (old_other_state->alpha != new_other_state->alpha)
10537 			return true;
10538 
10539 		/* Colorspace changes. */
10540 		if (old_other_state->color_range != new_other_state->color_range ||
10541 		    old_other_state->color_encoding != new_other_state->color_encoding)
10542 			return true;
10543 
10544 		/* Framebuffer checks fall at the end. */
10545 		if (!old_other_state->fb || !new_other_state->fb)
10546 			continue;
10547 
10548 		/* Pixel format changes can require bandwidth updates. */
10549 		if (old_other_state->fb->format != new_other_state->fb->format)
10550 			return true;
10551 
10552 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10553 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10554 
10555 		/* Tiling and DCC changes also require bandwidth updates. */
10556 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10557 		    old_afb->base.modifier != new_afb->base.modifier)
10558 			return true;
10559 	}
10560 
10561 	return false;
10562 }
10563 
10564 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10565 			      struct drm_plane_state *new_plane_state,
10566 			      struct drm_framebuffer *fb)
10567 {
10568 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10569 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10570 	unsigned int pitch;
10571 	bool linear;
10572 
10573 	if (fb->width > new_acrtc->max_cursor_width ||
10574 	    fb->height > new_acrtc->max_cursor_height) {
10575 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10576 				 new_plane_state->fb->width,
10577 				 new_plane_state->fb->height);
10578 		return -EINVAL;
10579 	}
10580 	if (new_plane_state->src_w != fb->width << 16 ||
10581 	    new_plane_state->src_h != fb->height << 16) {
10582 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10583 		return -EINVAL;
10584 	}
10585 
10586 	/* Pitch in pixels */
10587 	pitch = fb->pitches[0] / fb->format->cpp[0];
10588 
10589 	if (fb->width != pitch) {
10590 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10591 				 fb->width, pitch);
10592 		return -EINVAL;
10593 	}
10594 
10595 	switch (pitch) {
10596 	case 64:
10597 	case 128:
10598 	case 256:
10599 		/* FB pitch is supported by cursor plane */
10600 		break;
10601 	default:
10602 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10603 		return -EINVAL;
10604 	}
10605 
10606 	/* Core DRM takes care of checking FB modifiers, so we only need to
10607 	 * check tiling flags when the FB doesn't have a modifier. */
10608 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10609 		if (adev->family < AMDGPU_FAMILY_AI) {
10610 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10611 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10612 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10613 		} else {
10614 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10615 		}
10616 		if (!linear) {
10617 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10618 			return -EINVAL;
10619 		}
10620 	}
10621 
10622 	return 0;
10623 }
10624 
10625 static int dm_update_plane_state(struct dc *dc,
10626 				 struct drm_atomic_state *state,
10627 				 struct drm_plane *plane,
10628 				 struct drm_plane_state *old_plane_state,
10629 				 struct drm_plane_state *new_plane_state,
10630 				 bool enable,
10631 				 bool *lock_and_validation_needed)
10632 {
10633 
10634 	struct dm_atomic_state *dm_state = NULL;
10635 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10636 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10637 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10638 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10639 	struct amdgpu_crtc *new_acrtc;
10640 	bool needs_reset;
10641 	int ret = 0;
10642 
10643 
10644 	new_plane_crtc = new_plane_state->crtc;
10645 	old_plane_crtc = old_plane_state->crtc;
10646 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10647 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10648 
10649 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10650 		if (!enable || !new_plane_crtc ||
10651 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10652 			return 0;
10653 
10654 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10655 
10656 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10657 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10658 			return -EINVAL;
10659 		}
10660 
10661 		if (new_plane_state->fb) {
10662 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10663 						 new_plane_state->fb);
10664 			if (ret)
10665 				return ret;
10666 		}
10667 
10668 		return 0;
10669 	}
10670 
10671 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10672 					 new_plane_state);
10673 
10674 	/* Remove any changed/removed planes */
10675 	if (!enable) {
10676 		if (!needs_reset)
10677 			return 0;
10678 
10679 		if (!old_plane_crtc)
10680 			return 0;
10681 
10682 		old_crtc_state = drm_atomic_get_old_crtc_state(
10683 				state, old_plane_crtc);
10684 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10685 
10686 		if (!dm_old_crtc_state->stream)
10687 			return 0;
10688 
10689 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10690 				plane->base.id, old_plane_crtc->base.id);
10691 
10692 		ret = dm_atomic_get_state(state, &dm_state);
10693 		if (ret)
10694 			return ret;
10695 
10696 		if (!dc_remove_plane_from_context(
10697 				dc,
10698 				dm_old_crtc_state->stream,
10699 				dm_old_plane_state->dc_state,
10700 				dm_state->context)) {
10701 
10702 			return -EINVAL;
10703 		}
10704 
10705 
10706 		dc_plane_state_release(dm_old_plane_state->dc_state);
10707 		dm_new_plane_state->dc_state = NULL;
10708 
10709 		*lock_and_validation_needed = true;
10710 
10711 	} else { /* Add new planes */
10712 		struct dc_plane_state *dc_new_plane_state;
10713 
10714 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10715 			return 0;
10716 
10717 		if (!new_plane_crtc)
10718 			return 0;
10719 
10720 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10721 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10722 
10723 		if (!dm_new_crtc_state->stream)
10724 			return 0;
10725 
10726 		if (!needs_reset)
10727 			return 0;
10728 
10729 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10730 		if (ret)
10731 			return ret;
10732 
10733 		WARN_ON(dm_new_plane_state->dc_state);
10734 
10735 		dc_new_plane_state = dc_create_plane_state(dc);
10736 		if (!dc_new_plane_state)
10737 			return -ENOMEM;
10738 
10739 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10740 				 plane->base.id, new_plane_crtc->base.id);
10741 
10742 		ret = fill_dc_plane_attributes(
10743 			drm_to_adev(new_plane_crtc->dev),
10744 			dc_new_plane_state,
10745 			new_plane_state,
10746 			new_crtc_state);
10747 		if (ret) {
10748 			dc_plane_state_release(dc_new_plane_state);
10749 			return ret;
10750 		}
10751 
10752 		ret = dm_atomic_get_state(state, &dm_state);
10753 		if (ret) {
10754 			dc_plane_state_release(dc_new_plane_state);
10755 			return ret;
10756 		}
10757 
10758 		/*
10759 		 * Any atomic check errors that occur after this will
10760 		 * not need a release. The plane state will be attached
10761 		 * to the stream, and therefore part of the atomic
10762 		 * state. It'll be released when the atomic state is
10763 		 * cleaned.
10764 		 */
10765 		if (!dc_add_plane_to_context(
10766 				dc,
10767 				dm_new_crtc_state->stream,
10768 				dc_new_plane_state,
10769 				dm_state->context)) {
10770 
10771 			dc_plane_state_release(dc_new_plane_state);
10772 			return -EINVAL;
10773 		}
10774 
10775 		dm_new_plane_state->dc_state = dc_new_plane_state;
10776 
10777 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10778 
10779 		/* Tell DC to do a full surface update every time there
10780 		 * is a plane change. Inefficient, but works for now.
10781 		 */
10782 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10783 
10784 		*lock_and_validation_needed = true;
10785 	}
10786 
10787 
10788 	return ret;
10789 }
10790 
10791 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10792 				       int *src_w, int *src_h)
10793 {
10794 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10795 	case DRM_MODE_ROTATE_90:
10796 	case DRM_MODE_ROTATE_270:
10797 		*src_w = plane_state->src_h >> 16;
10798 		*src_h = plane_state->src_w >> 16;
10799 		break;
10800 	case DRM_MODE_ROTATE_0:
10801 	case DRM_MODE_ROTATE_180:
10802 	default:
10803 		*src_w = plane_state->src_w >> 16;
10804 		*src_h = plane_state->src_h >> 16;
10805 		break;
10806 	}
10807 }
10808 
10809 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10810 				struct drm_crtc *crtc,
10811 				struct drm_crtc_state *new_crtc_state)
10812 {
10813 	struct drm_plane *cursor = crtc->cursor, *underlying;
10814 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10815 	int i;
10816 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10817 	int cursor_src_w, cursor_src_h;
10818 	int underlying_src_w, underlying_src_h;
10819 
10820 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10821 	 * cursor per pipe but it's going to inherit the scaling and
10822 	 * positioning from the underlying pipe. Check the cursor plane's
10823 	 * blending properties match the underlying planes'. */
10824 
10825 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10826 	if (!new_cursor_state || !new_cursor_state->fb) {
10827 		return 0;
10828 	}
10829 
10830 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10831 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10832 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10833 
10834 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10835 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10836 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10837 			continue;
10838 
10839 		/* Ignore disabled planes */
10840 		if (!new_underlying_state->fb)
10841 			continue;
10842 
10843 		dm_get_oriented_plane_size(new_underlying_state,
10844 					   &underlying_src_w, &underlying_src_h);
10845 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10846 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10847 
10848 		if (cursor_scale_w != underlying_scale_w ||
10849 		    cursor_scale_h != underlying_scale_h) {
10850 			drm_dbg_atomic(crtc->dev,
10851 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10852 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10853 			return -EINVAL;
10854 		}
10855 
10856 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10857 		if (new_underlying_state->crtc_x <= 0 &&
10858 		    new_underlying_state->crtc_y <= 0 &&
10859 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10860 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10861 			break;
10862 	}
10863 
10864 	return 0;
10865 }
10866 
10867 #if defined(CONFIG_DRM_AMD_DC_DCN)
10868 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10869 {
10870 	struct drm_connector *connector;
10871 	struct drm_connector_state *conn_state, *old_conn_state;
10872 	struct amdgpu_dm_connector *aconnector = NULL;
10873 	int i;
10874 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10875 		if (!conn_state->crtc)
10876 			conn_state = old_conn_state;
10877 
10878 		if (conn_state->crtc != crtc)
10879 			continue;
10880 
10881 		aconnector = to_amdgpu_dm_connector(connector);
10882 		if (!aconnector->port || !aconnector->mst_port)
10883 			aconnector = NULL;
10884 		else
10885 			break;
10886 	}
10887 
10888 	if (!aconnector)
10889 		return 0;
10890 
10891 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10892 }
10893 #endif
10894 
10895 /**
10896  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10897  * @dev: The DRM device
10898  * @state: The atomic state to commit
10899  *
10900  * Validate that the given atomic state is programmable by DC into hardware.
10901  * This involves constructing a &struct dc_state reflecting the new hardware
10902  * state we wish to commit, then querying DC to see if it is programmable. It's
10903  * important not to modify the existing DC state. Otherwise, atomic_check
10904  * may unexpectedly commit hardware changes.
10905  *
10906  * When validating the DC state, it's important that the right locks are
10907  * acquired. For full updates case which removes/adds/updates streams on one
10908  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10909  * that any such full update commit will wait for completion of any outstanding
10910  * flip using DRMs synchronization events.
10911  *
10912  * Note that DM adds the affected connectors for all CRTCs in state, when that
10913  * might not seem necessary. This is because DC stream creation requires the
10914  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10915  * be possible but non-trivial - a possible TODO item.
10916  *
10917  * Return: -Error code if validation failed.
10918  */
10919 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10920 				  struct drm_atomic_state *state)
10921 {
10922 	struct amdgpu_device *adev = drm_to_adev(dev);
10923 	struct dm_atomic_state *dm_state = NULL;
10924 	struct dc *dc = adev->dm.dc;
10925 	struct drm_connector *connector;
10926 	struct drm_connector_state *old_con_state, *new_con_state;
10927 	struct drm_crtc *crtc;
10928 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10929 	struct drm_plane *plane;
10930 	struct drm_plane_state *old_plane_state, *new_plane_state;
10931 	enum dc_status status;
10932 	int ret, i;
10933 	bool lock_and_validation_needed = false;
10934 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10935 #if defined(CONFIG_DRM_AMD_DC_DCN)
10936 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10937 	struct drm_dp_mst_topology_state *mst_state;
10938 	struct drm_dp_mst_topology_mgr *mgr;
10939 #endif
10940 
10941 	trace_amdgpu_dm_atomic_check_begin(state);
10942 
10943 	ret = drm_atomic_helper_check_modeset(dev, state);
10944 	if (ret) {
10945 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10946 		goto fail;
10947 	}
10948 
10949 	/* Check connector changes */
10950 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10951 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10952 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10953 
10954 		/* Skip connectors that are disabled or part of modeset already. */
10955 		if (!old_con_state->crtc && !new_con_state->crtc)
10956 			continue;
10957 
10958 		if (!new_con_state->crtc)
10959 			continue;
10960 
10961 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10962 		if (IS_ERR(new_crtc_state)) {
10963 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10964 			ret = PTR_ERR(new_crtc_state);
10965 			goto fail;
10966 		}
10967 
10968 		if (dm_old_con_state->abm_level !=
10969 		    dm_new_con_state->abm_level)
10970 			new_crtc_state->connectors_changed = true;
10971 	}
10972 
10973 #if defined(CONFIG_DRM_AMD_DC_DCN)
10974 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10975 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10976 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10977 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10978 				if (ret) {
10979 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10980 					goto fail;
10981 				}
10982 			}
10983 		}
10984 		pre_validate_dsc(state, &dm_state, vars);
10985 	}
10986 #endif
10987 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10988 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10989 
10990 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10991 		    !new_crtc_state->color_mgmt_changed &&
10992 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10993 			dm_old_crtc_state->dsc_force_changed == false)
10994 			continue;
10995 
10996 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10997 		if (ret) {
10998 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10999 			goto fail;
11000 		}
11001 
11002 		if (!new_crtc_state->enable)
11003 			continue;
11004 
11005 		ret = drm_atomic_add_affected_connectors(state, crtc);
11006 		if (ret) {
11007 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11008 			goto fail;
11009 		}
11010 
11011 		ret = drm_atomic_add_affected_planes(state, crtc);
11012 		if (ret) {
11013 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11014 			goto fail;
11015 		}
11016 
11017 		if (dm_old_crtc_state->dsc_force_changed)
11018 			new_crtc_state->mode_changed = true;
11019 	}
11020 
11021 	/*
11022 	 * Add all primary and overlay planes on the CRTC to the state
11023 	 * whenever a plane is enabled to maintain correct z-ordering
11024 	 * and to enable fast surface updates.
11025 	 */
11026 	drm_for_each_crtc(crtc, dev) {
11027 		bool modified = false;
11028 
11029 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11030 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11031 				continue;
11032 
11033 			if (new_plane_state->crtc == crtc ||
11034 			    old_plane_state->crtc == crtc) {
11035 				modified = true;
11036 				break;
11037 			}
11038 		}
11039 
11040 		if (!modified)
11041 			continue;
11042 
11043 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11044 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11045 				continue;
11046 
11047 			new_plane_state =
11048 				drm_atomic_get_plane_state(state, plane);
11049 
11050 			if (IS_ERR(new_plane_state)) {
11051 				ret = PTR_ERR(new_plane_state);
11052 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11053 				goto fail;
11054 			}
11055 		}
11056 	}
11057 
11058 	/* Remove exiting planes if they are modified */
11059 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11060 		ret = dm_update_plane_state(dc, state, plane,
11061 					    old_plane_state,
11062 					    new_plane_state,
11063 					    false,
11064 					    &lock_and_validation_needed);
11065 		if (ret) {
11066 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11067 			goto fail;
11068 		}
11069 	}
11070 
11071 	/* Disable all crtcs which require disable */
11072 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11073 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11074 					   old_crtc_state,
11075 					   new_crtc_state,
11076 					   false,
11077 					   &lock_and_validation_needed);
11078 		if (ret) {
11079 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11080 			goto fail;
11081 		}
11082 	}
11083 
11084 	/* Enable all crtcs which require enable */
11085 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11086 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11087 					   old_crtc_state,
11088 					   new_crtc_state,
11089 					   true,
11090 					   &lock_and_validation_needed);
11091 		if (ret) {
11092 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11093 			goto fail;
11094 		}
11095 	}
11096 
11097 	/* Add new/modified planes */
11098 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11099 		ret = dm_update_plane_state(dc, state, plane,
11100 					    old_plane_state,
11101 					    new_plane_state,
11102 					    true,
11103 					    &lock_and_validation_needed);
11104 		if (ret) {
11105 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11106 			goto fail;
11107 		}
11108 	}
11109 
11110 	/* Run this here since we want to validate the streams we created */
11111 	ret = drm_atomic_helper_check_planes(dev, state);
11112 	if (ret) {
11113 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11114 		goto fail;
11115 	}
11116 
11117 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11118 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11119 		if (dm_new_crtc_state->mpo_requested)
11120 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11121 	}
11122 
11123 	/* Check cursor planes scaling */
11124 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11125 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11126 		if (ret) {
11127 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11128 			goto fail;
11129 		}
11130 	}
11131 
11132 	if (state->legacy_cursor_update) {
11133 		/*
11134 		 * This is a fast cursor update coming from the plane update
11135 		 * helper, check if it can be done asynchronously for better
11136 		 * performance.
11137 		 */
11138 		state->async_update =
11139 			!drm_atomic_helper_async_check(dev, state);
11140 
11141 		/*
11142 		 * Skip the remaining global validation if this is an async
11143 		 * update. Cursor updates can be done without affecting
11144 		 * state or bandwidth calcs and this avoids the performance
11145 		 * penalty of locking the private state object and
11146 		 * allocating a new dc_state.
11147 		 */
11148 		if (state->async_update)
11149 			return 0;
11150 	}
11151 
11152 	/* Check scaling and underscan changes*/
11153 	/* TODO Removed scaling changes validation due to inability to commit
11154 	 * new stream into context w\o causing full reset. Need to
11155 	 * decide how to handle.
11156 	 */
11157 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11158 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11159 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11160 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11161 
11162 		/* Skip any modesets/resets */
11163 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11164 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11165 			continue;
11166 
11167 		/* Skip any thing not scale or underscan changes */
11168 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11169 			continue;
11170 
11171 		lock_and_validation_needed = true;
11172 	}
11173 
11174 #if defined(CONFIG_DRM_AMD_DC_DCN)
11175 	/* set the slot info for each mst_state based on the link encoding format */
11176 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11177 		struct amdgpu_dm_connector *aconnector;
11178 		struct drm_connector *connector;
11179 		struct drm_connector_list_iter iter;
11180 		u8 link_coding_cap;
11181 
11182 		if (!mgr->mst_state )
11183 			continue;
11184 
11185 		drm_connector_list_iter_begin(dev, &iter);
11186 		drm_for_each_connector_iter(connector, &iter) {
11187 			int id = connector->index;
11188 
11189 			if (id == mst_state->mgr->conn_base_id) {
11190 				aconnector = to_amdgpu_dm_connector(connector);
11191 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11192 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11193 
11194 				break;
11195 			}
11196 		}
11197 		drm_connector_list_iter_end(&iter);
11198 
11199 	}
11200 #endif
11201 	/**
11202 	 * Streams and planes are reset when there are changes that affect
11203 	 * bandwidth. Anything that affects bandwidth needs to go through
11204 	 * DC global validation to ensure that the configuration can be applied
11205 	 * to hardware.
11206 	 *
11207 	 * We have to currently stall out here in atomic_check for outstanding
11208 	 * commits to finish in this case because our IRQ handlers reference
11209 	 * DRM state directly - we can end up disabling interrupts too early
11210 	 * if we don't.
11211 	 *
11212 	 * TODO: Remove this stall and drop DM state private objects.
11213 	 */
11214 	if (lock_and_validation_needed) {
11215 		ret = dm_atomic_get_state(state, &dm_state);
11216 		if (ret) {
11217 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11218 			goto fail;
11219 		}
11220 
11221 		ret = do_aquire_global_lock(dev, state);
11222 		if (ret) {
11223 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11224 			goto fail;
11225 		}
11226 
11227 #if defined(CONFIG_DRM_AMD_DC_DCN)
11228 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11229 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11230 			goto fail;
11231 		}
11232 
11233 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11234 		if (ret) {
11235 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11236 			goto fail;
11237 		}
11238 #endif
11239 
11240 		/*
11241 		 * Perform validation of MST topology in the state:
11242 		 * We need to perform MST atomic check before calling
11243 		 * dc_validate_global_state(), or there is a chance
11244 		 * to get stuck in an infinite loop and hang eventually.
11245 		 */
11246 		ret = drm_dp_mst_atomic_check(state);
11247 		if (ret) {
11248 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11249 			goto fail;
11250 		}
11251 		status = dc_validate_global_state(dc, dm_state->context, true);
11252 		if (status != DC_OK) {
11253 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11254 				       dc_status_to_str(status), status);
11255 			ret = -EINVAL;
11256 			goto fail;
11257 		}
11258 	} else {
11259 		/*
11260 		 * The commit is a fast update. Fast updates shouldn't change
11261 		 * the DC context, affect global validation, and can have their
11262 		 * commit work done in parallel with other commits not touching
11263 		 * the same resource. If we have a new DC context as part of
11264 		 * the DM atomic state from validation we need to free it and
11265 		 * retain the existing one instead.
11266 		 *
11267 		 * Furthermore, since the DM atomic state only contains the DC
11268 		 * context and can safely be annulled, we can free the state
11269 		 * and clear the associated private object now to free
11270 		 * some memory and avoid a possible use-after-free later.
11271 		 */
11272 
11273 		for (i = 0; i < state->num_private_objs; i++) {
11274 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11275 
11276 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11277 				int j = state->num_private_objs-1;
11278 
11279 				dm_atomic_destroy_state(obj,
11280 						state->private_objs[i].state);
11281 
11282 				/* If i is not at the end of the array then the
11283 				 * last element needs to be moved to where i was
11284 				 * before the array can safely be truncated.
11285 				 */
11286 				if (i != j)
11287 					state->private_objs[i] =
11288 						state->private_objs[j];
11289 
11290 				state->private_objs[j].ptr = NULL;
11291 				state->private_objs[j].state = NULL;
11292 				state->private_objs[j].old_state = NULL;
11293 				state->private_objs[j].new_state = NULL;
11294 
11295 				state->num_private_objs = j;
11296 				break;
11297 			}
11298 		}
11299 	}
11300 
11301 	/* Store the overall update type for use later in atomic check. */
11302 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11303 		struct dm_crtc_state *dm_new_crtc_state =
11304 			to_dm_crtc_state(new_crtc_state);
11305 
11306 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11307 							 UPDATE_TYPE_FULL :
11308 							 UPDATE_TYPE_FAST;
11309 	}
11310 
11311 	/* Must be success */
11312 	WARN_ON(ret);
11313 
11314 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11315 
11316 	return ret;
11317 
11318 fail:
11319 	if (ret == -EDEADLK)
11320 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11321 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11322 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11323 	else
11324 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11325 
11326 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11327 
11328 	return ret;
11329 }
11330 
11331 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11332 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11333 {
11334 	uint8_t dpcd_data;
11335 	bool capable = false;
11336 
11337 	if (amdgpu_dm_connector->dc_link &&
11338 		dm_helpers_dp_read_dpcd(
11339 				NULL,
11340 				amdgpu_dm_connector->dc_link,
11341 				DP_DOWN_STREAM_PORT_COUNT,
11342 				&dpcd_data,
11343 				sizeof(dpcd_data))) {
11344 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
11345 	}
11346 
11347 	return capable;
11348 }
11349 
11350 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11351 		unsigned int offset,
11352 		unsigned int total_length,
11353 		uint8_t *data,
11354 		unsigned int length,
11355 		struct amdgpu_hdmi_vsdb_info *vsdb)
11356 {
11357 	bool res;
11358 	union dmub_rb_cmd cmd;
11359 	struct dmub_cmd_send_edid_cea *input;
11360 	struct dmub_cmd_edid_cea_output *output;
11361 
11362 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11363 		return false;
11364 
11365 	memset(&cmd, 0, sizeof(cmd));
11366 
11367 	input = &cmd.edid_cea.data.input;
11368 
11369 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11370 	cmd.edid_cea.header.sub_type = 0;
11371 	cmd.edid_cea.header.payload_bytes =
11372 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11373 	input->offset = offset;
11374 	input->length = length;
11375 	input->cea_total_length = total_length;
11376 	memcpy(input->payload, data, length);
11377 
11378 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11379 	if (!res) {
11380 		DRM_ERROR("EDID CEA parser failed\n");
11381 		return false;
11382 	}
11383 
11384 	output = &cmd.edid_cea.data.output;
11385 
11386 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11387 		if (!output->ack.success) {
11388 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11389 					output->ack.offset);
11390 		}
11391 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11392 		if (!output->amd_vsdb.vsdb_found)
11393 			return false;
11394 
11395 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11396 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11397 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11398 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11399 	} else {
11400 		DRM_WARN("Unknown EDID CEA parser results\n");
11401 		return false;
11402 	}
11403 
11404 	return true;
11405 }
11406 
11407 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11408 		uint8_t *edid_ext, int len,
11409 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11410 {
11411 	int i;
11412 
11413 	/* send extension block to DMCU for parsing */
11414 	for (i = 0; i < len; i += 8) {
11415 		bool res;
11416 		int offset;
11417 
11418 		/* send 8 bytes a time */
11419 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11420 			return false;
11421 
11422 		if (i+8 == len) {
11423 			/* EDID block sent completed, expect result */
11424 			int version, min_rate, max_rate;
11425 
11426 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11427 			if (res) {
11428 				/* amd vsdb found */
11429 				vsdb_info->freesync_supported = 1;
11430 				vsdb_info->amd_vsdb_version = version;
11431 				vsdb_info->min_refresh_rate_hz = min_rate;
11432 				vsdb_info->max_refresh_rate_hz = max_rate;
11433 				return true;
11434 			}
11435 			/* not amd vsdb */
11436 			return false;
11437 		}
11438 
11439 		/* check for ack*/
11440 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11441 		if (!res)
11442 			return false;
11443 	}
11444 
11445 	return false;
11446 }
11447 
11448 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11449 		uint8_t *edid_ext, int len,
11450 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11451 {
11452 	int i;
11453 
11454 	/* send extension block to DMCU for parsing */
11455 	for (i = 0; i < len; i += 8) {
11456 		/* send 8 bytes a time */
11457 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11458 			return false;
11459 	}
11460 
11461 	return vsdb_info->freesync_supported;
11462 }
11463 
11464 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11465 		uint8_t *edid_ext, int len,
11466 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11467 {
11468 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11469 
11470 	if (adev->dm.dmub_srv)
11471 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11472 	else
11473 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11474 }
11475 
11476 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11477 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11478 {
11479 	uint8_t *edid_ext = NULL;
11480 	int i;
11481 	bool valid_vsdb_found = false;
11482 
11483 	/*----- drm_find_cea_extension() -----*/
11484 	/* No EDID or EDID extensions */
11485 	if (edid == NULL || edid->extensions == 0)
11486 		return -ENODEV;
11487 
11488 	/* Find CEA extension */
11489 	for (i = 0; i < edid->extensions; i++) {
11490 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11491 		if (edid_ext[0] == CEA_EXT)
11492 			break;
11493 	}
11494 
11495 	if (i == edid->extensions)
11496 		return -ENODEV;
11497 
11498 	/*----- cea_db_offsets() -----*/
11499 	if (edid_ext[0] != CEA_EXT)
11500 		return -ENODEV;
11501 
11502 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11503 
11504 	return valid_vsdb_found ? i : -ENODEV;
11505 }
11506 
11507 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11508 					struct edid *edid)
11509 {
11510 	int i = 0;
11511 	struct detailed_timing *timing;
11512 	struct detailed_non_pixel *data;
11513 	struct detailed_data_monitor_range *range;
11514 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11515 			to_amdgpu_dm_connector(connector);
11516 	struct dm_connector_state *dm_con_state = NULL;
11517 	struct dc_sink *sink;
11518 
11519 	struct drm_device *dev = connector->dev;
11520 	struct amdgpu_device *adev = drm_to_adev(dev);
11521 	bool freesync_capable = false;
11522 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11523 
11524 	if (!connector->state) {
11525 		DRM_ERROR("%s - Connector has no state", __func__);
11526 		goto update;
11527 	}
11528 
11529 	sink = amdgpu_dm_connector->dc_sink ?
11530 		amdgpu_dm_connector->dc_sink :
11531 		amdgpu_dm_connector->dc_em_sink;
11532 
11533 	if (!edid || !sink) {
11534 		dm_con_state = to_dm_connector_state(connector->state);
11535 
11536 		amdgpu_dm_connector->min_vfreq = 0;
11537 		amdgpu_dm_connector->max_vfreq = 0;
11538 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11539 		connector->display_info.monitor_range.min_vfreq = 0;
11540 		connector->display_info.monitor_range.max_vfreq = 0;
11541 		freesync_capable = false;
11542 
11543 		goto update;
11544 	}
11545 
11546 	dm_con_state = to_dm_connector_state(connector->state);
11547 
11548 	if (!adev->dm.freesync_module)
11549 		goto update;
11550 
11551 
11552 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11553 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11554 		bool edid_check_required = false;
11555 
11556 		if (edid) {
11557 			edid_check_required = is_dp_capable_without_timing_msa(
11558 						adev->dm.dc,
11559 						amdgpu_dm_connector);
11560 		}
11561 
11562 		if (edid_check_required == true && (edid->version > 1 ||
11563 		   (edid->version == 1 && edid->revision > 1))) {
11564 			for (i = 0; i < 4; i++) {
11565 
11566 				timing	= &edid->detailed_timings[i];
11567 				data	= &timing->data.other_data;
11568 				range	= &data->data.range;
11569 				/*
11570 				 * Check if monitor has continuous frequency mode
11571 				 */
11572 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11573 					continue;
11574 				/*
11575 				 * Check for flag range limits only. If flag == 1 then
11576 				 * no additional timing information provided.
11577 				 * Default GTF, GTF Secondary curve and CVT are not
11578 				 * supported
11579 				 */
11580 				if (range->flags != 1)
11581 					continue;
11582 
11583 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11584 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11585 				amdgpu_dm_connector->pixel_clock_mhz =
11586 					range->pixel_clock_mhz * 10;
11587 
11588 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11589 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11590 
11591 				break;
11592 			}
11593 
11594 			if (amdgpu_dm_connector->max_vfreq -
11595 			    amdgpu_dm_connector->min_vfreq > 10) {
11596 
11597 				freesync_capable = true;
11598 			}
11599 		}
11600 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11601 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11602 		if (i >= 0 && vsdb_info.freesync_supported) {
11603 			timing  = &edid->detailed_timings[i];
11604 			data    = &timing->data.other_data;
11605 
11606 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11607 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11608 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11609 				freesync_capable = true;
11610 
11611 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11612 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11613 		}
11614 	}
11615 
11616 update:
11617 	if (dm_con_state)
11618 		dm_con_state->freesync_capable = freesync_capable;
11619 
11620 	if (connector->vrr_capable_property)
11621 		drm_connector_set_vrr_capable_property(connector,
11622 						       freesync_capable);
11623 }
11624 
11625 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11626 {
11627 	struct amdgpu_device *adev = drm_to_adev(dev);
11628 	struct dc *dc = adev->dm.dc;
11629 	int i;
11630 
11631 	mutex_lock(&adev->dm.dc_lock);
11632 	if (dc->current_state) {
11633 		for (i = 0; i < dc->current_state->stream_count; ++i)
11634 			dc->current_state->streams[i]
11635 				->triggered_crtc_reset.enabled =
11636 				adev->dm.force_timing_sync;
11637 
11638 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11639 		dc_trigger_sync(dc, dc->current_state);
11640 	}
11641 	mutex_unlock(&adev->dm.dc_lock);
11642 }
11643 
11644 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11645 		       uint32_t value, const char *func_name)
11646 {
11647 #ifdef DM_CHECK_ADDR_0
11648 	if (address == 0) {
11649 		DC_ERR("invalid register write. address = 0");
11650 		return;
11651 	}
11652 #endif
11653 	cgs_write_register(ctx->cgs_device, address, value);
11654 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11655 }
11656 
11657 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11658 			  const char *func_name)
11659 {
11660 	uint32_t value;
11661 #ifdef DM_CHECK_ADDR_0
11662 	if (address == 0) {
11663 		DC_ERR("invalid register read; address = 0\n");
11664 		return 0;
11665 	}
11666 #endif
11667 
11668 	if (ctx->dmub_srv &&
11669 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11670 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11671 		ASSERT(false);
11672 		return 0;
11673 	}
11674 
11675 	value = cgs_read_register(ctx->cgs_device, address);
11676 
11677 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11678 
11679 	return value;
11680 }
11681 
11682 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11683 						struct dc_context *ctx,
11684 						uint8_t status_type,
11685 						uint32_t *operation_result)
11686 {
11687 	struct amdgpu_device *adev = ctx->driver_context;
11688 	int return_status = -1;
11689 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11690 
11691 	if (is_cmd_aux) {
11692 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11693 			return_status = p_notify->aux_reply.length;
11694 			*operation_result = p_notify->result;
11695 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11696 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11697 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11698 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11699 		} else {
11700 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11701 		}
11702 	} else {
11703 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11704 			return_status = 0;
11705 			*operation_result = p_notify->sc_status;
11706 		} else {
11707 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11708 		}
11709 	}
11710 
11711 	return return_status;
11712 }
11713 
11714 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11715 	unsigned int link_index, void *cmd_payload, void *operation_result)
11716 {
11717 	struct amdgpu_device *adev = ctx->driver_context;
11718 	int ret = 0;
11719 
11720 	if (is_cmd_aux) {
11721 		dc_process_dmub_aux_transfer_async(ctx->dc,
11722 			link_index, (struct aux_payload *)cmd_payload);
11723 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11724 					(struct set_config_cmd_payload *)cmd_payload,
11725 					adev->dm.dmub_notify)) {
11726 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11727 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11728 					(uint32_t *)operation_result);
11729 	}
11730 
11731 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11732 	if (ret == 0) {
11733 		DRM_ERROR("wait_for_completion_timeout timeout!");
11734 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11735 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11736 				(uint32_t *)operation_result);
11737 	}
11738 
11739 	if (is_cmd_aux) {
11740 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11741 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11742 
11743 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11744 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11745 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11746 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11747 				       adev->dm.dmub_notify->aux_reply.length);
11748 			}
11749 		}
11750 	}
11751 
11752 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11753 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11754 			(uint32_t *)operation_result);
11755 }
11756 
11757 /*
11758  * Check whether seamless boot is supported.
11759  *
11760  * So far we only support seamless boot on CHIP_VANGOGH.
11761  * If everything goes well, we may consider expanding
11762  * seamless boot to other ASICs.
11763  */
11764 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11765 {
11766 	switch (adev->asic_type) {
11767 	case CHIP_VANGOGH:
11768 		if (!adev->mman.keep_stolen_vga_memory)
11769 			return true;
11770 		break;
11771 	default:
11772 		break;
11773 	}
11774 
11775 	return false;
11776 }
11777