/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}
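/*
 * dm_crtc_get_scanoutpos() - Return the current scanout position
 *
 * Queries DC for the scanout position of the given CRTC and packs it
 * back into the register-style format the base driver expects:
 * *position holds the vertical position in the low word and the
 * horizontal position in the high word; *vbl holds vblank start in
 * the low word and vblank end in the high word.
 */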
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

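/*
 * DC needs the stream's vmin/vmax re-adjusted when the CRTC enters
 * fixed-rate VRR, or whenever VRR toggles between active and inactive.
 */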
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

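/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * In VRR mode, runs core vblank handling after the end of the
 * front-porch (where vblank timestamps are valid) and performs BTR
 * processing for pre-DCE12 ASICs.
 */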
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif
#endif

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

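/*
 * dm_dmub_hw_init() - Bring up the DMUB service
 *
 * Copies the DMCUB firmware, BSS data and VBIOS into the framebuffer
 * regions reserved for DMUB, clears the mailbox, tracebuffer and
 * firmware-state windows, programs the hardware parameters, and waits
 * for the firmware auto-load to complete. DMCU and ABM are initialized
 * here as well, when present.
 */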
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
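/*
 * Translate the GMC aperture and GART layout into the physical address
 * space configuration (system aperture, AGP range and GART page-table
 * addresses) that DC programs through dc_setup_system_context().
 */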
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
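/*
 * Track how many CRTCs have vblank interrupts enabled, and allow DC
 * idle optimizations (MALL stutter) only when that count reaches zero.
 */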
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(
		dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

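/*
 * dm_dmub_sw_init() - Software setup for the DMUB service
 *
 * Selects and validates the DMCUB firmware for the ASIC, creates the
 * DMUB service, sizes its regions, backs them with a VRAM buffer, and
 * computes the per-region framebuffer info consumed later by
 * dm_dmub_hw_init().
 */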
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

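/*
 * Start topology management on every connector that detected an MST
 * branch device; on failure, fall back to treating the link as a
 * single display.
 */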
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

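/*
 * Suspend or resume the MST topology managers of all root MST
 * connectors. If a topology fails to resume, tear down its MST state
 * and send a hotplug event so userspace can reprobe.
 */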
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
1720 	case CHIP_NAVI10:
1721 	case CHIP_NAVI14:
1722 	case CHIP_NAVI12:
1723 		break;
1724 	default:
1725 		return 0;
1726 	}
1727 
1728 	ret = smu_write_watermarks_table(smu);
1729 	if (ret) {
1730 		DRM_ERROR("Failed to update WMTABLE!\n");
1731 		return ret;
1732 	}
1733 
1734 	return 0;
1735 }
1736 
1737 /**
1738  * dm_hw_init() - Initialize DC device
1739  * @handle: The base driver device containing the amdgpu_dm device.
1740  *
1741  * Initialize the &struct amdgpu_display_manager device. This involves calling
1742  * the initializers of each DM component, then populating the struct with them.
1743  *
1744  * Although the function implies hardware initialization, both hardware and
1745  * software are initialized here. Splitting them out to their relevant init
1746  * hooks is a future TODO item.
1747  *
1748  * Some notable things that are initialized here:
1749  *
1750  * - Display Core, both software and hardware
1751  * - DC modules that we need (freesync and color management)
1752  * - DRM software states
1753  * - Interrupt sources and handlers
1754  * - Vblank support
1755  * - Debug FS entries, if enabled
1756  */
1757 static int dm_hw_init(void *handle)
1758 {
1759 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1760 	/* Create DAL display manager */
1761 	amdgpu_dm_init(adev);
1762 	amdgpu_dm_hpd_init(adev);
1763 
1764 	return 0;
1765 }
1766 
1767 /**
1768  * dm_hw_fini() - Teardown DC device
1769  * @handle: The base driver device containing the amdgpu_dm device.
1770  *
1771  * Teardown components within &struct amdgpu_display_manager that require
1772  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1773  * were loaded. Also flush IRQ workqueues and disable them.
1774  */
1775 static int dm_hw_fini(void *handle)
1776 {
1777 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1778 
1779 	amdgpu_dm_hpd_fini(adev);
1780 
1781 	amdgpu_dm_irq_fini(adev);
1782 	amdgpu_dm_fini(adev);
1783 	return 0;
1784 }
1785 
1786 
1787 static int dm_enable_vblank(struct drm_crtc *crtc);
1788 static void dm_disable_vblank(struct drm_crtc *crtc);
1789 
1790 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1791 				 struct dc_state *state, bool enable)
1792 {
1793 	enum dc_irq_source irq_source;
1794 	struct amdgpu_crtc *acrtc;
1795 	int rc = -EBUSY;
1796 	int i = 0;
1797 
1798 	for (i = 0; i < state->stream_count; i++) {
1799 		acrtc = get_crtc_by_otg_inst(
1800 				adev, state->stream_status[i].primary_otg_inst);
1801 
1802 		if (acrtc && state->stream_status[i].plane_count != 0) {
1803 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1804 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1807 			if (rc)
1808 				DRM_WARN("Failed to %s pflip interrupts\n",
1809 					 enable ? "enable" : "disable");
1810 
1811 			if (enable) {
1812 				rc = dm_enable_vblank(&acrtc->base);
1813 				if (rc)
1814 					DRM_WARN("Failed to enable vblank interrupts\n");
1815 			} else {
1816 				dm_disable_vblank(&acrtc->base);
1817 			}
1818 
1819 		}
1820 	}
1821 
1822 }
1823 
1824 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1825 {
1826 	struct dc_state *context = NULL;
1827 	enum dc_status res = DC_ERROR_UNEXPECTED;
1828 	int i;
1829 	struct dc_stream_state *del_streams[MAX_PIPES];
1830 	int del_streams_count = 0;
1831 
1832 	memset(del_streams, 0, sizeof(del_streams));
1833 
1834 	context = dc_create_state(dc);
1835 	if (context == NULL)
1836 		goto context_alloc_fail;
1837 
1838 	dc_resource_state_copy_construct_current(dc, context);
1839 
1840 	/* First remove from context all streams */
1841 	for (i = 0; i < context->stream_count; i++) {
1842 		struct dc_stream_state *stream = context->streams[i];
1843 
1844 		del_streams[del_streams_count++] = stream;
1845 	}
1846 
1847 	/* Remove all planes for removed streams and then remove the streams */
1848 	for (i = 0; i < del_streams_count; i++) {
1849 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1850 			res = DC_FAIL_DETACH_SURFACES;
1851 			goto fail;
1852 		}
1853 
1854 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1855 		if (res != DC_OK)
1856 			goto fail;
1857 	}
1858 
1859 
1860 	res = dc_validate_global_state(dc, context, false);
1861 
1862 	if (res != DC_OK) {
1863 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1864 		goto fail;
1865 	}
1866 
1867 	res = dc_commit_state(dc, context);
1868 
1869 fail:
1870 	dc_release_state(context);
1871 
1872 context_alloc_fail:
1873 	return res;
1874 }
1875 
1876 static int dm_suspend(void *handle)
1877 {
1878 	struct amdgpu_device *adev = handle;
1879 	struct amdgpu_display_manager *dm = &adev->dm;
1880 	int ret = 0;
1881 
1882 	if (amdgpu_in_reset(adev)) {
1883 		mutex_lock(&dm->dc_lock);
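		/*
		 * Note: dc_lock stays held across the GPU reset; the matching
		 * unlock happens at the end of the reset path in dm_resume().
		 */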
1884 
1885 #if defined(CONFIG_DRM_AMD_DC_DCN)
1886 		dc_allow_idle_optimizations(adev->dm.dc, false);
1887 #endif
1888 
1889 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1890 
1891 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1892 
1893 		amdgpu_dm_commit_zero_streams(dm->dc);
1894 
1895 		amdgpu_dm_irq_suspend(adev);
1896 
1897 		return ret;
1898 	}
1899 
1900 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1901 	amdgpu_dm_crtc_secure_display_suspend(adev);
1902 #endif
1903 	WARN_ON(adev->dm.cached_state);
1904 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1905 
1906 	s3_handle_mst(adev_to_drm(adev), true);
1907 
1908 	amdgpu_dm_irq_suspend(adev);
1909 
1910 
1911 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1912 
1913 	return 0;
1914 }
1915 
1916 static struct amdgpu_dm_connector *
1917 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1918 					     struct drm_crtc *crtc)
1919 {
1920 	uint32_t i;
1921 	struct drm_connector_state *new_con_state;
1922 	struct drm_connector *connector;
1923 	struct drm_crtc *crtc_from_state;
1924 
1925 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1926 		crtc_from_state = new_con_state->crtc;
1927 
1928 		if (crtc_from_state == crtc)
1929 			return to_amdgpu_dm_connector(connector);
1930 	}
1931 
1932 	return NULL;
1933 }
1934 
1935 static void emulated_link_detect(struct dc_link *link)
1936 {
1937 	struct dc_sink_init_data sink_init_data = { 0 };
1938 	struct display_sink_capability sink_caps = { 0 };
1939 	enum dc_edid_status edid_status;
1940 	struct dc_context *dc_ctx = link->ctx;
1941 	struct dc_sink *sink = NULL;
1942 	struct dc_sink *prev_sink = NULL;
1943 
1944 	link->type = dc_connection_none;
1945 	prev_sink = link->local_sink;
1946 
1947 	if (prev_sink)
1948 		dc_sink_release(prev_sink);
1949 
1950 	switch (link->connector_signal) {
1951 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1952 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1953 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1954 		break;
1955 	}
1956 
1957 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1958 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1959 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1960 		break;
1961 	}
1962 
1963 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1964 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1965 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1966 		break;
1967 	}
1968 
1969 	case SIGNAL_TYPE_LVDS: {
1970 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1971 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1972 		break;
1973 	}
1974 
1975 	case SIGNAL_TYPE_EDP: {
1976 		sink_caps.transaction_type =
1977 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1978 		sink_caps.signal = SIGNAL_TYPE_EDP;
1979 		break;
1980 	}
1981 
1982 	case SIGNAL_TYPE_DISPLAY_PORT: {
1983 		sink_caps.transaction_type =
1984 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
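		/* Emulated DP sinks are exposed with a virtual signal type. */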
1985 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1986 		break;
1987 	}
1988 
1989 	default:
1990 		DC_ERROR("Invalid connector type! signal:%d\n",
1991 			link->connector_signal);
1992 		return;
1993 	}
1994 
1995 	sink_init_data.link = link;
1996 	sink_init_data.sink_signal = sink_caps.signal;
1997 
1998 	sink = dc_sink_create(&sink_init_data);
1999 	if (!sink) {
2000 		DC_ERROR("Failed to create sink!\n");
2001 		return;
2002 	}
2003 
2004 	/* dc_sink_create returns a new reference */
2005 	link->local_sink = sink;
2006 
2007 	edid_status = dm_helpers_read_local_edid(
2008 			link->ctx,
2009 			link,
2010 			sink);
2011 
2012 	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
2014 
2015 }
2016 
2017 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2018 				     struct amdgpu_display_manager *dm)
2019 {
2020 	struct {
2021 		struct dc_surface_update surface_updates[MAX_SURFACES];
2022 		struct dc_plane_info plane_infos[MAX_SURFACES];
2023 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2024 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2025 		struct dc_stream_update stream_update;
	} *bundle;
2027 	int k, m;
2028 
2029 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2030 
2031 	if (!bundle) {
2032 		dm_error("Failed to allocate update bundle\n");
2033 		goto cleanup;
2034 	}
2035 
2036 	for (k = 0; k < dc_state->stream_count; k++) {
2037 		bundle->stream_update.stream = dc_state->streams[k];
2038 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
2049 	}
2050 
2051 cleanup:
2052 	kfree(bundle);
2053 
2054 	return;
2055 }
2056 
2057 static void dm_set_dpms_off(struct dc_link *link)
2058 {
2059 	struct dc_stream_state *stream_state;
2060 	struct amdgpu_dm_connector *aconnector = link->priv;
2061 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2062 	struct dc_stream_update stream_update;
2063 	bool dpms_off = true;
2064 
2065 	memset(&stream_update, 0, sizeof(stream_update));
2066 	stream_update.dpms_off = &dpms_off;
2067 
2068 	mutex_lock(&adev->dm.dc_lock);
2069 	stream_state = dc_stream_find_from_link(link);
2070 
2071 	if (stream_state == NULL) {
2072 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2073 		mutex_unlock(&adev->dm.dc_lock);
2074 		return;
2075 	}
2076 
2077 	stream_update.stream = stream_state;
2078 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2079 				     stream_state, &stream_update,
2080 				     stream_state->ctx->dc->current_state);
2081 	mutex_unlock(&adev->dm.dc_lock);
2082 }
2083 
2084 static int dm_resume(void *handle)
2085 {
2086 	struct amdgpu_device *adev = handle;
2087 	struct drm_device *ddev = adev_to_drm(adev);
2088 	struct amdgpu_display_manager *dm = &adev->dm;
2089 	struct amdgpu_dm_connector *aconnector;
2090 	struct drm_connector *connector;
2091 	struct drm_connector_list_iter iter;
2092 	struct drm_crtc *crtc;
2093 	struct drm_crtc_state *new_crtc_state;
2094 	struct dm_crtc_state *dm_new_crtc_state;
2095 	struct drm_plane *plane;
2096 	struct drm_plane_state *new_plane_state;
2097 	struct dm_plane_state *dm_new_plane_state;
2098 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2099 	enum dc_connection_type new_connection_type = dc_connection_none;
2100 	struct dc_state *dc_state;
2101 	int i, r, j;
2102 
2103 	if (amdgpu_in_reset(adev)) {
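		/* Reset path: replay the dc_state cached in dm_suspend(). */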
2104 		dc_state = dm->cached_dc_state;
2105 
2106 		r = dm_dmub_hw_init(adev);
2107 		if (r)
2108 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2109 
2110 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2111 		dc_resume(dm->dc);
2112 
2113 		amdgpu_dm_irq_resume_early(adev);
2114 
		/* Mark everything for a full update on the first commit. */
		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}
2122 
2123 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2124 
2125 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2126 
2127 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2128 
2129 		dc_release_state(dm->cached_dc_state);
2130 		dm->cached_dc_state = NULL;
2131 
2132 		amdgpu_dm_irq_resume_late(adev);
2133 
2134 		mutex_unlock(&dm->dc_lock);
2135 
2136 		return 0;
2137 	}
2138 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2139 	dc_release_state(dm_state->context);
2140 	dm_state->context = dc_create_state(dm->dc);
2141 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2142 	dc_resource_state_construct(dm->dc, dm_state->context);
2143 
2144 	/* Before powering on DC we need to re-initialize DMUB. */
2145 	r = dm_dmub_hw_init(adev);
2146 	if (r)
2147 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2148 
2149 	/* power on hardware */
2150 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2151 
2152 	/* program HPD filter */
2153 	dc_resume(dm->dc);
2154 
2155 	/*
2156 	 * early enable HPD Rx IRQ, should be done before set mode as short
2157 	 * pulse interrupts are used for MST
2158 	 */
2159 	amdgpu_dm_irq_resume_early(adev);
2160 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2162 	s3_handle_mst(ddev, false);
2163 
	/* Do detection */
2165 	drm_connector_list_iter_begin(ddev, &iter);
2166 	drm_for_each_connector_iter(connector, &iter) {
2167 		aconnector = to_amdgpu_dm_connector(connector);
2168 
2169 		/*
2170 		 * this is the case when traversing through already created
2171 		 * MST connectors, should be skipped
2172 		 */
2173 		if (aconnector->mst_port)
2174 			continue;
2175 
2176 		mutex_lock(&aconnector->hpd_lock);
2177 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2178 			DRM_ERROR("KMS: Failed to detect connector\n");
2179 
2180 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2181 			emulated_link_detect(aconnector->dc_link);
2182 		else
2183 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2184 
2185 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2186 			aconnector->fake_enable = false;
2187 
2188 		if (aconnector->dc_sink)
2189 			dc_sink_release(aconnector->dc_sink);
2190 		aconnector->dc_sink = NULL;
2191 		amdgpu_dm_update_connector_after_detect(aconnector);
2192 		mutex_unlock(&aconnector->hpd_lock);
2193 	}
2194 	drm_connector_list_iter_end(&iter);
2195 
2196 	/* Force mode set in atomic commit */
2197 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2198 		new_crtc_state->active_changed = true;
2199 
2200 	/*
2201 	 * atomic_check is expected to create the dc states. We need to release
2202 	 * them here, since they were duplicated as part of the suspend
2203 	 * procedure.
2204 	 */
2205 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2206 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2207 		if (dm_new_crtc_state->stream) {
2208 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2209 			dc_stream_release(dm_new_crtc_state->stream);
2210 			dm_new_crtc_state->stream = NULL;
2211 		}
2212 	}
2213 
2214 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2215 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2216 		if (dm_new_plane_state->dc_state) {
2217 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2218 			dc_plane_state_release(dm_new_plane_state->dc_state);
2219 			dm_new_plane_state->dc_state = NULL;
2220 		}
2221 	}
2222 
2223 	drm_atomic_helper_resume(ddev, dm->cached_state);
2224 
2225 	dm->cached_state = NULL;
2226 
2227 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2228 	amdgpu_dm_crtc_secure_display_resume(adev);
2229 #endif
2230 
2231 	amdgpu_dm_irq_resume_late(adev);
2232 
2233 	amdgpu_dm_smu_write_watermarks_table(adev);
2234 
2235 	return 0;
2236 }
2237 
2238 /**
2239  * DOC: DM Lifecycle
2240  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2242  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2243  * the base driver's device list to be initialized and torn down accordingly.
2244  *
2245  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2246  */
2247 
2248 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2249 	.name = "dm",
2250 	.early_init = dm_early_init,
2251 	.late_init = dm_late_init,
2252 	.sw_init = dm_sw_init,
2253 	.sw_fini = dm_sw_fini,
2254 	.hw_init = dm_hw_init,
2255 	.hw_fini = dm_hw_fini,
2256 	.suspend = dm_suspend,
2257 	.resume = dm_resume,
2258 	.is_idle = dm_is_idle,
2259 	.wait_for_idle = dm_wait_for_idle,
2260 	.check_soft_reset = dm_check_soft_reset,
2261 	.soft_reset = dm_soft_reset,
2262 	.set_clockgating_state = dm_set_clockgating_state,
2263 	.set_powergating_state = dm_set_powergating_state,
2264 };
2265 
const struct amdgpu_ip_block_version dm_ip_block = {
2268 	.type = AMD_IP_BLOCK_TYPE_DCE,
2269 	.major = 1,
2270 	.minor = 0,
2271 	.rev = 0,
2272 	.funcs = &amdgpu_dm_funcs,
2273 };
2274 
2275 
2276 /**
2277  * DOC: atomic
2278  *
2279  * *WIP*
2280  */
2281 
2282 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2283 	.fb_create = amdgpu_display_user_framebuffer_create,
2284 	.get_format_info = amd_get_format_info,
2285 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2286 	.atomic_check = amdgpu_dm_atomic_check,
2287 	.atomic_commit = drm_atomic_helper_commit,
2288 };
2289 
2290 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2291 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2292 };
2293 
2294 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2295 {
2296 	u32 max_cll, min_cll, max, min, q, r;
2297 	struct amdgpu_dm_backlight_caps *caps;
2298 	struct amdgpu_display_manager *dm;
2299 	struct drm_connector *conn_base;
2300 	struct amdgpu_device *adev;
2301 	struct dc_link *link = NULL;
2302 	static const u8 pre_computed_values[] = {
2303 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2304 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2305 
2306 	if (!aconnector || !aconnector->dc_link)
2307 		return;
2308 
2309 	link = aconnector->dc_link;
2310 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2311 		return;
2312 
2313 	conn_base = &aconnector->base;
2314 	adev = drm_to_adev(conn_base->dev);
2315 	dm = &adev->dm;
2316 	caps = &dm->backlight_caps;
2317 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2318 	caps->aux_support = false;
2319 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2320 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2321 
2322 	if (caps->ext_caps->bits.oled == 1 ||
2323 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2324 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2325 		caps->aux_support = true;
2326 
2327 	if (amdgpu_backlight == 0)
2328 		caps->aux_support = false;
2329 	else if (amdgpu_backlight == 1)
2330 		caps->aux_support = true;
2331 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression would normally require floating-point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * we know that CV can be written as: CV = 32*q + r. Replacing CV in
	 * the Luminance expression gives 50*(2**q)*(2**(r/32)), so we only
	 * need to pre-compute the values of 50*(2**(r/32)). For that
	 * pre-computation we used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified in
	 * pre_computed_values.
	 */
2347 	q = max_cll >> 5;
2348 	r = max_cll % 32;
2349 	max = (1 << q) * pre_computed_values[r];
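	/* e.g. max_cll = 100 gives q = 3, r = 4: max = 8 * 55 = 440 nits. */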
2350 
2351 	// min luminance: maxLum * (CV/255)^2 / 100
2352 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2353 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2354 
2355 	caps->aux_max_input_signal = max;
2356 	caps->aux_min_input_signal = min;
2357 }
2358 
2359 void amdgpu_dm_update_connector_after_detect(
2360 		struct amdgpu_dm_connector *aconnector)
2361 {
2362 	struct drm_connector *connector = &aconnector->base;
2363 	struct drm_device *dev = connector->dev;
2364 	struct dc_sink *sink;
2365 
2366 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2368 		return;
2369 
2370 	sink = aconnector->dc_link->local_sink;
2371 	if (sink)
2372 		dc_sink_retain(sink);
2373 
2374 	/*
2375 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2376 	 * the connector sink is set to either fake or physical sink depends on link status.
2377 	 * Skip if already done during boot.
2378 	 */
2379 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2380 			&& aconnector->dc_em_sink) {
2381 
2382 		/*
2383 		 * For S3 resume with headless use eml_sink to fake stream
2384 		 * because on resume connector->sink is set to NULL
2385 		 */
2386 		mutex_lock(&dev->mode_config.mutex);
2387 
2388 		if (sink) {
2389 			if (aconnector->dc_sink) {
2390 				amdgpu_dm_update_freesync_caps(connector, NULL);
2391 				/*
2392 				 * retain and release below are used to
2393 				 * bump up refcount for sink because the link doesn't point
2394 				 * to it anymore after disconnect, so on next crtc to connector
2395 				 * reshuffle by UMD we will get into unwanted dc_sink release
2396 				 */
2397 				dc_sink_release(aconnector->dc_sink);
2398 			}
2399 			aconnector->dc_sink = sink;
2400 			dc_sink_retain(aconnector->dc_sink);
2401 			amdgpu_dm_update_freesync_caps(connector,
2402 					aconnector->edid);
2403 		} else {
2404 			amdgpu_dm_update_freesync_caps(connector, NULL);
2405 			if (!aconnector->dc_sink) {
2406 				aconnector->dc_sink = aconnector->dc_em_sink;
2407 				dc_sink_retain(aconnector->dc_sink);
2408 			}
2409 		}
2410 
2411 		mutex_unlock(&dev->mode_config.mutex);
2412 
2413 		if (sink)
2414 			dc_sink_release(sink);
2415 		return;
2416 	}
2417 
2418 	/*
2419 	 * TODO: temporary guard to look for proper fix
2420 	 * if this sink is MST sink, we should not do anything
2421 	 */
2422 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2423 		dc_sink_release(sink);
2424 		return;
2425 	}
2426 
2427 	if (aconnector->dc_sink == sink) {
2428 		/*
2429 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2430 		 * Do nothing!!
2431 		 */
2432 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2433 				aconnector->connector_id);
2434 		if (sink)
2435 			dc_sink_release(sink);
2436 		return;
2437 	}
2438 
2439 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2440 		aconnector->connector_id, aconnector->dc_sink, sink);
2441 
2442 	mutex_lock(&dev->mode_config.mutex);
2443 
2444 	/*
2445 	 * 1. Update status of the drm connector
2446 	 * 2. Send an event and let userspace tell us what to do
2447 	 */
2448 	if (sink) {
2449 		/*
2450 		 * TODO: check if we still need the S3 mode update workaround.
2451 		 * If yes, put it here.
2452 		 */
2453 		if (aconnector->dc_sink) {
2454 			amdgpu_dm_update_freesync_caps(connector, NULL);
2455 			dc_sink_release(aconnector->dc_sink);
2456 		}
2457 
2458 		aconnector->dc_sink = sink;
2459 		dc_sink_retain(aconnector->dc_sink);
2460 		if (sink->dc_edid.length == 0) {
2461 			aconnector->edid = NULL;
2462 			if (aconnector->dc_link->aux_mode) {
2463 				drm_dp_cec_unset_edid(
2464 					&aconnector->dm_dp_aux.aux);
2465 			}
2466 		} else {
2467 			aconnector->edid =
2468 				(struct edid *)sink->dc_edid.raw_edid;
2469 
2470 			drm_connector_update_edid_property(connector,
2471 							   aconnector->edid);
2472 			if (aconnector->dc_link->aux_mode)
2473 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2474 						    aconnector->edid);
2475 		}
2476 
2477 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2478 		update_connector_ext_caps(aconnector);
2479 	} else {
2480 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2481 		amdgpu_dm_update_freesync_caps(connector, NULL);
2482 		drm_connector_update_edid_property(connector, NULL);
2483 		aconnector->num_modes = 0;
2484 		dc_sink_release(aconnector->dc_sink);
2485 		aconnector->dc_sink = NULL;
2486 		aconnector->edid = NULL;
2487 #ifdef CONFIG_DRM_AMD_DC_HDCP
2488 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2489 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2490 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2491 #endif
2492 	}
2493 
2494 	mutex_unlock(&dev->mode_config.mutex);
2495 
2496 	update_subconnector_property(aconnector);
2497 
2498 	if (sink)
2499 		dc_sink_release(sink);
2500 }
2501 
2502 static void handle_hpd_irq(void *param)
2503 {
2504 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2505 	struct drm_connector *connector = &aconnector->base;
2506 	struct drm_device *dev = connector->dev;
2507 	enum dc_connection_type new_connection_type = dc_connection_none;
2508 #ifdef CONFIG_DRM_AMD_DC_HDCP
2509 	struct amdgpu_device *adev = drm_to_adev(dev);
2510 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2511 #endif
2512 
2513 	/*
2514 	 * In case of failure or MST no need to update connector status or notify the OS
2515 	 * since (for MST case) MST does this in its own context.
2516 	 */
2517 	mutex_lock(&aconnector->hpd_lock);
2518 
2519 #ifdef CONFIG_DRM_AMD_DC_HDCP
2520 	if (adev->dm.hdcp_workqueue) {
2521 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2522 		dm_con_state->update_hdcp = true;
2523 	}
2524 #endif
2525 	if (aconnector->fake_enable)
2526 		aconnector->fake_enable = false;
2527 
2528 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2529 		DRM_ERROR("KMS: Failed to detect connector\n");
2530 
2531 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2532 		emulated_link_detect(aconnector->dc_link);
2533 
2534 
2535 		drm_modeset_lock_all(dev);
2536 		dm_restore_drm_connector_state(dev, connector);
2537 		drm_modeset_unlock_all(dev);
2538 
2539 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2540 			drm_kms_helper_hotplug_event(dev);
2541 
2542 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2543 		if (new_connection_type == dc_connection_none &&
2544 		    aconnector->dc_link->type == dc_connection_none)
2545 			dm_set_dpms_off(aconnector->dc_link);
2546 
2547 		amdgpu_dm_update_connector_after_detect(aconnector);
2548 
2549 		drm_modeset_lock_all(dev);
2550 		dm_restore_drm_connector_state(dev, connector);
2551 		drm_modeset_unlock_all(dev);
2552 
2553 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2554 			drm_kms_helper_hotplug_event(dev);
2555 	}
2556 	mutex_unlock(&aconnector->hpd_lock);
2557 
2558 }
2559 
2560 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2561 {
2562 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2563 	uint8_t dret;
2564 	bool new_irq_handled = false;
2565 	int dpcd_addr;
2566 	int dpcd_bytes_to_read;
2567 
2568 	const int max_process_count = 30;
2569 	int process_count = 0;
2570 
2571 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2572 
2573 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2574 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2575 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2576 		dpcd_addr = DP_SINK_COUNT;
2577 	} else {
2578 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2579 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2580 		dpcd_addr = DP_SINK_COUNT_ESI;
2581 	}
2582 
2583 	dret = drm_dp_dpcd_read(
2584 		&aconnector->dm_dp_aux.aux,
2585 		dpcd_addr,
2586 		esi,
2587 		dpcd_bytes_to_read);
2588 
2589 	while (dret == dpcd_bytes_to_read &&
2590 		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
2593 
2594 		process_count++;
2595 
2596 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2597 		/* handle HPD short pulse irq */
2598 		if (aconnector->mst_mgr.mst_state)
2599 			drm_dp_mst_hpd_irq(
2600 				&aconnector->mst_mgr,
2601 				esi,
2602 				&new_irq_handled);
2603 
2604 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
2606 			const int ack_dpcd_bytes_to_write =
2607 				dpcd_bytes_to_read - 1;
2608 
2609 			for (retry = 0; retry < 3; retry++) {
2610 				uint8_t wret;
2611 
2612 				wret = drm_dp_dpcd_write(
2613 					&aconnector->dm_dp_aux.aux,
2614 					dpcd_addr + 1,
2615 					&esi[1],
2616 					ack_dpcd_bytes_to_write);
2617 				if (wret == ack_dpcd_bytes_to_write)
2618 					break;
2619 			}
2620 
2621 			/* check if there is new irq to be handled */
2622 			dret = drm_dp_dpcd_read(
2623 				&aconnector->dm_dp_aux.aux,
2624 				dpcd_addr,
2625 				esi,
2626 				dpcd_bytes_to_read);
2627 
2628 			new_irq_handled = false;
2629 		} else {
2630 			break;
2631 		}
2632 	}
2633 
2634 	if (process_count == max_process_count)
2635 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2636 }
2637 
2638 static void handle_hpd_rx_irq(void *param)
2639 {
2640 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2641 	struct drm_connector *connector = &aconnector->base;
2642 	struct drm_device *dev = connector->dev;
2643 	struct dc_link *dc_link = aconnector->dc_link;
2644 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2645 	bool result = false;
2646 	enum dc_connection_type new_connection_type = dc_connection_none;
2647 	struct amdgpu_device *adev = drm_to_adev(dev);
2648 	union hpd_irq_data hpd_irq_data;
2649 
2650 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2651 
2652 	/*
2653 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2654 	 * conflict, after implement i2c helper, this mutex should be
2655 	 * retired.
2656 	 */
2657 	if (dc_link->type != dc_connection_mst_branch)
2658 		mutex_lock(&aconnector->hpd_lock);
2659 
2660 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2661 
2662 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2663 		(dc_link->type == dc_connection_mst_branch)) {
2664 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2665 			result = true;
2666 			dm_handle_hpd_rx_irq(aconnector);
2667 			goto out;
2668 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2669 			result = false;
2670 			dm_handle_hpd_rx_irq(aconnector);
2671 			goto out;
2672 		}
2673 	}
2674 
2675 	mutex_lock(&adev->dm.dc_lock);
2676 #ifdef CONFIG_DRM_AMD_DC_HDCP
2677 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2678 #else
2679 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2680 #endif
2681 	mutex_unlock(&adev->dm.dc_lock);
2682 
2683 out:
2684 	if (result && !is_mst_root_connector) {
2685 		/* Downstream Port status changed. */
2686 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2687 			DRM_ERROR("KMS: Failed to detect connector\n");
2688 
2689 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2690 			emulated_link_detect(dc_link);
2691 
2692 			if (aconnector->fake_enable)
2693 				aconnector->fake_enable = false;
2694 
2695 			amdgpu_dm_update_connector_after_detect(aconnector);
2696 
2697 
2698 			drm_modeset_lock_all(dev);
2699 			dm_restore_drm_connector_state(dev, connector);
2700 			drm_modeset_unlock_all(dev);
2701 
2702 			drm_kms_helper_hotplug_event(dev);
2703 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2704 
2705 			if (aconnector->fake_enable)
2706 				aconnector->fake_enable = false;
2707 
2708 			amdgpu_dm_update_connector_after_detect(aconnector);
2709 
2710 
2711 			drm_modeset_lock_all(dev);
2712 			dm_restore_drm_connector_state(dev, connector);
2713 			drm_modeset_unlock_all(dev);
2714 
2715 			drm_kms_helper_hotplug_event(dev);
2716 		}
2717 	}
2718 #ifdef CONFIG_DRM_AMD_DC_HDCP
2719 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2720 		if (adev->dm.hdcp_workqueue)
2721 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2722 	}
2723 #endif
2724 
2725 	if (dc_link->type != dc_connection_mst_branch) {
2726 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2727 		mutex_unlock(&aconnector->hpd_lock);
2728 	}
2729 }
2730 
2731 static void register_hpd_handlers(struct amdgpu_device *adev)
2732 {
2733 	struct drm_device *dev = adev_to_drm(adev);
2734 	struct drm_connector *connector;
2735 	struct amdgpu_dm_connector *aconnector;
2736 	const struct dc_link *dc_link;
2737 	struct dc_interrupt_params int_params = {0};
2738 
2739 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2740 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2741 
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
2744 
2745 		aconnector = to_amdgpu_dm_connector(connector);
2746 		dc_link = aconnector->dc_link;
2747 
2748 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2749 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2750 			int_params.irq_source = dc_link->irq_source_hpd;
2751 
2752 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2753 					handle_hpd_irq,
2754 					(void *) aconnector);
2755 		}
2756 
2757 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2758 
2759 			/* Also register for DP short pulse (hpd_rx). */
2760 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2761 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2762 
2763 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2764 					handle_hpd_rx_irq,
2765 					(void *) aconnector);
2766 		}
2767 	}
2768 }
2769 
2770 #if defined(CONFIG_DRM_AMD_DC_SI)
2771 /* Register IRQ sources and initialize IRQ callbacks */
2772 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2773 {
2774 	struct dc *dc = adev->dm.dc;
2775 	struct common_irq_params *c_irq_params;
2776 	struct dc_interrupt_params int_params = {0};
2777 	int r;
2778 	int i;
2779 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2780 
2781 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2782 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2783 
2784 	/*
2785 	 * Actions of amdgpu_irq_add_id():
2786 	 * 1. Register a set() function with base driver.
2787 	 *    Base driver will call set() function to enable/disable an
2788 	 *    interrupt in DC hardware.
2789 	 * 2. Register amdgpu_dm_irq_handler().
2790 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2791 	 *    coming from DC hardware.
2792 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2793 	 *    for acknowledging and handling. */
2794 
2795 	/* Use VBLANK interrupt */
2796 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2798 		if (r) {
2799 			DRM_ERROR("Failed to add crtc irq id!\n");
2800 			return r;
2801 		}
2802 
2803 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2804 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
2806 
2807 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2808 
2809 		c_irq_params->adev = adev;
2810 		c_irq_params->irq_src = int_params.irq_source;
2811 
2812 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2813 				dm_crtc_high_irq, c_irq_params);
2814 	}
2815 
2816 	/* Use GRPH_PFLIP interrupt */
2817 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2818 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2819 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2820 		if (r) {
2821 			DRM_ERROR("Failed to add page flip irq id!\n");
2822 			return r;
2823 		}
2824 
2825 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2826 		int_params.irq_source =
2827 			dc_interrupt_to_irq_source(dc, i, 0);
2828 
2829 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2830 
2831 		c_irq_params->adev = adev;
2832 		c_irq_params->irq_src = int_params.irq_source;
2833 
2834 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2835 				dm_pflip_high_irq, c_irq_params);
2836 
2837 	}
2838 
2839 	/* HPD */
2840 	r = amdgpu_irq_add_id(adev, client_id,
2841 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2842 	if (r) {
2843 		DRM_ERROR("Failed to add hpd irq id!\n");
2844 		return r;
2845 	}
2846 
2847 	register_hpd_handlers(adev);
2848 
2849 	return 0;
2850 }
2851 #endif
2852 
2853 /* Register IRQ sources and initialize IRQ callbacks */
2854 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2855 {
2856 	struct dc *dc = adev->dm.dc;
2857 	struct common_irq_params *c_irq_params;
2858 	struct dc_interrupt_params int_params = {0};
2859 	int r;
2860 	int i;
2861 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2862 
2863 	if (adev->asic_type >= CHIP_VEGA10)
2864 		client_id = SOC15_IH_CLIENTID_DCE;
2865 
2866 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2867 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2868 
2869 	/*
2870 	 * Actions of amdgpu_irq_add_id():
2871 	 * 1. Register a set() function with base driver.
2872 	 *    Base driver will call set() function to enable/disable an
2873 	 *    interrupt in DC hardware.
2874 	 * 2. Register amdgpu_dm_irq_handler().
2875 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2876 	 *    coming from DC hardware.
2877 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2878 	 *    for acknowledging and handling. */
2879 
2880 	/* Use VBLANK interrupt */
2881 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2882 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2883 		if (r) {
2884 			DRM_ERROR("Failed to add crtc irq id!\n");
2885 			return r;
2886 		}
2887 
2888 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2889 		int_params.irq_source =
2890 			dc_interrupt_to_irq_source(dc, i, 0);
2891 
2892 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2893 
2894 		c_irq_params->adev = adev;
2895 		c_irq_params->irq_src = int_params.irq_source;
2896 
2897 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2898 				dm_crtc_high_irq, c_irq_params);
2899 	}
2900 
2901 	/* Use VUPDATE interrupt */
2902 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2903 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2904 		if (r) {
2905 			DRM_ERROR("Failed to add vupdate irq id!\n");
2906 			return r;
2907 		}
2908 
2909 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2910 		int_params.irq_source =
2911 			dc_interrupt_to_irq_source(dc, i, 0);
2912 
2913 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2914 
2915 		c_irq_params->adev = adev;
2916 		c_irq_params->irq_src = int_params.irq_source;
2917 
2918 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2919 				dm_vupdate_high_irq, c_irq_params);
2920 	}
2921 
2922 	/* Use GRPH_PFLIP interrupt */
2923 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2924 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2925 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2926 		if (r) {
2927 			DRM_ERROR("Failed to add page flip irq id!\n");
2928 			return r;
2929 		}
2930 
2931 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2932 		int_params.irq_source =
2933 			dc_interrupt_to_irq_source(dc, i, 0);
2934 
2935 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2936 
2937 		c_irq_params->adev = adev;
2938 		c_irq_params->irq_src = int_params.irq_source;
2939 
2940 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2941 				dm_pflip_high_irq, c_irq_params);
2942 
2943 	}
2944 
2945 	/* HPD */
2946 	r = amdgpu_irq_add_id(adev, client_id,
2947 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2948 	if (r) {
2949 		DRM_ERROR("Failed to add hpd irq id!\n");
2950 		return r;
2951 	}
2952 
2953 	register_hpd_handlers(adev);
2954 
2955 	return 0;
2956 }
2957 
2958 #if defined(CONFIG_DRM_AMD_DC_DCN)
2959 /* Register IRQ sources and initialize IRQ callbacks */
2960 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2961 {
2962 	struct dc *dc = adev->dm.dc;
2963 	struct common_irq_params *c_irq_params;
2964 	struct dc_interrupt_params int_params = {0};
2965 	int r;
2966 	int i;
2967 
2968 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2969 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2970 
2971 	/*
2972 	 * Actions of amdgpu_irq_add_id():
2973 	 * 1. Register a set() function with base driver.
2974 	 *    Base driver will call set() function to enable/disable an
2975 	 *    interrupt in DC hardware.
2976 	 * 2. Register amdgpu_dm_irq_handler().
2977 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2978 	 *    coming from DC hardware.
2979 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2980 	 *    for acknowledging and handling.
2981 	 */
2982 
2983 	/* Use VSTARTUP interrupt */
2984 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2985 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2986 			i++) {
2987 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2988 
2989 		if (r) {
2990 			DRM_ERROR("Failed to add crtc irq id!\n");
2991 			return r;
2992 		}
2993 
2994 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2995 		int_params.irq_source =
2996 			dc_interrupt_to_irq_source(dc, i, 0);
2997 
2998 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2999 
3000 		c_irq_params->adev = adev;
3001 		c_irq_params->irq_src = int_params.irq_source;
3002 
3003 		amdgpu_dm_irq_register_interrupt(
3004 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3005 	}
3006 
3007 	/* Use otg vertical line interrupt */
3008 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3009 	for (i = DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL;
3010 			i <= DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL
3011 					+ adev->mode_info.num_crtc - 1;
3012 			i++) {
3013 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vline0_irq);
3014 
3015 		if (r) {
3016 			DRM_ERROR("Failed to add vline0 irq id!\n");
3017 			return r;
3018 		}
3019 
3020 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3021 		int_params.irq_source =
3022 			dc_interrupt_to_irq_source(dc, i, 0);
3023 
3024 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3025 					- DC_IRQ_SOURCE_DC1_VLINE0];
3026 
3027 		c_irq_params->adev = adev;
3028 		c_irq_params->irq_src = int_params.irq_source;
3029 
3030 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3031 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3032 	}
3033 #endif
3034 
3035 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3036 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3037 	 * to trigger at end of each vblank, regardless of state of the lock,
3038 	 * matching DCE behaviour.
3039 	 */
3040 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3041 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3042 	     i++) {
3043 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3044 
3045 		if (r) {
3046 			DRM_ERROR("Failed to add vupdate irq id!\n");
3047 			return r;
3048 		}
3049 
3050 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3051 		int_params.irq_source =
3052 			dc_interrupt_to_irq_source(dc, i, 0);
3053 
3054 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3055 
3056 		c_irq_params->adev = adev;
3057 		c_irq_params->irq_src = int_params.irq_source;
3058 
3059 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3060 				dm_vupdate_high_irq, c_irq_params);
3061 	}
3062 
3063 	/* Use GRPH_PFLIP interrupt */
3064 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3065 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3066 			i++) {
3067 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3068 		if (r) {
3069 			DRM_ERROR("Failed to add page flip irq id!\n");
3070 			return r;
3071 		}
3072 
3073 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3074 		int_params.irq_source =
3075 			dc_interrupt_to_irq_source(dc, i, 0);
3076 
3077 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3078 
3079 		c_irq_params->adev = adev;
3080 		c_irq_params->irq_src = int_params.irq_source;
3081 
3082 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3083 				dm_pflip_high_irq, c_irq_params);
3084 
3085 	}
3086 
3087 	/* HPD */
3088 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3089 			&adev->hpd_irq);
3090 	if (r) {
3091 		DRM_ERROR("Failed to add hpd irq id!\n");
3092 		return r;
3093 	}
3094 
3095 	register_hpd_handlers(adev);
3096 
3097 	return 0;
3098 }
3099 #endif
3100 
3101 /*
3102  * Acquires the lock for the atomic state object and returns
3103  * the new atomic state.
3104  *
3105  * This should only be called during atomic check.
3106  */
3107 static int dm_atomic_get_state(struct drm_atomic_state *state,
3108 			       struct dm_atomic_state **dm_state)
3109 {
3110 	struct drm_device *dev = state->dev;
3111 	struct amdgpu_device *adev = drm_to_adev(dev);
3112 	struct amdgpu_display_manager *dm = &adev->dm;
3113 	struct drm_private_state *priv_state;
3114 
3115 	if (*dm_state)
3116 		return 0;
3117 
3118 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3119 	if (IS_ERR(priv_state))
3120 		return PTR_ERR(priv_state);
3121 
3122 	*dm_state = to_dm_atomic_state(priv_state);
3123 
3124 	return 0;
3125 }
3126 
3127 static struct dm_atomic_state *
3128 dm_atomic_get_new_state(struct drm_atomic_state *state)
3129 {
3130 	struct drm_device *dev = state->dev;
3131 	struct amdgpu_device *adev = drm_to_adev(dev);
3132 	struct amdgpu_display_manager *dm = &adev->dm;
3133 	struct drm_private_obj *obj;
3134 	struct drm_private_state *new_obj_state;
3135 	int i;
3136 
3137 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3138 		if (obj->funcs == dm->atomic_obj.funcs)
3139 			return to_dm_atomic_state(new_obj_state);
3140 	}
3141 
3142 	return NULL;
3143 }
3144 
3145 static struct drm_private_state *
3146 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3147 {
3148 	struct dm_atomic_state *old_state, *new_state;
3149 
3150 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3151 	if (!new_state)
3152 		return NULL;
3153 
3154 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3155 
3156 	old_state = to_dm_atomic_state(obj->state);
3157 
3158 	if (old_state && old_state->context)
3159 		new_state->context = dc_copy_state(old_state->context);
3160 
3161 	if (!new_state->context) {
3162 		kfree(new_state);
3163 		return NULL;
3164 	}
3165 
3166 	return &new_state->base;
3167 }
3168 
3169 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3170 				    struct drm_private_state *state)
3171 {
3172 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3173 
3174 	if (dm_state && dm_state->context)
3175 		dc_release_state(dm_state->context);
3176 
3177 	kfree(dm_state);
3178 }
3179 
3180 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3181 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3182 	.atomic_destroy_state = dm_atomic_destroy_state,
3183 };
3184 
3185 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3186 {
3187 	struct dm_atomic_state *state;
3188 	int r;
3189 
3190 	adev->mode_info.mode_config_initialized = true;
3191 
3192 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3193 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3194 
3195 	adev_to_drm(adev)->mode_config.max_width = 16384;
3196 	adev_to_drm(adev)->mode_config.max_height = 16384;
3197 
3198 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3199 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3200 	/* indicates support for immediate flip */
3201 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3202 
3203 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3204 
3205 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3206 	if (!state)
3207 		return -ENOMEM;
3208 
3209 	state->context = dc_create_state(adev->dm.dc);
3210 	if (!state->context) {
3211 		kfree(state);
3212 		return -ENOMEM;
3213 	}
3214 
3215 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3216 
3217 	drm_atomic_private_obj_init(adev_to_drm(adev),
3218 				    &adev->dm.atomic_obj,
3219 				    &state->base,
3220 				    &dm_atomic_state_funcs);
3221 
3222 	r = amdgpu_display_modeset_create_props(adev);
3223 	if (r) {
3224 		dc_release_state(state->context);
3225 		kfree(state);
3226 		return r;
3227 	}
3228 
3229 	r = amdgpu_dm_audio_init(adev);
3230 	if (r) {
3231 		dc_release_state(state->context);
3232 		kfree(state);
3233 		return r;
3234 	}
3235 
3236 	return 0;
3237 }
3238 
3239 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3240 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3241 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3242 
3243 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3244 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3245 
3246 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3247 {
3248 #if defined(CONFIG_ACPI)
3249 	struct amdgpu_dm_backlight_caps caps;
3250 
3251 	memset(&caps, 0, sizeof(caps));
3252 
3253 	if (dm->backlight_caps.caps_valid)
3254 		return;
3255 
3256 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3257 	if (caps.caps_valid) {
3258 		dm->backlight_caps.caps_valid = true;
3259 		if (caps.aux_support)
3260 			return;
3261 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3262 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3263 	} else {
3264 		dm->backlight_caps.min_input_signal =
3265 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3266 		dm->backlight_caps.max_input_signal =
3267 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3268 	}
3269 #else
3270 	if (dm->backlight_caps.aux_support)
3271 		return;
3272 
3273 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3274 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3275 #endif
3276 }
3277 
3278 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3279 				unsigned *min, unsigned *max)
3280 {
3281 	if (!caps)
3282 		return 0;
3283 
3284 	if (caps->aux_support) {
3285 		// Firmware limits are in nits, DC API wants millinits.
3286 		*max = 1000 * caps->aux_max_input_signal;
3287 		*min = 1000 * caps->aux_min_input_signal;
3288 	} else {
3289 		// Firmware limits are 8-bit, PWM control is 16-bit.
3290 		*max = 0x101 * caps->max_input_signal;
3291 		*min = 0x101 * caps->min_input_signal;
3292 	}
3293 	return 1;
3294 }
3295 
3296 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3297 					uint32_t brightness)
3298 {
3299 	unsigned min, max;
3300 
3301 	if (!get_brightness_range(caps, &min, &max))
3302 		return brightness;
3303 
3304 	// Rescale 0..255 to min..max
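	// e.g. with the 8-bit defaults (min_input_signal = 12, max = 255) the
	// PWM range is 3084..65535, so a user brightness of 128 maps to
	// 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432.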
3305 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3306 				       AMDGPU_MAX_BL_LEVEL);
3307 }
3308 
3309 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3310 				      uint32_t brightness)
3311 {
3312 	unsigned min, max;
3313 
3314 	if (!get_brightness_range(caps, &min, &max))
3315 		return brightness;
3316 
3317 	if (brightness < min)
3318 		return 0;
3319 	// Rescale min..max to 0..255
3320 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3321 				 max - min);
3322 }
3323 
3324 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3325 {
3326 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3327 	struct amdgpu_dm_backlight_caps caps;
3328 	struct dc_link *link = NULL;
3329 	u32 brightness;
3330 	bool rc;
3331 
3332 	amdgpu_dm_update_backlight_caps(dm);
3333 	caps = dm->backlight_caps;
3334 
3335 	link = (struct dc_link *)dm->backlight_link;
3336 
3337 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3338 	// Change brightness based on AUX property
3339 	if (caps.aux_support)
3340 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3341 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3342 	else
3343 		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3344 
3345 	return rc ? 0 : 1;
3346 }
3347 
3348 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3349 {
3350 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3351 	struct amdgpu_dm_backlight_caps caps;
3352 
3353 	amdgpu_dm_update_backlight_caps(dm);
3354 	caps = dm->backlight_caps;
3355 
3356 	if (caps.aux_support) {
3357 		struct dc_link *link = (struct dc_link *)dm->backlight_link;
3358 		u32 avg, peak;
3359 		bool rc;
3360 
3361 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3362 		if (!rc)
3363 			return bd->props.brightness;
3364 		return convert_brightness_to_user(&caps, avg);
3365 	} else {
3366 		int ret = dc_link_get_backlight_level(dm->backlight_link);
3367 
3368 		if (ret == DC_ERROR_UNEXPECTED)
3369 			return bd->props.brightness;
3370 		return convert_brightness_to_user(&caps, ret);
3371 	}
3372 }
3373 
3374 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3375 	.options = BL_CORE_SUSPENDRESUME,
3376 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3377 	.update_status	= amdgpu_dm_backlight_update_status,
3378 };
3379 
3380 static void
3381 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3382 {
3383 	char bl_name[16];
3384 	struct backlight_properties props = { 0 };
3385 
3386 	amdgpu_dm_update_backlight_caps(dm);
3387 
3388 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3389 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3390 	props.type = BACKLIGHT_RAW;
3391 
3392 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3393 		 adev_to_drm(dm->adev)->primary->index);
3394 
3395 	dm->backlight_dev = backlight_device_register(bl_name,
3396 						      adev_to_drm(dm->adev)->dev,
3397 						      dm,
3398 						      &amdgpu_dm_backlight_ops,
3399 						      &props);
3400 
3401 	if (IS_ERR(dm->backlight_dev))
3402 		DRM_ERROR("DM: Backlight registration failed!\n");
3403 	else
3404 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3405 }
3406 
3407 #endif
3408 
3409 static int initialize_plane(struct amdgpu_display_manager *dm,
3410 			    struct amdgpu_mode_info *mode_info, int plane_id,
3411 			    enum drm_plane_type plane_type,
3412 			    const struct dc_plane_cap *plane_cap)
3413 {
3414 	struct drm_plane *plane;
3415 	unsigned long possible_crtcs;
3416 	int ret = 0;
3417 
	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
3419 	if (!plane) {
3420 		DRM_ERROR("KMS: Failed to allocate plane\n");
3421 		return -ENOMEM;
3422 	}
3423 	plane->type = plane_type;
3424 
3425 	/*
3426 	 * HACK: IGT tests expect that the primary plane for a CRTC
3427 	 * can only have one possible CRTC. Only expose support for
3428 	 * any CRTC if they're not going to be used as a primary plane
3429 	 * for a CRTC - like overlay or underlay planes.
3430 	 */
3431 	possible_crtcs = 1 << plane_id;
3432 	if (plane_id >= dm->dc->caps.max_streams)
3433 		possible_crtcs = 0xff;
3434 
3435 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3436 
3437 	if (ret) {
3438 		DRM_ERROR("KMS: Failed to initialize plane\n");
3439 		kfree(plane);
3440 		return ret;
3441 	}
3442 
3443 	if (mode_info)
3444 		mode_info->planes[plane_id] = plane;
3445 
3446 	return ret;
3447 }
3448 
3449 
3450 static void register_backlight_device(struct amdgpu_display_manager *dm,
3451 				      struct dc_link *link)
3452 {
3453 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3454 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3455 
3456 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3457 	    link->type != dc_connection_none) {
3458 		/*
3459 		 * Even if registration fails, we should continue with
3460 		 * DM initialization, because not having a backlight control
3461 		 * is better than a black screen.
3462 		 */
3463 		amdgpu_dm_register_backlight_device(dm);
3464 
3465 		if (dm->backlight_dev)
3466 			dm->backlight_link = link;
3467 	}
3468 #endif
3469 }
3470 
3471 
3472 /*
3473  * In this architecture, the association
3474  * connector -> encoder -> crtc
3475  * is not really required. The crtc and connector will hold the
3476  * display_index as an abstraction to use with the DAL component.
3477  *
3478  * Returns 0 on success
3479  */
3480 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3481 {
3482 	struct amdgpu_display_manager *dm = &adev->dm;
3483 	int32_t i;
3484 	struct amdgpu_dm_connector *aconnector = NULL;
3485 	struct amdgpu_encoder *aencoder = NULL;
3486 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3487 	uint32_t link_cnt;
3488 	int32_t primary_planes;
3489 	enum dc_connection_type new_connection_type = dc_connection_none;
3490 	const struct dc_plane_cap *plane;
3491 
3492 	dm->display_indexes_num = dm->dc->caps.max_streams;
3493 	/* Update the actual number of CRTCs in use. */
3494 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3495 
3496 	link_cnt = dm->dc->caps.max_links;
3497 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3498 		DRM_ERROR("DM: Failed to initialize mode config\n");
3499 		return -EINVAL;
3500 	}
3501 
3502 	/* There is one primary plane per CRTC */
3503 	primary_planes = dm->dc->caps.max_streams;
3504 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3505 
3506 	/*
3507 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3508 	 * Order is reversed to match iteration order in atomic check.
3509 	 */
3510 	for (i = (primary_planes - 1); i >= 0; i--) {
3511 		plane = &dm->dc->caps.planes[i];
3512 
3513 		if (initialize_plane(dm, mode_info, i,
3514 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3515 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3516 			goto fail;
3517 		}
3518 	}
3519 
3520 	/*
3521 	 * Initialize overlay planes, index starting after primary planes.
3522 	 * These planes have a higher DRM index than the primary planes since
3523 	 * they should be considered as having a higher z-order.
3524 	 * Order is reversed to match iteration order in atomic check.
3525 	 *
3526 	 * Only support DCN for now, and only expose one so we don't encourage
3527 	 * userspace to use up all the pipes.
3528 	 */
3529 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3530 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3531 
3532 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3533 			continue;
3534 
3535 		if (!plane->blends_with_above || !plane->blends_with_below)
3536 			continue;
3537 
3538 		if (!plane->pixel_format_support.argb8888)
3539 			continue;
3540 
3541 		if (initialize_plane(dm, NULL, primary_planes + i,
3542 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3543 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3544 			goto fail;
3545 		}
3546 
3547 		/* Only create one overlay plane. */
3548 		break;
3549 	}
3550 
3551 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3552 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3553 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3554 			goto fail;
3555 		}
3556 
3557 	/* Loop over all connectors on the board. */
3558 	for (i = 0; i < link_cnt; i++) {
3559 		struct dc_link *link = NULL;
3560 
3561 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3562 			DRM_ERROR(
3563 				"KMS: Cannot support more than %d display indexes\n",
3564 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3565 			continue;
3566 		}
3567 
3568 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3569 		if (!aconnector)
3570 			goto fail;
3571 
3572 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3573 		if (!aencoder)
3574 			goto fail;
3575 
3576 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3577 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3578 			goto fail;
3579 		}
3580 
3581 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3582 			DRM_ERROR("KMS: Failed to initialize connector\n");
3583 			goto fail;
3584 		}
3585 
3586 		link = dc_get_link_at_index(dm->dc, i);
3587 
3588 		if (!dc_link_detect_sink(link, &new_connection_type))
3589 			DRM_ERROR("KMS: Failed to detect connector\n");
3590 
3591 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3592 			emulated_link_detect(link);
3593 			amdgpu_dm_update_connector_after_detect(aconnector);
3594 
3595 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3596 			amdgpu_dm_update_connector_after_detect(aconnector);
3597 			register_backlight_device(dm, link);
3598 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3599 				amdgpu_dm_set_psr_caps(link);
3600 		}
3603 	}
3604 
3605 	/* Software is initialized. Now we can register interrupt handlers. */
3606 	switch (adev->asic_type) {
3607 #if defined(CONFIG_DRM_AMD_DC_SI)
3608 	case CHIP_TAHITI:
3609 	case CHIP_PITCAIRN:
3610 	case CHIP_VERDE:
3611 	case CHIP_OLAND:
3612 		if (dce60_register_irq_handlers(dm->adev)) {
3613 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3614 			goto fail;
3615 		}
3616 		break;
3617 #endif
3618 	case CHIP_BONAIRE:
3619 	case CHIP_HAWAII:
3620 	case CHIP_KAVERI:
3621 	case CHIP_KABINI:
3622 	case CHIP_MULLINS:
3623 	case CHIP_TONGA:
3624 	case CHIP_FIJI:
3625 	case CHIP_CARRIZO:
3626 	case CHIP_STONEY:
3627 	case CHIP_POLARIS11:
3628 	case CHIP_POLARIS10:
3629 	case CHIP_POLARIS12:
3630 	case CHIP_VEGAM:
3631 	case CHIP_VEGA10:
3632 	case CHIP_VEGA12:
3633 	case CHIP_VEGA20:
3634 		if (dce110_register_irq_handlers(dm->adev)) {
3635 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3636 			goto fail;
3637 		}
3638 		break;
3639 #if defined(CONFIG_DRM_AMD_DC_DCN)
3640 	case CHIP_RAVEN:
3641 	case CHIP_NAVI12:
3642 	case CHIP_NAVI10:
3643 	case CHIP_NAVI14:
3644 	case CHIP_RENOIR:
3645 	case CHIP_SIENNA_CICHLID:
3646 	case CHIP_NAVY_FLOUNDER:
3647 	case CHIP_DIMGREY_CAVEFISH:
3648 	case CHIP_VANGOGH:
3649 		if (dcn10_register_irq_handlers(dm->adev)) {
3650 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3651 			goto fail;
3652 		}
3653 		break;
3654 #endif
3655 	default:
3656 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3657 		goto fail;
3658 	}
3659 
3660 	return 0;
3661 fail:
3662 	kfree(aencoder);
3663 	kfree(aconnector);
3664 
3665 	return -EINVAL;
3666 }
3667 
3668 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3669 {
3670 	drm_mode_config_cleanup(dm->ddev);
3671 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3673 }
3674 
3675 /******************************************************************************
3676  * amdgpu_display_funcs functions
3677  *****************************************************************************/
3678 
3679 /*
3680  * dm_bandwidth_update - program display watermarks
3681  *
3682  * @adev: amdgpu_device pointer
3683  *
3684  * Calculate and program the display watermarks and line buffer allocation.
3685  */
3686 static void dm_bandwidth_update(struct amdgpu_device *adev)
3687 {
3688 	/* TODO: implement later */
3689 }
3690 
3691 static const struct amdgpu_display_funcs dm_display_funcs = {
3692 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3693 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3694 	.backlight_set_level = NULL, /* never called for DC */
3695 	.backlight_get_level = NULL, /* never called for DC */
3696 	.hpd_sense = NULL,/* called unconditionally */
3697 	.hpd_set_polarity = NULL, /* called unconditionally */
3698 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3699 	.page_flip_get_scanoutpos =
3700 		dm_crtc_get_scanoutpos,/* called unconditionally */
3701 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3702 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3703 };
3704 
3705 #if defined(CONFIG_DEBUG_KERNEL_DC)
3706 
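/*
 * Debug-only sysfs hook: writing a non-zero value simulates a resume
 * (followed by a hotplug event), writing zero simulates a suspend.
 */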
3707 static ssize_t s3_debug_store(struct device *device,
3708 			      struct device_attribute *attr,
3709 			      const char *buf,
3710 			      size_t count)
3711 {
3712 	int ret;
3713 	int s3_state;
3714 	struct drm_device *drm_dev = dev_get_drvdata(device);
3715 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3716 
3717 	ret = kstrtoint(buf, 0, &s3_state);
3718 
3719 	if (ret == 0) {
3720 		if (s3_state) {
3721 			dm_resume(adev);
3722 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3723 		} else
3724 			dm_suspend(adev);
3725 	}
3726 
3727 	return ret == 0 ? count : 0;
3728 }
3729 
3730 DEVICE_ATTR_WO(s3_debug);
3731 
3732 #endif
3733 
3734 static int dm_early_init(void *handle)
3735 {
3736 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3737 
3738 	switch (adev->asic_type) {
3739 #if defined(CONFIG_DRM_AMD_DC_SI)
3740 	case CHIP_TAHITI:
3741 	case CHIP_PITCAIRN:
3742 	case CHIP_VERDE:
3743 		adev->mode_info.num_crtc = 6;
3744 		adev->mode_info.num_hpd = 6;
3745 		adev->mode_info.num_dig = 6;
3746 		break;
3747 	case CHIP_OLAND:
3748 		adev->mode_info.num_crtc = 2;
3749 		adev->mode_info.num_hpd = 2;
3750 		adev->mode_info.num_dig = 2;
3751 		break;
3752 #endif
3753 	case CHIP_BONAIRE:
3754 	case CHIP_HAWAII:
3755 		adev->mode_info.num_crtc = 6;
3756 		adev->mode_info.num_hpd = 6;
3757 		adev->mode_info.num_dig = 6;
3758 		break;
3759 	case CHIP_KAVERI:
3760 		adev->mode_info.num_crtc = 4;
3761 		adev->mode_info.num_hpd = 6;
3762 		adev->mode_info.num_dig = 7;
3763 		break;
3764 	case CHIP_KABINI:
3765 	case CHIP_MULLINS:
3766 		adev->mode_info.num_crtc = 2;
3767 		adev->mode_info.num_hpd = 6;
3768 		adev->mode_info.num_dig = 6;
3769 		break;
3770 	case CHIP_FIJI:
3771 	case CHIP_TONGA:
3772 		adev->mode_info.num_crtc = 6;
3773 		adev->mode_info.num_hpd = 6;
3774 		adev->mode_info.num_dig = 7;
3775 		break;
3776 	case CHIP_CARRIZO:
3777 		adev->mode_info.num_crtc = 3;
3778 		adev->mode_info.num_hpd = 6;
3779 		adev->mode_info.num_dig = 9;
3780 		break;
3781 	case CHIP_STONEY:
3782 		adev->mode_info.num_crtc = 2;
3783 		adev->mode_info.num_hpd = 6;
3784 		adev->mode_info.num_dig = 9;
3785 		break;
3786 	case CHIP_POLARIS11:
3787 	case CHIP_POLARIS12:
3788 		adev->mode_info.num_crtc = 5;
3789 		adev->mode_info.num_hpd = 5;
3790 		adev->mode_info.num_dig = 5;
3791 		break;
3792 	case CHIP_POLARIS10:
3793 	case CHIP_VEGAM:
3794 		adev->mode_info.num_crtc = 6;
3795 		adev->mode_info.num_hpd = 6;
3796 		adev->mode_info.num_dig = 6;
3797 		break;
3798 	case CHIP_VEGA10:
3799 	case CHIP_VEGA12:
3800 	case CHIP_VEGA20:
3801 		adev->mode_info.num_crtc = 6;
3802 		adev->mode_info.num_hpd = 6;
3803 		adev->mode_info.num_dig = 6;
3804 		break;
3805 #if defined(CONFIG_DRM_AMD_DC_DCN)
3806 	case CHIP_RAVEN:
3807 	case CHIP_RENOIR:
3808 	case CHIP_VANGOGH:
3809 		adev->mode_info.num_crtc = 4;
3810 		adev->mode_info.num_hpd = 4;
3811 		adev->mode_info.num_dig = 4;
3812 		break;
3813 	case CHIP_NAVI10:
3814 	case CHIP_NAVI12:
3815 	case CHIP_SIENNA_CICHLID:
3816 	case CHIP_NAVY_FLOUNDER:
3817 		adev->mode_info.num_crtc = 6;
3818 		adev->mode_info.num_hpd = 6;
3819 		adev->mode_info.num_dig = 6;
3820 		break;
3821 	case CHIP_NAVI14:
3822 	case CHIP_DIMGREY_CAVEFISH:
3823 		adev->mode_info.num_crtc = 5;
3824 		adev->mode_info.num_hpd = 5;
3825 		adev->mode_info.num_dig = 5;
3826 		break;
3827 #endif
3828 	default:
3829 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3830 		return -EINVAL;
3831 	}
3832 
3833 	amdgpu_dm_set_irq_funcs(adev);
3834 
3835 	if (adev->mode_info.funcs == NULL)
3836 		adev->mode_info.funcs = &dm_display_funcs;
3837 
3838 	/*
3839 	 * Note: Do NOT change adev->audio_endpt_rreg and
3840 	 * adev->audio_endpt_wreg because they are initialised in
3841 	 * amdgpu_device_init()
3842 	 */
3843 #if defined(CONFIG_DEBUG_KERNEL_DC)
3844 	device_create_file(
3845 		adev_to_drm(adev)->dev,
3846 		&dev_attr_s3_debug);
3847 #endif
3848 
3849 	return 0;
3850 }
3851 
3852 static bool modeset_required(struct drm_crtc_state *crtc_state,
3853 			     struct dc_stream_state *new_stream,
3854 			     struct dc_stream_state *old_stream)
3855 {
3856 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3857 }
3858 
3859 static bool modereset_required(struct drm_crtc_state *crtc_state)
3860 {
3861 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3862 }
3863 
3864 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3865 {
3866 	drm_encoder_cleanup(encoder);
3867 	kfree(encoder);
3868 }
3869 
3870 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3871 	.destroy = amdgpu_dm_encoder_destroy,
3872 };
3873 
3874 
3875 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3876 					 struct drm_framebuffer *fb,
3877 					 int *min_downscale, int *max_upscale)
3878 {
3879 	struct amdgpu_device *adev = drm_to_adev(dev);
3880 	struct dc *dc = adev->dm.dc;
3881 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3882 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3883 
3884 	switch (fb->format->format) {
3885 	case DRM_FORMAT_P010:
3886 	case DRM_FORMAT_NV12:
3887 	case DRM_FORMAT_NV21:
3888 		*max_upscale = plane_cap->max_upscale_factor.nv12;
3889 		*min_downscale = plane_cap->max_downscale_factor.nv12;
3890 		break;
3891 
3892 	case DRM_FORMAT_XRGB16161616F:
3893 	case DRM_FORMAT_ARGB16161616F:
3894 	case DRM_FORMAT_XBGR16161616F:
3895 	case DRM_FORMAT_ABGR16161616F:
3896 		*max_upscale = plane_cap->max_upscale_factor.fp16;
3897 		*min_downscale = plane_cap->max_downscale_factor.fp16;
3898 		break;
3899 
3900 	default:
3901 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
3902 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
3903 		break;
3904 	}
3905 
3906 	/*
3907 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e.
3908 	 * use a scaling factor of 1.0 == 1000 units.
3909 	 */
3910 	if (*max_upscale == 1)
3911 		*max_upscale = 1000;
3912 
3913 	if (*min_downscale == 1)
3914 		*min_downscale = 1000;
3915 }
3916 
3917 
3918 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3919 				struct dc_scaling_info *scaling_info)
3920 {
3921 	int scale_w, scale_h, min_downscale, max_upscale;
3922 
3923 	memset(scaling_info, 0, sizeof(*scaling_info));
3924 
3925 	/* Source is fixed-point 16.16, but we ignore the fractional part for now... */
3926 	scaling_info->src_rect.x = state->src_x >> 16;
3927 	scaling_info->src_rect.y = state->src_y >> 16;
3928 
3929 	scaling_info->src_rect.width = state->src_w >> 16;
3930 	if (scaling_info->src_rect.width == 0)
3931 		return -EINVAL;
3932 
3933 	scaling_info->src_rect.height = state->src_h >> 16;
3934 	if (scaling_info->src_rect.height == 0)
3935 		return -EINVAL;
3936 
3937 	scaling_info->dst_rect.x = state->crtc_x;
3938 	scaling_info->dst_rect.y = state->crtc_y;
3939 
3940 	if (state->crtc_w == 0)
3941 		return -EINVAL;
3942 
3943 	scaling_info->dst_rect.width = state->crtc_w;
3944 
3945 	if (state->crtc_h == 0)
3946 		return -EINVAL;
3947 
3948 	scaling_info->dst_rect.height = state->crtc_h;
3949 
3950 	/* DRM doesn't specify clipping on destination output. */
3951 	scaling_info->clip_rect = scaling_info->dst_rect;
3952 
3953 	/* Validate scaling per-format with DC plane caps */
3954 	if (state->plane && state->plane->dev && state->fb) {
3955 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3956 					     &min_downscale, &max_upscale);
3957 	} else {
3958 		min_downscale = 250;
3959 		max_upscale = 16000;
3960 	}
3961 
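	/*
	 * Scaling factors are expressed in units of 1/1000th, i.e.
	 * 1000 == 1.0, so the fallback limits above allow anything from
	 * 0.25x downscaling to 16x upscaling.
	 */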
3962 	scale_w = scaling_info->dst_rect.width * 1000 /
3963 		  scaling_info->src_rect.width;
3964 
3965 	if (scale_w < min_downscale || scale_w > max_upscale)
3966 		return -EINVAL;
3967 
3968 	scale_h = scaling_info->dst_rect.height * 1000 /
3969 		  scaling_info->src_rect.height;
3970 
3971 	if (scale_h < min_downscale || scale_h > max_upscale)
3972 		return -EINVAL;
3973 
3974 	/*
3975 	 * The "scaling_quality" can be ignored for now; quality = 0 lets DC
3976 	 * assume reasonable defaults based on the format.
3977 	 */
3978 
3979 	return 0;
3980 }
3981 
3982 static void
3983 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3984 				 uint64_t tiling_flags)
3985 {
3986 	/* Fill GFX8 params */
3987 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3988 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3989 
3990 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3991 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3992 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3993 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3994 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3995 
3996 		/* XXX fix me for VI */
3997 		tiling_info->gfx8.num_banks = num_banks;
3998 		tiling_info->gfx8.array_mode =
3999 				DC_ARRAY_2D_TILED_THIN1;
4000 		tiling_info->gfx8.tile_split = tile_split;
4001 		tiling_info->gfx8.bank_width = bankw;
4002 		tiling_info->gfx8.bank_height = bankh;
4003 		tiling_info->gfx8.tile_aspect = mtaspect;
4004 		tiling_info->gfx8.tile_mode =
4005 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4006 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4007 			== DC_ARRAY_1D_TILED_THIN1) {
4008 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4009 	}
4010 
4011 	tiling_info->gfx8.pipe_config =
4012 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4013 }
4014 
4015 static void
4016 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4017 				  union dc_tiling_info *tiling_info)
4018 {
4019 	tiling_info->gfx9.num_pipes =
4020 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4021 	tiling_info->gfx9.num_banks =
4022 		adev->gfx.config.gb_addr_config_fields.num_banks;
4023 	tiling_info->gfx9.pipe_interleave =
4024 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4025 	tiling_info->gfx9.num_shader_engines =
4026 		adev->gfx.config.gb_addr_config_fields.num_se;
4027 	tiling_info->gfx9.max_compressed_frags =
4028 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4029 	tiling_info->gfx9.num_rb_per_se =
4030 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4031 	tiling_info->gfx9.shaderEnable = 1;
4032 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4033 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4034 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4035 	    adev->asic_type == CHIP_VANGOGH)
4036 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4037 }
4038 
4039 static int
4040 validate_dcc(struct amdgpu_device *adev,
4041 	     const enum surface_pixel_format format,
4042 	     const enum dc_rotation_angle rotation,
4043 	     const union dc_tiling_info *tiling_info,
4044 	     const struct dc_plane_dcc_param *dcc,
4045 	     const struct dc_plane_address *address,
4046 	     const struct plane_size *plane_size)
4047 {
4048 	struct dc *dc = adev->dm.dc;
4049 	struct dc_dcc_surface_param input;
4050 	struct dc_surface_dcc_cap output;
4051 
4052 	memset(&input, 0, sizeof(input));
4053 	memset(&output, 0, sizeof(output));
4054 
4055 	if (!dcc->enable)
4056 		return 0;
4057 
4058 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4059 	    !dc->cap_funcs.get_dcc_compression_cap)
4060 		return -EINVAL;
4061 
4062 	input.format = format;
4063 	input.surface_size.width = plane_size->surface_size.width;
4064 	input.surface_size.height = plane_size->surface_size.height;
4065 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4066 
4067 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4068 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4069 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4070 		input.scan = SCAN_DIRECTION_VERTICAL;
4071 
4072 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4073 		return -EINVAL;
4074 
4075 	if (!output.capable)
4076 		return -EINVAL;
4077 
4078 	if (dcc->independent_64b_blks == 0 &&
4079 	    output.grph.rgb.independent_64b_blks != 0)
4080 		return -EINVAL;
4081 
4082 	return 0;
4083 }
4084 
4085 static bool
4086 modifier_has_dcc(uint64_t modifier)
4087 {
4088 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4089 }
4090 
4091 static unsigned
4092 modifier_gfx9_swizzle_mode(uint64_t modifier)
4093 {
4094 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4095 		return 0;
4096 
4097 	return AMD_FMT_MOD_GET(TILE, modifier);
4098 }
4099 
4100 static const struct drm_format_info *
4101 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4102 {
4103 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4104 }
4105 
4106 static void
4107 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4108 				    union dc_tiling_info *tiling_info,
4109 				    uint64_t modifier)
4110 {
4111 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4112 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4113 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4114 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4115 
4116 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4117 
4118 	if (!IS_AMD_FMT_MOD(modifier))
4119 		return;
4120 
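	/*
	 * The modifier encodes log2 counts; recover the pipe and
	 * shader-engine counts from the XOR bit fields.
	 */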
4121 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4122 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4123 
4124 	if (adev->family >= AMDGPU_FAMILY_NV) {
4125 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4126 	} else {
4127 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4128 
4129 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4130 	}
4131 }
4132 
4133 enum dm_micro_swizzle {
4134 	MICRO_SWIZZLE_Z = 0,
4135 	MICRO_SWIZZLE_S = 1,
4136 	MICRO_SWIZZLE_D = 2,
4137 	MICRO_SWIZZLE_R = 3
4138 };
4139 
4140 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4141 					  uint32_t format,
4142 					  uint64_t modifier)
4143 {
4144 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4145 	const struct drm_format_info *info = drm_format_info(format);
4146 
4147 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4148 
4149 	if (!info)
4150 		return false;
4151 
4152 	/*
4153 	 * We always have to allow this modifier, because core DRM still
4154 	 * checks LINEAR support if userspace does not provide modifiers.
4155 	 */
4156 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4157 		return true;
4158 
4159 	/*
4160 	 * The arbitrary tiling support for multiplane formats has not been hooked
4161 	 * up.
4162 	 */
4163 	if (info->num_planes > 1)
4164 		return false;
4165 
4166 	/*
4167 	 * For D swizzle the canonical modifier depends on the bpp, so check
4168 	 * it here.
4169 	 */
4170 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4171 	    adev->family >= AMDGPU_FAMILY_NV) {
4172 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4173 			return false;
4174 	}
4175 
4176 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4177 	    info->cpp[0] < 8)
4178 		return false;
4179 
4180 	if (modifier_has_dcc(modifier)) {
4181 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4182 		if (info->cpp[0] != 4)
4183 			return false;
4184 	}
4185 
4186 	return true;
4187 }
4188 
4189 static void
4190 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4191 {
4192 	if (!*mods)
4193 		return;
4194 
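	/*
	 * Double the capacity when the array is full. On allocation
	 * failure, free the list and set it to NULL so later calls become
	 * no-ops and the caller sees the error via !*mods.
	 */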
4195 	if (*cap - *size < 1) {
4196 		uint64_t new_cap = *cap * 2;
4197 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4198 
4199 		if (!new_mods) {
4200 			kfree(*mods);
4201 			*mods = NULL;
4202 			return;
4203 		}
4204 
4205 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4206 		kfree(*mods);
4207 		*mods = new_mods;
4208 		*cap = new_cap;
4209 	}
4210 
4211 	(*mods)[*size] = mod;
4212 	*size += 1;
4213 }
4214 
4215 static void
4216 add_gfx9_modifiers(const struct amdgpu_device *adev,
4217 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4218 {
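	/*
	 * Derive the XOR bit counts from the device's GB_ADDR_CONFIG; the
	 * pipe and bank XOR bits together may not exceed 8.
	 */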
4219 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4220 	int pipe_xor_bits = min(8, pipes +
4221 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4222 	int bank_xor_bits = min(8 - pipe_xor_bits,
4223 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4224 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4225 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4226 
4227 
4228 	if (adev->family == AMDGPU_FAMILY_RV) {
4229 		/* Raven2 and later */
4230 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4231 
4232 		/*
4233 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4234 		 * doesn't support _D on DCN
4235 		 */
4236 
4237 		if (has_constant_encode) {
4238 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4239 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4240 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4241 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4242 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4243 				    AMD_FMT_MOD_SET(DCC, 1) |
4244 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4245 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4246 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4247 		}
4248 
4249 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4250 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4251 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4252 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4253 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4254 			    AMD_FMT_MOD_SET(DCC, 1) |
4255 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4256 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4257 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4258 
4259 		if (has_constant_encode) {
4260 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4261 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4262 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4263 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4264 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4265 				    AMD_FMT_MOD_SET(DCC, 1) |
4266 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4267 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4268 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4269 
4270 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4271 				    AMD_FMT_MOD_SET(RB, rb) |
4272 				    AMD_FMT_MOD_SET(PIPE, pipes));
4273 		}
4274 
4275 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4276 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4277 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4278 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4279 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4280 			    AMD_FMT_MOD_SET(DCC, 1) |
4281 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4282 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4283 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4284 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4285 			    AMD_FMT_MOD_SET(RB, rb) |
4286 			    AMD_FMT_MOD_SET(PIPE, pipes));
4287 	}
4288 
4289 	/*
4290 	 * Only supported for 64bpp on Raven, will be filtered on format in
4291 	 * dm_plane_format_mod_supported.
4292 	 */
4293 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4294 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4295 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4296 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4297 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4298 
4299 	if (adev->family == AMDGPU_FAMILY_RV) {
4300 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4301 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4302 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4303 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4304 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4305 	}
4306 
4307 	/*
4308 	 * Only supported for 64bpp on Raven, will be filtered on format in
4309 	 * dm_plane_format_mod_supported.
4310 	 */
4311 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4312 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4313 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4314 
4315 	if (adev->family == AMDGPU_FAMILY_RV) {
4316 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4317 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4318 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4319 	}
4320 }
4321 
4322 static void
4323 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4324 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4325 {
4326 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4327 
4328 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4329 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4330 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4331 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4332 		    AMD_FMT_MOD_SET(DCC, 1) |
4333 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4334 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4335 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4336 
4337 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4338 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4339 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4340 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4341 		    AMD_FMT_MOD_SET(DCC, 1) |
4342 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4343 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4344 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4345 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4346 
4347 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4348 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4349 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4350 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4351 
4352 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4353 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4354 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4355 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4356 
4357 
4358 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4359 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4360 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4361 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4362 
4363 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4364 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4365 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4366 }
4367 
4368 static void
4369 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4370 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4371 {
4372 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4373 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4374 
4375 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4376 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4377 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4378 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4379 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4380 		    AMD_FMT_MOD_SET(DCC, 1) |
4381 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4382 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4383 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4384 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4385 
4386 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4387 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4388 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4389 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4390 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4391 		    AMD_FMT_MOD_SET(DCC, 1) |
4392 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4393 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4394 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4395 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4396 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4397 
4398 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4399 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4400 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4401 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4402 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4403 
4404 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4405 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4406 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4407 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4408 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4409 
4410 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4411 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4412 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4413 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4414 
4415 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4416 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4417 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4418 }
4419 
4420 static int
4421 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4422 {
4423 	uint64_t size = 0, capacity = 128;
4424 	*mods = NULL;
4425 
4426 	/* We have not hooked up any pre-GFX9 modifiers. */
4427 	if (adev->family < AMDGPU_FAMILY_AI)
4428 		return 0;
4429 
4430 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4431 
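	/*
	 * Cursor planes only advertise the linear layout, plus the
	 * INVALID terminator that ends the list.
	 */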
4432 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4433 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4434 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4435 		return *mods ? 0 : -ENOMEM;
4436 	}
4437 
4438 	switch (adev->family) {
4439 	case AMDGPU_FAMILY_AI:
4440 	case AMDGPU_FAMILY_RV:
4441 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4442 		break;
4443 	case AMDGPU_FAMILY_NV:
4444 	case AMDGPU_FAMILY_VGH:
4445 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4446 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4447 		else
4448 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4449 		break;
4450 	}
4451 
4452 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4453 
4454 	/* INVALID marks the end of the list. */
4455 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4456 
4457 	if (!*mods)
4458 		return -ENOMEM;
4459 
4460 	return 0;
4461 }
4462 
4463 static int
4464 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4465 					  const struct amdgpu_framebuffer *afb,
4466 					  const enum surface_pixel_format format,
4467 					  const enum dc_rotation_angle rotation,
4468 					  const struct plane_size *plane_size,
4469 					  union dc_tiling_info *tiling_info,
4470 					  struct dc_plane_dcc_param *dcc,
4471 					  struct dc_plane_address *address,
4472 					  const bool force_disable_dcc)
4473 {
4474 	const uint64_t modifier = afb->base.modifier;
4475 	int ret;
4476 
4477 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4478 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4479 
4480 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4481 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4482 
4483 		dcc->enable = 1;
4484 		dcc->meta_pitch = afb->base.pitches[1];
4485 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4486 
4487 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4488 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4489 	}
4490 
4491 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4492 	if (ret)
4493 		return ret;
4494 
4495 	return 0;
4496 }
4497 
4498 static int
4499 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4500 			     const struct amdgpu_framebuffer *afb,
4501 			     const enum surface_pixel_format format,
4502 			     const enum dc_rotation_angle rotation,
4503 			     const uint64_t tiling_flags,
4504 			     union dc_tiling_info *tiling_info,
4505 			     struct plane_size *plane_size,
4506 			     struct dc_plane_dcc_param *dcc,
4507 			     struct dc_plane_address *address,
4508 			     bool tmz_surface,
4509 			     bool force_disable_dcc)
4510 {
4511 	const struct drm_framebuffer *fb = &afb->base;
4512 	int ret;
4513 
4514 	memset(tiling_info, 0, sizeof(*tiling_info));
4515 	memset(plane_size, 0, sizeof(*plane_size));
4516 	memset(dcc, 0, sizeof(*dcc));
4517 	memset(address, 0, sizeof(*address));
4518 
4519 	address->tmz_surface = tmz_surface;
4520 
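	/*
	 * RGB surfaces are programmed with a single graphics address;
	 * YUV surfaces use separate luma and chroma planes.
	 */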
4521 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4522 		uint64_t addr = afb->address + fb->offsets[0];
4523 
4524 		plane_size->surface_size.x = 0;
4525 		plane_size->surface_size.y = 0;
4526 		plane_size->surface_size.width = fb->width;
4527 		plane_size->surface_size.height = fb->height;
4528 		plane_size->surface_pitch =
4529 			fb->pitches[0] / fb->format->cpp[0];
4530 
4531 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4532 		address->grph.addr.low_part = lower_32_bits(addr);
4533 		address->grph.addr.high_part = upper_32_bits(addr);
4534 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4535 		uint64_t luma_addr = afb->address + fb->offsets[0];
4536 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4537 
4538 		plane_size->surface_size.x = 0;
4539 		plane_size->surface_size.y = 0;
4540 		plane_size->surface_size.width = fb->width;
4541 		plane_size->surface_size.height = fb->height;
4542 		plane_size->surface_pitch =
4543 			fb->pitches[0] / fb->format->cpp[0];
4544 
4545 		plane_size->chroma_size.x = 0;
4546 		plane_size->chroma_size.y = 0;
4547 		/* TODO: set these based on surface format */
4548 		plane_size->chroma_size.width = fb->width / 2;
4549 		plane_size->chroma_size.height = fb->height / 2;
4550 
4551 		plane_size->chroma_pitch =
4552 			fb->pitches[1] / fb->format->cpp[1];
4553 
4554 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4555 		address->video_progressive.luma_addr.low_part =
4556 			lower_32_bits(luma_addr);
4557 		address->video_progressive.luma_addr.high_part =
4558 			upper_32_bits(luma_addr);
4559 		address->video_progressive.chroma_addr.low_part =
4560 			lower_32_bits(chroma_addr);
4561 		address->video_progressive.chroma_addr.high_part =
4562 			upper_32_bits(chroma_addr);
4563 	}
4564 
4565 	if (adev->family >= AMDGPU_FAMILY_AI) {
4566 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4567 								rotation, plane_size,
4568 								tiling_info, dcc,
4569 								address,
4570 								force_disable_dcc);
4571 		if (ret)
4572 			return ret;
4573 	} else {
4574 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4575 	}
4576 
4577 	return 0;
4578 }
4579 
4580 static void
4581 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4582 			       bool *per_pixel_alpha, bool *global_alpha,
4583 			       int *global_alpha_value)
4584 {
4585 	*per_pixel_alpha = false;
4586 	*global_alpha = false;
4587 	*global_alpha_value = 0xff;
4588 
4589 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4590 		return;
4591 
4592 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4593 		static const uint32_t alpha_formats[] = {
4594 			DRM_FORMAT_ARGB8888,
4595 			DRM_FORMAT_RGBA8888,
4596 			DRM_FORMAT_ABGR8888,
4597 		};
4598 		uint32_t format = plane_state->fb->format->format;
4599 		unsigned int i;
4600 
4601 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4602 			if (format == alpha_formats[i]) {
4603 				*per_pixel_alpha = true;
4604 				break;
4605 			}
4606 		}
4607 	}
4608 
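	/*
	 * DRM plane alpha is 16-bit; DC expects an 8-bit global alpha,
	 * hence the shift below.
	 */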
4609 	if (plane_state->alpha < 0xffff) {
4610 		*global_alpha = true;
4611 		*global_alpha_value = plane_state->alpha >> 8;
4612 	}
4613 }
4614 
4615 static int
4616 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4617 			    const enum surface_pixel_format format,
4618 			    enum dc_color_space *color_space)
4619 {
4620 	bool full_range;
4621 
4622 	*color_space = COLOR_SPACE_SRGB;
4623 
4624 	/* DRM color properties only affect non-RGB formats. */
4625 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4626 		return 0;
4627 
4628 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4629 
4630 	switch (plane_state->color_encoding) {
4631 	case DRM_COLOR_YCBCR_BT601:
4632 		if (full_range)
4633 			*color_space = COLOR_SPACE_YCBCR601;
4634 		else
4635 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4636 		break;
4637 
4638 	case DRM_COLOR_YCBCR_BT709:
4639 		if (full_range)
4640 			*color_space = COLOR_SPACE_YCBCR709;
4641 		else
4642 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4643 		break;
4644 
4645 	case DRM_COLOR_YCBCR_BT2020:
4646 		if (full_range)
4647 			*color_space = COLOR_SPACE_2020_YCBCR;
4648 		else
4649 			return -EINVAL;
4650 		break;
4651 
4652 	default:
4653 		return -EINVAL;
4654 	}
4655 
4656 	return 0;
4657 }
4658 
4659 static int
4660 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4661 			    const struct drm_plane_state *plane_state,
4662 			    const uint64_t tiling_flags,
4663 			    struct dc_plane_info *plane_info,
4664 			    struct dc_plane_address *address,
4665 			    bool tmz_surface,
4666 			    bool force_disable_dcc)
4667 {
4668 	const struct drm_framebuffer *fb = plane_state->fb;
4669 	const struct amdgpu_framebuffer *afb =
4670 		to_amdgpu_framebuffer(plane_state->fb);
4671 	struct drm_format_name_buf format_name;
4672 	int ret;
4673 
4674 	memset(plane_info, 0, sizeof(*plane_info));
4675 
4676 	switch (fb->format->format) {
4677 	case DRM_FORMAT_C8:
4678 		plane_info->format =
4679 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4680 		break;
4681 	case DRM_FORMAT_RGB565:
4682 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4683 		break;
4684 	case DRM_FORMAT_XRGB8888:
4685 	case DRM_FORMAT_ARGB8888:
4686 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4687 		break;
4688 	case DRM_FORMAT_XRGB2101010:
4689 	case DRM_FORMAT_ARGB2101010:
4690 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4691 		break;
4692 	case DRM_FORMAT_XBGR2101010:
4693 	case DRM_FORMAT_ABGR2101010:
4694 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4695 		break;
4696 	case DRM_FORMAT_XBGR8888:
4697 	case DRM_FORMAT_ABGR8888:
4698 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4699 		break;
4700 	case DRM_FORMAT_NV21:
4701 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4702 		break;
4703 	case DRM_FORMAT_NV12:
4704 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4705 		break;
4706 	case DRM_FORMAT_P010:
4707 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4708 		break;
4709 	case DRM_FORMAT_XRGB16161616F:
4710 	case DRM_FORMAT_ARGB16161616F:
4711 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4712 		break;
4713 	case DRM_FORMAT_XBGR16161616F:
4714 	case DRM_FORMAT_ABGR16161616F:
4715 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4716 		break;
4717 	default:
4718 		DRM_ERROR(
4719 			"Unsupported screen format %s\n",
4720 			drm_get_format_name(fb->format->format, &format_name));
4721 		return -EINVAL;
4722 	}
4723 
4724 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4725 	case DRM_MODE_ROTATE_0:
4726 		plane_info->rotation = ROTATION_ANGLE_0;
4727 		break;
4728 	case DRM_MODE_ROTATE_90:
4729 		plane_info->rotation = ROTATION_ANGLE_90;
4730 		break;
4731 	case DRM_MODE_ROTATE_180:
4732 		plane_info->rotation = ROTATION_ANGLE_180;
4733 		break;
4734 	case DRM_MODE_ROTATE_270:
4735 		plane_info->rotation = ROTATION_ANGLE_270;
4736 		break;
4737 	default:
4738 		plane_info->rotation = ROTATION_ANGLE_0;
4739 		break;
4740 	}
4741 
4742 	plane_info->visible = true;
4743 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4744 
4745 	plane_info->layer_index = 0;
4746 
4747 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4748 					  &plane_info->color_space);
4749 	if (ret)
4750 		return ret;
4751 
4752 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4753 					   plane_info->rotation, tiling_flags,
4754 					   &plane_info->tiling_info,
4755 					   &plane_info->plane_size,
4756 					   &plane_info->dcc, address, tmz_surface,
4757 					   force_disable_dcc);
4758 	if (ret)
4759 		return ret;
4760 
4761 	fill_blending_from_plane_state(
4762 		plane_state, &plane_info->per_pixel_alpha,
4763 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4764 
4765 	return 0;
4766 }
4767 
4768 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4769 				    struct dc_plane_state *dc_plane_state,
4770 				    struct drm_plane_state *plane_state,
4771 				    struct drm_crtc_state *crtc_state)
4772 {
4773 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4774 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4775 	struct dc_scaling_info scaling_info;
4776 	struct dc_plane_info plane_info;
4777 	int ret;
4778 	bool force_disable_dcc = false;
4779 
4780 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4781 	if (ret)
4782 		return ret;
4783 
4784 	dc_plane_state->src_rect = scaling_info.src_rect;
4785 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4786 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4787 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4788 
4789 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4790 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4791 					  afb->tiling_flags,
4792 					  &plane_info,
4793 					  &dc_plane_state->address,
4794 					  afb->tmz_surface,
4795 					  force_disable_dcc);
4796 	if (ret)
4797 		return ret;
4798 
4799 	dc_plane_state->format = plane_info.format;
4800 	dc_plane_state->color_space = plane_info.color_space;
4802 	dc_plane_state->plane_size = plane_info.plane_size;
4803 	dc_plane_state->rotation = plane_info.rotation;
4804 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4805 	dc_plane_state->stereo_format = plane_info.stereo_format;
4806 	dc_plane_state->tiling_info = plane_info.tiling_info;
4807 	dc_plane_state->visible = plane_info.visible;
4808 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4809 	dc_plane_state->global_alpha = plane_info.global_alpha;
4810 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4811 	dc_plane_state->dcc = plane_info.dcc;
4812 	dc_plane_state->layer_index = plane_info.layer_index; /* Always 0 for now */
4813 
4814 	/*
4815 	 * Always set input transfer function, since plane state is refreshed
4816 	 * every time.
4817 	 */
4818 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4819 	if (ret)
4820 		return ret;
4821 
4822 	return 0;
4823 }
4824 
4825 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4826 					   const struct dm_connector_state *dm_state,
4827 					   struct dc_stream_state *stream)
4828 {
4829 	enum amdgpu_rmx_type rmx_type;
4830 
4831 	struct rect src = { 0 }; /* viewport in composition space */
4832 	struct rect dst = { 0 }; /* stream addressable area */
4833 
4834 	/* no mode. nothing to be done */
4835 	if (!mode)
4836 		return;
4837 
4838 	/* Full screen scaling by default */
4839 	src.width = mode->hdisplay;
4840 	src.height = mode->vdisplay;
4841 	dst.width = stream->timing.h_addressable;
4842 	dst.height = stream->timing.v_addressable;
4843 
4844 	if (dm_state) {
4845 		rmx_type = dm_state->scaling;
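		/*
		 * Compare aspect ratios by cross-multiplying to stay in
		 * integer math: src.w * dst.h < src.h * dst.w means the
		 * source is narrower than the destination.
		 */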
4846 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4847 			if (src.width * dst.height <
4848 					src.height * dst.width) {
4849 				/* height needs less upscaling/more downscaling */
4850 				dst.width = src.width *
4851 						dst.height / src.height;
4852 			} else {
4853 				/* width needs less upscaling/more downscaling */
4854 				dst.height = src.height *
4855 						dst.width / src.width;
4856 			}
4857 		} else if (rmx_type == RMX_CENTER) {
4858 			dst = src;
4859 		}
4860 
4861 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4862 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4863 
4864 		if (dm_state->underscan_enable) {
4865 			dst.x += dm_state->underscan_hborder / 2;
4866 			dst.y += dm_state->underscan_vborder / 2;
4867 			dst.width -= dm_state->underscan_hborder;
4868 			dst.height -= dm_state->underscan_vborder;
4869 		}
4870 	}
4871 
4872 	stream->src = src;
4873 	stream->dst = dst;
4874 
4875 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4876 			dst.x, dst.y, dst.width, dst.height);
4877 
4878 }
4879 
4880 static enum dc_color_depth
4881 convert_color_depth_from_display_info(const struct drm_connector *connector,
4882 				      bool is_y420, int requested_bpc)
4883 {
4884 	uint8_t bpc;
4885 
4886 	if (is_y420) {
4887 		bpc = 8;
4888 
4889 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4890 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4891 			bpc = 16;
4892 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4893 			bpc = 12;
4894 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4895 			bpc = 10;
4896 	} else {
4897 		bpc = (uint8_t)connector->display_info.bpc;
4898 		/* Assume 8 bpc by default if no bpc is specified. */
4899 		bpc = bpc ? bpc : 8;
4900 	}
4901 
4902 	if (requested_bpc > 0) {
4903 		/*
4904 		 * Cap display bpc based on the user requested value.
4905 		 *
4906 		 * The value for state->max_bpc may not be correctly updated
4907 		 * depending on when the connector gets added to the state
4908 		 * or if this was called outside of atomic check, so it
4909 		 * can't be used directly.
4910 		 */
4911 		bpc = min_t(u8, bpc, requested_bpc);
4912 
4913 		/* Round down to the nearest even number. */
4914 		bpc = bpc - (bpc & 1);
4915 	}
4916 
4917 	switch (bpc) {
4918 	case 0:
4919 		/*
4920 		 * Temporary workaround: DRM doesn't parse color depth for
4921 		 * EDID revisions before 1.4.
4922 		 * TODO: Fix EDID parsing
4923 		 */
4924 		return COLOR_DEPTH_888;
4925 	case 6:
4926 		return COLOR_DEPTH_666;
4927 	case 8:
4928 		return COLOR_DEPTH_888;
4929 	case 10:
4930 		return COLOR_DEPTH_101010;
4931 	case 12:
4932 		return COLOR_DEPTH_121212;
4933 	case 14:
4934 		return COLOR_DEPTH_141414;
4935 	case 16:
4936 		return COLOR_DEPTH_161616;
4937 	default:
4938 		return COLOR_DEPTH_UNDEFINED;
4939 	}
4940 }
4941 
4942 static enum dc_aspect_ratio
4943 get_aspect_ratio(const struct drm_display_mode *mode_in)
4944 {
4945 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4946 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4947 }
4948 
4949 static enum dc_color_space
4950 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4951 {
4952 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4953 
4954 	switch (dc_crtc_timing->pixel_encoding)	{
4955 	case PIXEL_ENCODING_YCBCR422:
4956 	case PIXEL_ENCODING_YCBCR444:
4957 	case PIXEL_ENCODING_YCBCR420:
4958 	{
4959 		/*
4960 		 * 27.03 MHz is the separation point between HDTV and SDTV;
4961 		 * per the HDMI spec we use YCbCr709 and YCbCr601
4962 		 * respectively.
4963 		 */
4964 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4965 			if (dc_crtc_timing->flags.Y_ONLY)
4966 				color_space =
4967 					COLOR_SPACE_YCBCR709_LIMITED;
4968 			else
4969 				color_space = COLOR_SPACE_YCBCR709;
4970 		} else {
4971 			if (dc_crtc_timing->flags.Y_ONLY)
4972 				color_space =
4973 					COLOR_SPACE_YCBCR601_LIMITED;
4974 			else
4975 				color_space = COLOR_SPACE_YCBCR601;
4976 		}
4977 
4978 	}
4979 	break;
4980 	case PIXEL_ENCODING_RGB:
4981 		color_space = COLOR_SPACE_SRGB;
4982 		break;
4983 
4984 	default:
4985 		WARN_ON(1);
4986 		break;
4987 	}
4988 
4989 	return color_space;
4990 }
4991 
4992 static bool adjust_colour_depth_from_display_info(
4993 	struct dc_crtc_timing *timing_out,
4994 	const struct drm_display_info *info)
4995 {
4996 	enum dc_color_depth depth = timing_out->display_color_depth;
4997 	int normalized_clk;
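	/*
	 * Step down from the requested colour depth until the depth-scaled
	 * pixel clock fits within the sink's maximum TMDS clock.
	 */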
4998 	do {
4999 		normalized_clk = timing_out->pix_clk_100hz / 10;
5000 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5001 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5002 			normalized_clk /= 2;
5003 		/* Adjust the pixel clock per the HDMI spec, based on the colour depth. */
5004 		switch (depth) {
5005 		case COLOR_DEPTH_888:
5006 			break;
5007 		case COLOR_DEPTH_101010:
5008 			normalized_clk = (normalized_clk * 30) / 24;
5009 			break;
5010 		case COLOR_DEPTH_121212:
5011 			normalized_clk = (normalized_clk * 36) / 24;
5012 			break;
5013 		case COLOR_DEPTH_161616:
5014 			normalized_clk = (normalized_clk * 48) / 24;
5015 			break;
5016 		default:
5017 			/* The above depths are the only ones valid for HDMI. */
5018 			return false;
5019 		}
5020 		if (normalized_clk <= info->max_tmds_clock) {
5021 			timing_out->display_color_depth = depth;
5022 			return true;
5023 		}
5024 	} while (--depth > COLOR_DEPTH_666);
5025 	return false;
5026 }
5027 
5028 static void fill_stream_properties_from_drm_display_mode(
5029 	struct dc_stream_state *stream,
5030 	const struct drm_display_mode *mode_in,
5031 	const struct drm_connector *connector,
5032 	const struct drm_connector_state *connector_state,
5033 	const struct dc_stream_state *old_stream,
5034 	int requested_bpc)
5035 {
5036 	struct dc_crtc_timing *timing_out = &stream->timing;
5037 	const struct drm_display_info *info = &connector->display_info;
5038 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5039 	struct hdmi_vendor_infoframe hv_frame;
5040 	struct hdmi_avi_infoframe avi_frame;
5041 
5042 	memset(&hv_frame, 0, sizeof(hv_frame));
5043 	memset(&avi_frame, 0, sizeof(avi_frame));
5044 
5045 	timing_out->h_border_left = 0;
5046 	timing_out->h_border_right = 0;
5047 	timing_out->v_border_top = 0;
5048 	timing_out->v_border_bottom = 0;
5049 	/* TODO: un-hardcode */
5050 	if (drm_mode_is_420_only(info, mode_in)
5051 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5052 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5053 	else if (drm_mode_is_420_also(info, mode_in)
5054 			&& aconnector->force_yuv420_output)
5055 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5056 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5057 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5058 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5059 	else
5060 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5061 
5062 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5063 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5064 		connector,
5065 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5066 		requested_bpc);
5067 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5068 	timing_out->hdmi_vic = 0;
5069 
5070 	if (old_stream) {
5071 		timing_out->vic = old_stream->timing.vic;
5072 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5073 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5074 	} else {
5075 		timing_out->vic = drm_match_cea_mode(mode_in);
5076 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5077 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5078 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5079 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5080 	}
5081 
5082 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5083 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5084 		timing_out->vic = avi_frame.video_code;
5085 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5086 		timing_out->hdmi_vic = hv_frame.vic;
5087 	}
5088 
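	/*
	 * Translate the DRM mode into DC's timing representation: front
	 * porch and sync width are derived from the sync start/end
	 * offsets, and the clock is converted to 100 Hz units.
	 */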
5089 	timing_out->h_addressable = mode_in->hdisplay;
5090 	timing_out->h_total = mode_in->htotal;
5091 	timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5092 	timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5093 	timing_out->v_total = mode_in->vtotal;
5094 	timing_out->v_addressable = mode_in->vdisplay;
5095 	timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5096 	timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5097 	timing_out->pix_clk_100hz = mode_in->clock * 10;
5098 
5099 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5100 
5101 	stream->output_color_space = get_output_color_space(timing_out);
5102 
5103 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5104 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5105 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5106 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5107 		    drm_mode_is_420_also(info, mode_in) &&
5108 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5109 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5110 			adjust_colour_depth_from_display_info(timing_out, info);
5111 		}
5112 	}
5113 }
5114 
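/*
 * Copy the EDID-derived audio capabilities from the dc_sink into the
 * stream's audio_info: manufacturer/product id, display name, the audio
 * mode list (CEA revision 3+ only), speaker allocation and the progressive
 * video/audio latencies.
 */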
5115 static void fill_audio_info(struct audio_info *audio_info,
5116 			    const struct drm_connector *drm_connector,
5117 			    const struct dc_sink *dc_sink)
5118 {
5119 	int i = 0;
5120 	int cea_revision = 0;
5121 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5122 
5123 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5124 	audio_info->product_id = edid_caps->product_id;
5125 
5126 	cea_revision = drm_connector->display_info.cea_rev;
5127 
5128 	strscpy(audio_info->display_name,
5129 		edid_caps->display_name,
5130 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5131 
5132 	if (cea_revision >= 3) {
5133 		audio_info->mode_count = edid_caps->audio_mode_count;
5134 
5135 		for (i = 0; i < audio_info->mode_count; ++i) {
5136 			audio_info->modes[i].format_code =
5137 					(enum audio_format_code)
5138 					(edid_caps->audio_modes[i].format_code);
5139 			audio_info->modes[i].channel_count =
5140 					edid_caps->audio_modes[i].channel_count;
5141 			audio_info->modes[i].sample_rates.all =
5142 					edid_caps->audio_modes[i].sample_rate;
5143 			audio_info->modes[i].sample_size =
5144 					edid_caps->audio_modes[i].sample_size;
5145 		}
5146 	}
5147 
5148 	audio_info->flags.all = edid_caps->speaker_flags;
5149 
	/* TODO: We only check progressive mode; check interlaced mode too */
5151 	if (drm_connector->latency_present[0]) {
5152 		audio_info->video_latency = drm_connector->video_latency[0];
5153 		audio_info->audio_latency = drm_connector->audio_latency[0];
5154 	}
5155 
	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}
5159 
5160 static void
5161 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5162 				      struct drm_display_mode *dst_mode)
5163 {
5164 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5165 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5166 	dst_mode->crtc_clock = src_mode->crtc_clock;
5167 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5168 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5169 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5170 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5171 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5172 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5173 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5174 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5175 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5176 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5177 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5178 }
5179 
5180 static void
5181 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5182 					const struct drm_display_mode *native_mode,
5183 					bool scale_enabled)
5184 {
5185 	if (scale_enabled) {
5186 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5187 	} else if (native_mode->clock == drm_mode->clock &&
5188 			native_mode->htotal == drm_mode->htotal &&
5189 			native_mode->vtotal == drm_mode->vtotal) {
5190 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5191 	} else {
		/* no scaling and no amdgpu-inserted mode: no need to patch */
5193 	}
5194 }
5195 
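/*
 * Create a virtual dc_sink on the connector's link so that a stream can
 * still be constructed when no physical sink is attached.
 */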
5196 static struct dc_sink *
5197 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5198 {
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
5202 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5203 
5204 	sink = dc_sink_create(&sink_init_data);
5205 	if (!sink) {
5206 		DRM_ERROR("Failed to create sink!\n");
5207 		return NULL;
5208 	}
5209 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5210 
5211 	return sink;
5212 }
5213 
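/*
 * For streams participating in multi-display synchronization, arm the CRTC
 * reset trigger on the VSYNC rising edge, delayed to the next line.
 */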
5214 static void set_multisync_trigger_params(
5215 		struct dc_stream_state *stream)
5216 {
5217 	if (stream->triggered_crtc_reset.enabled) {
5218 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5219 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5220 	}
5221 }
5222 
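/*
 * Elect the synchronized stream with the highest refresh rate as the
 * multisync master and point every stream's reset event source at it.
 */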
5223 static void set_master_stream(struct dc_stream_state *stream_set[],
5224 			      int stream_count)
5225 {
5226 	int j, highest_rfr = 0, master_stream = 0;
5227 
5228 	for (j = 0;  j < stream_count; j++) {
5229 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5230 			int refresh_rate = 0;
5231 
			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5234 			if (refresh_rate > highest_rfr) {
5235 				highest_rfr = refresh_rate;
5236 				master_stream = j;
5237 			}
5238 		}
5239 	}
5240 	for (j = 0;  j < stream_count; j++) {
5241 		if (stream_set[j])
5242 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5243 	}
5244 }
5245 
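/*
 * With two or more streams in the context, program the per-stream trigger
 * parameters and elect a master for per-frame CRTC synchronization.
 */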
5246 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5247 {
5248 	int i = 0;
5249 
5250 	if (context->stream_count < 2)
5251 		return;
5252 	for (i = 0; i < context->stream_count ; i++) {
5253 		if (!context->streams[i])
5254 			continue;
5255 		/*
5256 		 * TODO: add a function to read AMD VSDB bits and set
5257 		 * crtc_sync_master.multi_sync_enabled flag
5258 		 * For now it's set to false
5259 		 */
5260 		set_multisync_trigger_params(context->streams[i]);
5261 	}
5262 	set_master_stream(context->streams, context->stream_count);
5263 }
5264 
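/*
 * Return the mode with the highest refresh rate at the preferred mode's
 * resolution, caching the result in freesync_vid_base so subsequent calls
 * are cheap. Used as the base mode for FreeSync video.
 */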
5265 static struct drm_display_mode *
5266 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5267 			  bool use_probed_modes)
5268 {
5269 	struct drm_display_mode *m, *m_pref = NULL;
5270 	u16 current_refresh, highest_refresh;
5271 	struct list_head *list_head = use_probed_modes ?
5272 						    &aconnector->base.probed_modes :
5273 						    &aconnector->base.modes;
5274 
5275 	if (aconnector->freesync_vid_base.clock != 0)
5276 		return &aconnector->freesync_vid_base;
5277 
5278 	/* Find the preferred mode */
	list_for_each_entry(m, list_head, head) {
5280 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5281 			m_pref = m;
5282 			break;
5283 		}
5284 	}
5285 
5286 	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
5288 		m_pref = list_first_entry_or_null(
5289 			&aconnector->base.modes, struct drm_display_mode, head);
5290 		if (!m_pref) {
5291 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5292 			return NULL;
5293 		}
5294 	}
5295 
5296 	highest_refresh = drm_mode_vrefresh(m_pref);
5297 
5298 	/*
5299 	 * Find the mode with highest refresh rate with same resolution.
5300 	 * For some monitors, preferred mode is not the mode with highest
5301 	 * supported refresh rate.
5302 	 */
	list_for_each_entry(m, list_head, head) {
		current_refresh = drm_mode_vrefresh(m);
5305 
5306 		if (m->hdisplay == m_pref->hdisplay &&
5307 		    m->vdisplay == m_pref->vdisplay &&
5308 		    highest_refresh < current_refresh) {
5309 			highest_refresh = current_refresh;
5310 			m_pref = m;
5311 		}
5312 	}
5313 
5314 	aconnector->freesync_vid_base = *m_pref;
5315 	return m_pref;
5316 }
5317 
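/*
 * A mode qualifies as a FreeSync video mode when it differs from the cached
 * base mode only in its vertical front porch: same clock, active area,
 * horizontal timing and vscan, with vsync start/end shifted by exactly the
 * vtotal delta.
 */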
5318 static bool is_freesync_video_mode(struct drm_display_mode *mode,
5319 				   struct amdgpu_dm_connector *aconnector)
5320 {
5321 	struct drm_display_mode *high_mode;
5322 	int timing_diff;
5323 
5324 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5325 	if (!high_mode || !mode)
5326 		return false;
5327 
5328 	timing_diff = high_mode->vtotal - mode->vtotal;
5329 
5330 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5331 	    high_mode->hdisplay != mode->hdisplay ||
5332 	    high_mode->vdisplay != mode->vdisplay ||
5333 	    high_mode->hsync_start != mode->hsync_start ||
5334 	    high_mode->hsync_end != mode->hsync_end ||
5335 	    high_mode->htotal != mode->htotal ||
5336 	    high_mode->hskew != mode->hskew ||
5337 	    high_mode->vscan != mode->vscan ||
5338 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5339 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5340 		return false;
5341 	else
5342 		return true;
5343 }
5344 
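/*
 * Build a DC stream for the connector: resolve the sink (creating a fake
 * one if necessary), decide the CRTC timing (FreeSync video base mode or
 * the preferred mode's timing), fill the stream and audio properties, and
 * configure DSC and the VSC/HF-VSIF info packets where applicable.
 */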
5345 static struct dc_stream_state *
5346 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5347 		       const struct drm_display_mode *drm_mode,
5348 		       const struct dm_connector_state *dm_state,
5349 		       const struct dc_stream_state *old_stream,
5350 		       int requested_bpc)
5351 {
5352 	struct drm_display_mode *preferred_mode = NULL;
5353 	struct drm_connector *drm_connector;
5354 	const struct drm_connector_state *con_state =
5355 		dm_state ? &dm_state->base : NULL;
5356 	struct dc_stream_state *stream = NULL;
5357 	struct drm_display_mode mode = *drm_mode;
5358 	struct drm_display_mode saved_mode;
5359 	struct drm_display_mode *freesync_mode = NULL;
5360 	bool native_mode_found = false;
5361 	bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5362 	int mode_refresh;
5363 	int preferred_refresh = 0;
5364 #if defined(CONFIG_DRM_AMD_DC_DCN)
5365 	struct dsc_dec_dpcd_caps dsc_caps;
5366 	uint32_t link_bandwidth_kbps;
5367 #endif
5368 	struct dc_sink *sink = NULL;
5369 
5370 	memset(&saved_mode, 0, sizeof(saved_mode));
5371 
5372 	if (aconnector == NULL) {
5373 		DRM_ERROR("aconnector is NULL!\n");
5374 		return stream;
5375 	}
5376 
5377 	drm_connector = &aconnector->base;
5378 
5379 	if (!aconnector->dc_sink) {
5380 		sink = create_fake_sink(aconnector);
5381 		if (!sink)
5382 			return stream;
5383 	} else {
5384 		sink = aconnector->dc_sink;
5385 		dc_sink_retain(sink);
5386 	}
5387 
5388 	stream = dc_create_stream_for_sink(sink);
5389 
5390 	if (stream == NULL) {
5391 		DRM_ERROR("Failed to create stream for sink!\n");
5392 		goto finish;
5393 	}
5394 
5395 	stream->dm_stream_context = aconnector;
5396 
5397 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5398 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5399 
5400 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5401 		/* Search for preferred mode */
5402 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5403 			native_mode_found = true;
5404 			break;
5405 		}
5406 	}
5407 	if (!native_mode_found)
5408 		preferred_mode = list_first_entry_or_null(
5409 				&aconnector->base.modes,
5410 				struct drm_display_mode,
5411 				head);
5412 
5413 	mode_refresh = drm_mode_vrefresh(&mode);
5414 
5415 	if (preferred_mode == NULL) {
5416 		/*
5417 		 * This may not be an error, the use case is when we have no
5418 		 * usermode calls to reset and set mode upon hotplug. In this
5419 		 * case, we call set mode ourselves to restore the previous mode
5420 		 * and the modelist may not be filled in in time.
5421 		 */
5422 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5423 	} else {
5424 		recalculate_timing |= amdgpu_freesync_vid_mode &&
5425 				 is_freesync_video_mode(&mode, aconnector);
5426 		if (recalculate_timing) {
5427 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5428 			saved_mode = mode;
5429 			mode = *freesync_mode;
5430 		} else {
5431 			decide_crtc_timing_for_drm_display_mode(
5432 				&mode, preferred_mode,
5433 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5434 		}
5435 
5436 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5437 	}
5438 
5439 	if (recalculate_timing)
5440 		drm_mode_set_crtcinfo(&saved_mode, 0);
5441 	else
5442 		drm_mode_set_crtcinfo(&mode, 0);
5443 
5444        /*
5445 	* If scaling is enabled and refresh rate didn't change
5446 	* we copy the vic and polarities of the old timings
5447 	*/
5448 	if (!recalculate_timing || mode_refresh != preferred_refresh)
5449 		fill_stream_properties_from_drm_display_mode(
5450 			stream, &mode, &aconnector->base, con_state, NULL,
5451 			requested_bpc);
5452 	else
5453 		fill_stream_properties_from_drm_display_mode(
5454 			stream, &mode, &aconnector->base, con_state, old_stream,
5455 			requested_bpc);
5456 
5457 	stream->timing.flags.DSC = 0;
5458 
5459 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5460 #if defined(CONFIG_DRM_AMD_DC_DCN)
5461 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5462 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5463 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5464 				      &dsc_caps);
5465 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5466 							     dc_link_get_link_cap(aconnector->dc_link));
5467 
5468 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5469 			/* Set DSC policy according to dsc_clock_en */
5470 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5471 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5472 
5473 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5474 						  &dsc_caps,
5475 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5476 						  0,
5477 						  link_bandwidth_kbps,
5478 						  &stream->timing,
5479 						  &stream->timing.dsc_cfg))
5480 				stream->timing.flags.DSC = 1;
5481 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5482 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5483 				stream->timing.flags.DSC = 1;
5484 
5485 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5486 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5487 
5488 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5489 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5490 
5491 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5492 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5493 		}
5494 #endif
5495 	}
5496 
5497 	update_stream_scaling_settings(&mode, dm_state, stream);
5498 
5499 	fill_audio_info(
5500 		&stream->audio_info,
5501 		drm_connector,
5502 		sink);
5503 
5504 	update_stream_signal(stream, sink);
5505 
5506 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5507 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5508 
5509 	if (stream->link->psr_settings.psr_feature_enabled) {
5510 		//
5511 		// should decide stream support vsc sdp colorimetry capability
5512 		// before building vsc info packet
5513 		//
5514 		stream->use_vsc_sdp_for_colorimetry = false;
5515 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5516 			stream->use_vsc_sdp_for_colorimetry =
5517 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5518 		} else {
5519 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5520 				stream->use_vsc_sdp_for_colorimetry = true;
5521 		}
5522 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5523 	}
5524 finish:
5525 	dc_sink_release(sink);
5526 
5527 	return stream;
5528 }
5529 
5530 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5531 {
5532 	drm_crtc_cleanup(crtc);
5533 	kfree(crtc);
5534 }
5535 
5536 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5537 				  struct drm_crtc_state *state)
5538 {
5539 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5540 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
5550 }
5551 
5552 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5553 {
5554 	struct dm_crtc_state *state;
5555 
5556 	if (crtc->state)
5557 		dm_crtc_destroy_state(crtc, crtc->state);
5558 
5559 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5560 	if (WARN_ON(!state))
5561 		return;
5562 
5563 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5564 }
5565 
5566 static struct drm_crtc_state *
5567 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5568 {
5569 	struct dm_crtc_state *state, *cur;
5570 
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
5575 
5576 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5577 	if (!state)
5578 		return NULL;
5579 
5580 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5581 
5582 	if (cur->stream) {
5583 		state->stream = cur->stream;
5584 		dc_stream_retain(state->stream);
5585 	}
5586 
5587 	state->active_planes = cur->active_planes;
5588 	state->vrr_infopacket = cur->vrr_infopacket;
5589 	state->abm_level = cur->abm_level;
5590 	state->vrr_supported = cur->vrr_supported;
5591 	state->freesync_config = cur->freesync_config;
5592 	state->cm_has_degamma = cur->cm_has_degamma;
5593 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
5595 
5596 	return &state->base;
5597 }
5598 
5599 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5600 int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5601 {
5602 	crtc_debugfs_init(crtc);
5603 
5604 	return 0;
5605 }
5606 #endif
5607 
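/* Enable or disable the VUPDATE interrupt for the CRTC's OTG instance. */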
5608 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5609 {
5610 	enum dc_irq_source irq_source;
5611 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5612 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5613 	int rc;
5614 
5615 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5616 
5617 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5618 
5619 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5620 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
5621 	return rc;
5622 }
5623 
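/*
 * Enable or disable the VBLANK interrupt, together with the VUPDATE
 * interrupt when VRR is active. On DCN hardware this also schedules the
 * MALL work item, except while a GPU reset is in progress.
 */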
5624 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5625 {
5626 	enum dc_irq_source irq_source;
5627 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5628 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5629 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5630 #if defined(CONFIG_DRM_AMD_DC_DCN)
5631 	struct amdgpu_display_manager *dm = &adev->dm;
5632 	unsigned long flags;
5633 #endif
5634 	int rc = 0;
5635 
5636 	if (enable) {
5637 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5638 		if (amdgpu_dm_vrr_active(acrtc_state))
5639 			rc = dm_set_vupdate_irq(crtc, true);
5640 	} else {
5641 		/* vblank irq off -> vupdate irq off */
5642 		rc = dm_set_vupdate_irq(crtc, false);
5643 	}
5644 
5645 	if (rc)
5646 		return rc;
5647 
5648 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5649 
5650 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5651 		return -EBUSY;
5652 
5653 	if (amdgpu_in_reset(adev))
5654 		return 0;
5655 
5656 #if defined(CONFIG_DRM_AMD_DC_DCN)
5657 	spin_lock_irqsave(&dm->vblank_lock, flags);
5658 	dm->vblank_workqueue->dm = dm;
5659 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5660 	dm->vblank_workqueue->enable = enable;
5661 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
5662 	schedule_work(&dm->vblank_workqueue->mall_work);
5663 #endif
5664 
5665 	return 0;
5666 }
5667 
5668 static int dm_enable_vblank(struct drm_crtc *crtc)
5669 {
5670 	return dm_set_vblank(crtc, true);
5671 }
5672 
5673 static void dm_disable_vblank(struct drm_crtc *crtc)
5674 {
5675 	dm_set_vblank(crtc, false);
5676 }
5677 
/* Only the options currently available to the driver are implemented */
5679 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5680 	.reset = dm_crtc_reset_state,
5681 	.destroy = amdgpu_dm_crtc_destroy,
5682 	.set_config = drm_atomic_helper_set_config,
5683 	.page_flip = drm_atomic_helper_page_flip,
5684 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5685 	.atomic_destroy_state = dm_crtc_destroy_state,
5686 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5687 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5688 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5689 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5690 	.enable_vblank = dm_enable_vblank,
5691 	.disable_vblank = dm_disable_vblank,
5692 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5693 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5694 	.late_register = amdgpu_dm_crtc_late_register,
5695 #endif
5696 };
5697 
5698 static enum drm_connector_status
5699 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5700 {
5701 	bool connected;
5702 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5703 
5704 	/*
5705 	 * Notes:
5706 	 * 1. This interface is NOT called in context of HPD irq.
5707 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5708 	 * makes it a bad place for *any* MST-related activity.
5709 	 */
5710 
5711 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5712 	    !aconnector->fake_enable)
5713 		connected = (aconnector->dc_sink != NULL);
5714 	else
5715 		connected = (aconnector->base.force == DRM_FORCE_ON);
5716 
5717 	update_subconnector_property(aconnector);
5718 
5719 	return (connected ? connector_status_connected :
5720 			connector_status_disconnected);
5721 }
5722 
5723 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5724 					    struct drm_connector_state *connector_state,
5725 					    struct drm_property *property,
5726 					    uint64_t val)
5727 {
5728 	struct drm_device *dev = connector->dev;
5729 	struct amdgpu_device *adev = drm_to_adev(dev);
5730 	struct dm_connector_state *dm_old_state =
5731 		to_dm_connector_state(connector->state);
5732 	struct dm_connector_state *dm_new_state =
5733 		to_dm_connector_state(connector_state);
5734 
5735 	int ret = -EINVAL;
5736 
5737 	if (property == dev->mode_config.scaling_mode_property) {
5738 		enum amdgpu_rmx_type rmx_type;
5739 
5740 		switch (val) {
5741 		case DRM_MODE_SCALE_CENTER:
5742 			rmx_type = RMX_CENTER;
5743 			break;
5744 		case DRM_MODE_SCALE_ASPECT:
5745 			rmx_type = RMX_ASPECT;
5746 			break;
5747 		case DRM_MODE_SCALE_FULLSCREEN:
5748 			rmx_type = RMX_FULL;
5749 			break;
5750 		case DRM_MODE_SCALE_NONE:
5751 		default:
5752 			rmx_type = RMX_OFF;
5753 			break;
5754 		}
5755 
5756 		if (dm_old_state->scaling == rmx_type)
5757 			return 0;
5758 
5759 		dm_new_state->scaling = rmx_type;
5760 		ret = 0;
5761 	} else if (property == adev->mode_info.underscan_hborder_property) {
5762 		dm_new_state->underscan_hborder = val;
5763 		ret = 0;
5764 	} else if (property == adev->mode_info.underscan_vborder_property) {
5765 		dm_new_state->underscan_vborder = val;
5766 		ret = 0;
5767 	} else if (property == adev->mode_info.underscan_property) {
5768 		dm_new_state->underscan_enable = val;
5769 		ret = 0;
5770 	} else if (property == adev->mode_info.abm_level_property) {
5771 		dm_new_state->abm_level = val;
5772 		ret = 0;
5773 	}
5774 
5775 	return ret;
5776 }
5777 
5778 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5779 					    const struct drm_connector_state *state,
5780 					    struct drm_property *property,
5781 					    uint64_t *val)
5782 {
5783 	struct drm_device *dev = connector->dev;
5784 	struct amdgpu_device *adev = drm_to_adev(dev);
5785 	struct dm_connector_state *dm_state =
5786 		to_dm_connector_state(state);
5787 	int ret = -EINVAL;
5788 
5789 	if (property == dev->mode_config.scaling_mode_property) {
5790 		switch (dm_state->scaling) {
5791 		case RMX_CENTER:
5792 			*val = DRM_MODE_SCALE_CENTER;
5793 			break;
5794 		case RMX_ASPECT:
5795 			*val = DRM_MODE_SCALE_ASPECT;
5796 			break;
5797 		case RMX_FULL:
5798 			*val = DRM_MODE_SCALE_FULLSCREEN;
5799 			break;
5800 		case RMX_OFF:
5801 		default:
5802 			*val = DRM_MODE_SCALE_NONE;
5803 			break;
5804 		}
5805 		ret = 0;
5806 	} else if (property == adev->mode_info.underscan_hborder_property) {
5807 		*val = dm_state->underscan_hborder;
5808 		ret = 0;
5809 	} else if (property == adev->mode_info.underscan_vborder_property) {
5810 		*val = dm_state->underscan_vborder;
5811 		ret = 0;
5812 	} else if (property == adev->mode_info.underscan_property) {
5813 		*val = dm_state->underscan_enable;
5814 		ret = 0;
5815 	} else if (property == adev->mode_info.abm_level_property) {
5816 		*val = dm_state->abm_level;
5817 		ret = 0;
5818 	}
5819 
5820 	return ret;
5821 }
5822 
5823 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5824 {
5825 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5826 
5827 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5828 }
5829 
5830 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5831 {
5832 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5833 	const struct dc_link *link = aconnector->dc_link;
5834 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5835 	struct amdgpu_display_manager *dm = &adev->dm;
5836 
5837 	/*
5838 	 * Call only if mst_mgr was iniitalized before since it's not done
5839 	 * for all connector types.
5840 	 */
5841 	if (aconnector->mst_mgr.dev)
5842 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5843 
5844 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5845 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5846 
5847 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5848 	    link->type != dc_connection_none &&
5849 	    dm->backlight_dev) {
5850 		backlight_device_unregister(dm->backlight_dev);
5851 		dm->backlight_dev = NULL;
5852 	}
5853 #endif
5854 
5855 	if (aconnector->dc_em_sink)
5856 		dc_sink_release(aconnector->dc_em_sink);
5857 	aconnector->dc_em_sink = NULL;
5858 	if (aconnector->dc_sink)
5859 		dc_sink_release(aconnector->dc_sink);
5860 	aconnector->dc_sink = NULL;
5861 
5862 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5863 	drm_connector_unregister(connector);
5864 	drm_connector_cleanup(connector);
5865 	if (aconnector->i2c) {
5866 		i2c_del_adapter(&aconnector->i2c->base);
5867 		kfree(aconnector->i2c);
5868 	}
5869 	kfree(aconnector->dm_dp_aux.aux.name);
5870 
5871 	kfree(connector);
5872 }
5873 
5874 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5875 {
5876 	struct dm_connector_state *state =
5877 		to_dm_connector_state(connector->state);
5878 
5879 	if (connector->state)
5880 		__drm_atomic_helper_connector_destroy_state(connector->state);
5881 
5882 	kfree(state);
5883 
5884 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5885 
5886 	if (state) {
5887 		state->scaling = RMX_OFF;
5888 		state->underscan_enable = false;
5889 		state->underscan_hborder = 0;
5890 		state->underscan_vborder = 0;
5891 		state->base.max_requested_bpc = 8;
5892 		state->vcpi_slots = 0;
5893 		state->pbn = 0;
5894 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5895 			state->abm_level = amdgpu_dm_abm_level;
5896 
5897 		__drm_atomic_helper_connector_reset(connector, &state->base);
5898 	}
5899 }
5900 
5901 struct drm_connector_state *
5902 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5903 {
5904 	struct dm_connector_state *state =
5905 		to_dm_connector_state(connector->state);
5906 
5907 	struct dm_connector_state *new_state =
5908 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5909 
5910 	if (!new_state)
5911 		return NULL;
5912 
5913 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5914 
5915 	new_state->freesync_capable = state->freesync_capable;
5916 	new_state->abm_level = state->abm_level;
5917 	new_state->scaling = state->scaling;
5918 	new_state->underscan_enable = state->underscan_enable;
5919 	new_state->underscan_hborder = state->underscan_hborder;
5920 	new_state->underscan_vborder = state->underscan_vborder;
5921 	new_state->vcpi_slots = state->vcpi_slots;
5922 	new_state->pbn = state->pbn;
5923 	return &new_state->base;
5924 }
5925 
5926 static int
5927 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5928 {
5929 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5930 		to_amdgpu_dm_connector(connector);
5931 	int r;
5932 
5933 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5934 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5935 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5936 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5937 		if (r)
5938 			return r;
5939 	}
5940 
5941 #if defined(CONFIG_DEBUG_FS)
5942 	connector_debugfs_init(amdgpu_dm_connector);
5943 #endif
5944 
5945 	return 0;
5946 }
5947 
5948 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5949 	.reset = amdgpu_dm_connector_funcs_reset,
5950 	.detect = amdgpu_dm_connector_detect,
5951 	.fill_modes = drm_helper_probe_single_connector_modes,
5952 	.destroy = amdgpu_dm_connector_destroy,
5953 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5954 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5955 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5956 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5957 	.late_register = amdgpu_dm_connector_late_register,
5958 	.early_unregister = amdgpu_dm_connector_unregister
5959 };
5960 
5961 static int get_modes(struct drm_connector *connector)
5962 {
5963 	return amdgpu_dm_connector_get_modes(connector);
5964 }
5965 
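/*
 * Create an emulated remote sink from the connector's EDID blob so that a
 * forced-on connector has a sink to drive; force the connector off if no
 * EDID is available.
 */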
5966 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5967 {
5968 	struct dc_sink_init_data init_params = {
5969 			.link = aconnector->dc_link,
5970 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5971 	};
5972 	struct edid *edid;
5973 
5974 	if (!aconnector->base.edid_blob_ptr) {
5975 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5976 				aconnector->base.name);
5977 
5978 		aconnector->base.force = DRM_FORCE_OFF;
5979 		aconnector->base.override_edid = false;
5980 		return;
5981 	}
5982 
5983 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5984 
5985 	aconnector->edid = edid;
5986 
5987 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5988 		aconnector->dc_link,
5989 		(uint8_t *)edid,
5990 		(edid->extensions + 1) * EDID_LENGTH,
5991 		&init_params);
5992 
5993 	if (aconnector->base.force == DRM_FORCE_ON) {
5994 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5995 		aconnector->dc_link->local_sink :
5996 		aconnector->dc_em_sink;
5997 		dc_sink_retain(aconnector->dc_sink);
5998 	}
5999 }
6000 
6001 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6002 {
6003 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6004 
6005 	/*
6006 	 * In case of headless boot with force on for DP managed connector
6007 	 * Those settings have to be != 0 to get initial modeset
6008 	 */
6009 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6010 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6011 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6012 	}
6013 
6014 
6015 	aconnector->base.override_edid = true;
6016 	create_eml_sink(aconnector);
6017 }
6018 
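/*
 * Create a stream and validate it against DC, retrying at progressively
 * lower color depth (down to 6 bpc) until validation succeeds.
 */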
6019 static struct dc_stream_state *
6020 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6021 				const struct drm_display_mode *drm_mode,
6022 				const struct dm_connector_state *dm_state,
6023 				const struct dc_stream_state *old_stream)
6024 {
6025 	struct drm_connector *connector = &aconnector->base;
6026 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6027 	struct dc_stream_state *stream;
6028 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6029 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6030 	enum dc_status dc_result = DC_OK;
6031 
6032 	do {
6033 		stream = create_stream_for_sink(aconnector, drm_mode,
6034 						dm_state, old_stream,
6035 						requested_bpc);
6036 		if (stream == NULL) {
6037 			DRM_ERROR("Failed to create stream for sink!\n");
6038 			break;
6039 		}
6040 
6041 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6042 
6043 		if (dc_result != DC_OK) {
6044 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6045 				      drm_mode->hdisplay,
6046 				      drm_mode->vdisplay,
6047 				      drm_mode->clock,
6048 				      dc_result,
6049 				      dc_status_to_str(dc_result));
6050 
6051 			dc_stream_release(stream);
6052 			stream = NULL;
6053 			requested_bpc -= 2; /* lower bpc to retry validation */
6054 		}
6055 
6056 	} while (stream == NULL && requested_bpc >= 6);
6057 
6058 	return stream;
6059 }
6060 
6061 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6062 				   struct drm_display_mode *mode)
6063 {
6064 	int result = MODE_ERROR;
6065 	struct dc_sink *dc_sink;
6066 	/* TODO: Unhardcode stream count */
6067 	struct dc_stream_state *stream;
6068 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6069 
6070 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6071 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6072 		return result;
6073 
6074 	/*
6075 	 * Only run this the first time mode_valid is called to initilialize
6076 	 * EDID mgmt
6077 	 */
6078 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6079 		!aconnector->dc_em_sink)
6080 		handle_edid_mgmt(aconnector);
6081 
6082 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6083 
6084 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6085 				aconnector->base.force != DRM_FORCE_ON) {
6086 		DRM_ERROR("dc_sink is NULL!\n");
6087 		goto fail;
6088 	}
6089 
6090 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6091 	if (stream) {
6092 		dc_stream_release(stream);
6093 		result = MODE_OK;
6094 	}
6095 
6096 fail:
	/* TODO: error handling */
6098 	return result;
6099 }
6100 
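/*
 * Pack the connector's HDR static metadata into a DC info packet. HDMI
 * carries the DRM infoframe directly; DP/eDP wraps the same 26-byte payload
 * in an SDP header.
 */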
6101 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6102 				struct dc_info_packet *out)
6103 {
6104 	struct hdmi_drm_infoframe frame;
6105 	unsigned char buf[30]; /* 26 + 4 */
6106 	ssize_t len;
6107 	int ret, i;
6108 
6109 	memset(out, 0, sizeof(*out));
6110 
6111 	if (!state->hdr_output_metadata)
6112 		return 0;
6113 
6114 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6115 	if (ret)
6116 		return ret;
6117 
6118 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6119 	if (len < 0)
6120 		return (int)len;
6121 
6122 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6123 	if (len != 30)
6124 		return -EINVAL;
6125 
6126 	/* Prepare the infopacket for DC. */
6127 	switch (state->connector->connector_type) {
6128 	case DRM_MODE_CONNECTOR_HDMIA:
6129 		out->hb0 = 0x87; /* type */
6130 		out->hb1 = 0x01; /* version */
6131 		out->hb2 = 0x1A; /* length */
6132 		out->sb[0] = buf[3]; /* checksum */
6133 		i = 1;
6134 		break;
6135 
6136 	case DRM_MODE_CONNECTOR_DisplayPort:
6137 	case DRM_MODE_CONNECTOR_eDP:
6138 		out->hb0 = 0x00; /* sdp id, zero */
6139 		out->hb1 = 0x87; /* type */
6140 		out->hb2 = 0x1D; /* payload len - 1 */
6141 		out->hb3 = (0x13 << 2); /* sdp version */
6142 		out->sb[0] = 0x01; /* version */
6143 		out->sb[1] = 0x1A; /* length */
6144 		i = 2;
6145 		break;
6146 
6147 	default:
6148 		return -EINVAL;
6149 	}
6150 
6151 	memcpy(&out->sb[i], &buf[4], 26);
6152 	out->valid = true;
6153 
6154 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6155 		       sizeof(out->sb), false);
6156 
6157 	return 0;
6158 }
6159 
6160 static bool
6161 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6162 			  const struct drm_connector_state *new_state)
6163 {
6164 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6165 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6166 
6167 	if (old_blob != new_blob) {
6168 		if (old_blob && new_blob &&
6169 		    old_blob->length == new_blob->length)
6170 			return memcmp(old_blob->data, new_blob->data,
6171 				      old_blob->length);
6172 
6173 		return true;
6174 	}
6175 
6176 	return false;
6177 }
6178 
6179 static int
6180 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6181 				 struct drm_atomic_state *state)
6182 {
6183 	struct drm_connector_state *new_con_state =
6184 		drm_atomic_get_new_connector_state(state, conn);
6185 	struct drm_connector_state *old_con_state =
6186 		drm_atomic_get_old_connector_state(state, conn);
6187 	struct drm_crtc *crtc = new_con_state->crtc;
6188 	struct drm_crtc_state *new_crtc_state;
6189 	int ret;
6190 
6191 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6192 
6193 	if (!crtc)
6194 		return 0;
6195 
6196 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6197 		struct dc_info_packet hdr_infopacket;
6198 
6199 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6200 		if (ret)
6201 			return ret;
6202 
6203 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6204 		if (IS_ERR(new_crtc_state))
6205 			return PTR_ERR(new_crtc_state);
6206 
6207 		/*
6208 		 * DC considers the stream backends changed if the
6209 		 * static metadata changes. Forcing the modeset also
6210 		 * gives a simple way for userspace to switch from
6211 		 * 8bpc to 10bpc when setting the metadata to enter
6212 		 * or exit HDR.
6213 		 *
6214 		 * Changing the static metadata after it's been
6215 		 * set is permissible, however. So only force a
6216 		 * modeset if we're entering or exiting HDR.
6217 		 */
6218 		new_crtc_state->mode_changed =
6219 			!old_con_state->hdr_output_metadata ||
6220 			!new_con_state->hdr_output_metadata;
6221 	}
6222 
6223 	return 0;
6224 }
6225 
6226 static const struct drm_connector_helper_funcs
6227 amdgpu_dm_connector_helper_funcs = {
6228 	/*
6229 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6230 	 * modes will be filtered by drm_mode_validate_size(), and those modes
6231 	 * are missing after user start lightdm. So we need to renew modes list.
6232 	 * in get_modes call back, not just return the modes count
6233 	 */
6234 	.get_modes = get_modes,
6235 	.mode_valid = amdgpu_dm_connector_mode_valid,
6236 	.atomic_check = amdgpu_dm_connector_atomic_check,
6237 };
6238 
6239 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6240 {
6241 }
6242 
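/*
 * Count the non-cursor planes that will be enabled on the CRTC after this
 * commit. Planes that are absent from the atomic state keep their previous,
 * validated (and therefore enabled) configuration.
 */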
6243 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6244 {
6245 	struct drm_atomic_state *state = new_crtc_state->state;
6246 	struct drm_plane *plane;
6247 	int num_active = 0;
6248 
6249 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6250 		struct drm_plane_state *new_plane_state;
6251 
6252 		/* Cursor planes are "fake". */
6253 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6254 			continue;
6255 
6256 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6257 
6258 		if (!new_plane_state) {
6259 			/*
6260 			 * The plane is enable on the CRTC and hasn't changed
6261 			 * state. This means that it previously passed
6262 			 * validation and is therefore enabled.
6263 			 */
6264 			num_active += 1;
6265 			continue;
6266 		}
6267 
6268 		/* We need a framebuffer to be considered enabled. */
6269 		num_active += (new_plane_state->fb != NULL);
6270 	}
6271 
6272 	return num_active;
6273 }
6274 
6275 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6276 					 struct drm_crtc_state *new_crtc_state)
6277 {
6278 	struct dm_crtc_state *dm_new_crtc_state =
6279 		to_dm_crtc_state(new_crtc_state);
6280 
6281 	dm_new_crtc_state->active_planes = 0;
6282 
6283 	if (!dm_new_crtc_state->stream)
6284 		return;
6285 
6286 	dm_new_crtc_state->active_planes =
6287 		count_crtc_active_planes(new_crtc_state);
6288 }
6289 
6290 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6291 				       struct drm_atomic_state *state)
6292 {
6293 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6294 									  crtc);
6295 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6296 	struct dc *dc = adev->dm.dc;
6297 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6298 	int ret = -EINVAL;
6299 
6300 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6301 
6302 	dm_update_crtc_active_planes(crtc, crtc_state);
6303 
6304 	if (unlikely(!dm_crtc_state->stream &&
6305 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6306 		WARN_ON(1);
6307 		return ret;
6308 	}
6309 
6310 	/*
6311 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6312 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6313 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6314 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6315 	 */
6316 	if (crtc_state->enable &&
6317 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6318 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6319 		return -EINVAL;
6320 	}
6321 
6322 	/* In some use cases, like reset, no stream is attached */
6323 	if (!dm_crtc_state->stream)
6324 		return 0;
6325 
6326 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6327 		return 0;
6328 
6329 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6330 	return ret;
6331 }
6332 
6333 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6334 				      const struct drm_display_mode *mode,
6335 				      struct drm_display_mode *adjusted_mode)
6336 {
6337 	return true;
6338 }
6339 
6340 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6341 	.disable = dm_crtc_helper_disable,
6342 	.atomic_check = dm_crtc_helper_atomic_check,
6343 	.mode_fixup = dm_crtc_helper_mode_fixup,
6344 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6345 };
6346 
6347 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6348 {
6349 
6350 }
6351 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
6372 
6373 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6374 					  struct drm_crtc_state *crtc_state,
6375 					  struct drm_connector_state *conn_state)
6376 {
6377 	struct drm_atomic_state *state = crtc_state->state;
6378 	struct drm_connector *connector = conn_state->connector;
6379 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6380 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6381 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6382 	struct drm_dp_mst_topology_mgr *mst_mgr;
6383 	struct drm_dp_mst_port *mst_port;
6384 	enum dc_color_depth color_depth;
6385 	int clock, bpp = 0;
6386 	bool is_y420 = false;
6387 
6388 	if (!aconnector->port || !aconnector->dc_sink)
6389 		return 0;
6390 
6391 	mst_port = aconnector->port;
6392 	mst_mgr = &aconnector->mst_port->mst_mgr;
6393 
6394 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6395 		return 0;
6396 
6397 	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
6401 		color_depth = convert_color_depth_from_display_info(connector,
6402 								    is_y420,
6403 								    max_bpc);
6404 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6405 		clock = adjusted_mode->clock;
6406 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6407 	}
6408 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6409 									   mst_mgr,
6410 									   mst_port,
6411 									   dm_new_connector_state->pbn,
6412 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6413 	if (dm_new_connector_state->vcpi_slots < 0) {
6414 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6415 		return dm_new_connector_state->vcpi_slots;
6416 	}
6417 	return 0;
6418 }
6419 
6420 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6421 	.disable = dm_encoder_helper_disable,
6422 	.atomic_check = dm_encoder_helper_atomic_check
6423 };
6424 
6425 #if defined(CONFIG_DRM_AMD_DC_DCN)
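/*
 * For every MST connector in the atomic state, enable or disable DSC on its
 * port and, when enabling, recompute PBN and VCPI slots from the compressed
 * bits-per-pixel and pixel clock of the matching stream.
 */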
6426 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6427 					    struct dc_state *dc_state)
6428 {
6429 	struct dc_stream_state *stream = NULL;
6430 	struct drm_connector *connector;
6431 	struct drm_connector_state *new_con_state, *old_con_state;
6432 	struct amdgpu_dm_connector *aconnector;
6433 	struct dm_connector_state *dm_conn_state;
6434 	int i, j, clock, bpp;
6435 	int vcpi, pbn_div, pbn = 0;
6436 
6437 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6438 
6439 		aconnector = to_amdgpu_dm_connector(connector);
6440 
6441 		if (!aconnector->port)
6442 			continue;
6443 
6444 		if (!new_con_state || !new_con_state->crtc)
6445 			continue;
6446 
6447 		dm_conn_state = to_dm_connector_state(new_con_state);
6448 
6449 		for (j = 0; j < dc_state->stream_count; j++) {
6450 			stream = dc_state->streams[j];
6451 			if (!stream)
6452 				continue;
6453 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6455 				break;
6456 
6457 			stream = NULL;
6458 		}
6459 
6460 		if (!stream)
6461 			continue;
6462 
6463 		if (stream->timing.flags.DSC != 1) {
6464 			drm_dp_mst_atomic_enable_dsc(state,
6465 						     aconnector->port,
6466 						     dm_conn_state->pbn,
6467 						     0,
6468 						     false);
6469 			continue;
6470 		}
6471 
6472 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6473 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6474 		clock = stream->timing.pix_clk_100hz / 10;
6475 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6476 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6477 						    aconnector->port,
6478 						    pbn, pbn_div,
6479 						    true);
6480 		if (vcpi < 0)
6481 			return vcpi;
6482 
6483 		dm_conn_state->pbn = pbn;
6484 		dm_conn_state->vcpi_slots = vcpi;
6485 	}
6486 	return 0;
6487 }
6488 #endif
6489 
6490 static void dm_drm_plane_reset(struct drm_plane *plane)
6491 {
6492 	struct dm_plane_state *amdgpu_state = NULL;
6493 
6494 	if (plane->state)
6495 		plane->funcs->atomic_destroy_state(plane, plane->state);
6496 
6497 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6498 	WARN_ON(amdgpu_state == NULL);
6499 
6500 	if (amdgpu_state)
6501 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6502 }
6503 
6504 static struct drm_plane_state *
6505 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6506 {
6507 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6508 
6509 	old_dm_plane_state = to_dm_plane_state(plane->state);
6510 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6511 	if (!dm_plane_state)
6512 		return NULL;
6513 
6514 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6515 
6516 	if (old_dm_plane_state->dc_state) {
6517 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6518 		dc_plane_state_retain(dm_plane_state->dc_state);
6519 	}
6520 
6521 	return &dm_plane_state->base;
6522 }
6523 
6524 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6525 				struct drm_plane_state *state)
6526 {
6527 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6528 
6529 	if (dm_plane_state->dc_state)
6530 		dc_plane_state_release(dm_plane_state->dc_state);
6531 
6532 	drm_atomic_helper_plane_destroy_state(plane, state);
6533 }
6534 
6535 static const struct drm_plane_funcs dm_plane_funcs = {
6536 	.update_plane	= drm_atomic_helper_update_plane,
6537 	.disable_plane	= drm_atomic_helper_disable_plane,
6538 	.destroy	= drm_primary_helper_destroy,
6539 	.reset = dm_drm_plane_reset,
6540 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6541 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6542 	.format_mod_supported = dm_plane_format_mod_supported,
6543 };
6544 
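/*
 * Pin the framebuffer BO into a displayable domain (VRAM for cursor planes),
 * map it into GART, record the resulting GPU address in the amdgpu
 * framebuffer and, for newly created planes, fill the DC buffer attributes.
 */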
6545 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6546 				      struct drm_plane_state *new_state)
6547 {
6548 	struct amdgpu_framebuffer *afb;
6549 	struct drm_gem_object *obj;
6550 	struct amdgpu_device *adev;
6551 	struct amdgpu_bo *rbo;
6552 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6553 	struct list_head list;
6554 	struct ttm_validate_buffer tv;
6555 	struct ww_acquire_ctx ticket;
6556 	uint32_t domain;
6557 	int r;
6558 
6559 	if (!new_state->fb) {
6560 		DRM_DEBUG_DRIVER("No FB bound\n");
6561 		return 0;
6562 	}
6563 
6564 	afb = to_amdgpu_framebuffer(new_state->fb);
6565 	obj = new_state->fb->obj[0];
6566 	rbo = gem_to_amdgpu_bo(obj);
6567 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6568 	INIT_LIST_HEAD(&list);
6569 
6570 	tv.bo = &rbo->tbo;
6571 	tv.num_shared = 1;
6572 	list_add(&tv.head, &list);
6573 
6574 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6575 	if (r) {
6576 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6577 		return r;
6578 	}
6579 
6580 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6581 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6582 	else
6583 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6584 
6585 	r = amdgpu_bo_pin(rbo, domain);
6586 	if (unlikely(r != 0)) {
6587 		if (r != -ERESTARTSYS)
6588 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6589 		ttm_eu_backoff_reservation(&ticket, &list);
6590 		return r;
6591 	}
6592 
6593 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6594 	if (unlikely(r != 0)) {
6595 		amdgpu_bo_unpin(rbo);
6596 		ttm_eu_backoff_reservation(&ticket, &list);
6597 		DRM_ERROR("%p bind failed\n", rbo);
6598 		return r;
6599 	}
6600 
6601 	ttm_eu_backoff_reservation(&ticket, &list);
6602 
6603 	afb->address = amdgpu_bo_gpu_offset(rbo);
6604 
6605 	amdgpu_bo_ref(rbo);
6606 
6607 	/**
6608 	 * We don't do surface updates on planes that have been newly created,
6609 	 * but we also don't have the afb->address during atomic check.
6610 	 *
6611 	 * Fill in buffer attributes depending on the address here, but only on
6612 	 * newly created planes since they're not being used by DC yet and this
6613 	 * won't modify global state.
6614 	 */
6615 	dm_plane_state_old = to_dm_plane_state(plane->state);
6616 	dm_plane_state_new = to_dm_plane_state(new_state);
6617 
6618 	if (dm_plane_state_new->dc_state &&
6619 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6620 		struct dc_plane_state *plane_state =
6621 			dm_plane_state_new->dc_state;
6622 		bool force_disable_dcc = !plane_state->dcc.enable;
6623 
6624 		fill_plane_buffer_attributes(
6625 			adev, afb, plane_state->format, plane_state->rotation,
6626 			afb->tiling_flags,
6627 			&plane_state->tiling_info, &plane_state->plane_size,
6628 			&plane_state->dcc, &plane_state->address,
6629 			afb->tmz_surface, force_disable_dcc);
6630 	}
6631 
6632 	return 0;
6633 }
6634 
6635 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6636 				       struct drm_plane_state *old_state)
6637 {
6638 	struct amdgpu_bo *rbo;
6639 	int r;
6640 
6641 	if (!old_state->fb)
6642 		return;
6643 
6644 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6645 	r = amdgpu_bo_reserve(rbo, false);
6646 	if (unlikely(r)) {
6647 		DRM_ERROR("failed to reserve rbo before unpin\n");
6648 		return;
6649 	}
6650 
6651 	amdgpu_bo_unpin(rbo);
6652 	amdgpu_bo_unreserve(rbo);
6653 	amdgpu_bo_unref(&rbo);
6654 }
6655 
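/*
 * Validate the plane's viewport (minimum size, doubled horizontally to
 * account for pipe split) and convert DC's scaling limits (1.0 == 1000)
 * into the DRM 16.16 fixed-point convention before running the generic
 * plane state check.
 */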
6656 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6657 				       struct drm_crtc_state *new_crtc_state)
6658 {
6659 	struct drm_framebuffer *fb = state->fb;
6660 	int min_downscale, max_upscale;
6661 	int min_scale = 0;
6662 	int max_scale = INT_MAX;
6663 
6664 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6665 	if (fb && state->crtc) {
6666 		/* Validate viewport to cover the case when only the position changes */
6667 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6668 			int viewport_width = state->crtc_w;
6669 			int viewport_height = state->crtc_h;
6670 
6671 			if (state->crtc_x < 0)
6672 				viewport_width += state->crtc_x;
6673 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6674 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6675 
6676 			if (state->crtc_y < 0)
6677 				viewport_height += state->crtc_y;
6678 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6679 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6680 
			/*
			 * If the plane is completely outside the screen,
			 * viewport_width and/or viewport_height will be
			 * negative, which still satisfies the condition below
			 * and thereby covers that case as well.
			 * x2 for width is because of pipe split.
			 */
6686 			if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
6687 				return -EINVAL;
6688 		}
6689 
6690 		/* Get min/max allowed scaling factors from plane caps. */
6691 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6692 					     &min_downscale, &max_upscale);
6693 		/*
6694 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6695 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6696 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6697 		 */
6698 		min_scale = (1000 << 16) / max_upscale;
6699 		max_scale = (1000 << 16) / min_downscale;
6700 	}
6701 
6702 	return drm_atomic_helper_check_plane_state(
6703 		state, new_crtc_state, min_scale, max_scale, true, true);
6704 }
6705 
6706 static int dm_plane_atomic_check(struct drm_plane *plane,
6707 				 struct drm_plane_state *state)
6708 {
6709 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6710 	struct dc *dc = adev->dm.dc;
6711 	struct dm_plane_state *dm_plane_state;
6712 	struct dc_scaling_info scaling_info;
6713 	struct drm_crtc_state *new_crtc_state;
6714 	int ret;
6715 
6716 	trace_amdgpu_dm_plane_atomic_check(state);
6717 
6718 	dm_plane_state = to_dm_plane_state(state);
6719 
6720 	if (!dm_plane_state->dc_state)
6721 		return 0;
6722 
6723 	new_crtc_state =
6724 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6725 	if (!new_crtc_state)
6726 		return -EINVAL;
6727 
6728 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6729 	if (ret)
6730 		return ret;
6731 
6732 	ret = fill_dc_scaling_info(state, &scaling_info);
6733 	if (ret)
6734 		return ret;
6735 
6736 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6737 		return 0;
6738 
6739 	return -EINVAL;
6740 }
6741 
6742 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6743 				       struct drm_plane_state *new_plane_state)
6744 {
6745 	/* Only support async updates on cursor planes. */
6746 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6747 		return -EINVAL;
6748 
6749 	return 0;
6750 }
6751 
6752 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6753 					 struct drm_plane_state *new_state)
6754 {
6755 	struct drm_plane_state *old_state =
6756 		drm_atomic_get_old_plane_state(new_state->state, plane);
6757 
6758 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6759 
6760 	swap(plane->state->fb, new_state->fb);
6761 
6762 	plane->state->src_x = new_state->src_x;
6763 	plane->state->src_y = new_state->src_y;
6764 	plane->state->src_w = new_state->src_w;
6765 	plane->state->src_h = new_state->src_h;
6766 	plane->state->crtc_x = new_state->crtc_x;
6767 	plane->state->crtc_y = new_state->crtc_y;
6768 	plane->state->crtc_w = new_state->crtc_w;
6769 	plane->state->crtc_h = new_state->crtc_h;
6770 
6771 	handle_cursor_update(plane, old_state);
6772 }
6773 
6774 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6775 	.prepare_fb = dm_plane_helper_prepare_fb,
6776 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6777 	.atomic_check = dm_plane_atomic_check,
6778 	.atomic_async_check = dm_plane_atomic_async_check,
6779 	.atomic_async_update = dm_plane_atomic_async_update
6780 };
6781 
6782 /*
6783  * TODO: these are currently initialized to rgb formats only.
6784  * For future use cases we should either initialize them dynamically based on
6785  * plane capabilities, or initialize this array to all formats, so internal drm
6786  * check will succeed, and let DC implement proper check
6787  */
6788 static const uint32_t rgb_formats[] = {
6789 	DRM_FORMAT_XRGB8888,
6790 	DRM_FORMAT_ARGB8888,
6791 	DRM_FORMAT_RGBA8888,
6792 	DRM_FORMAT_XRGB2101010,
6793 	DRM_FORMAT_XBGR2101010,
6794 	DRM_FORMAT_ARGB2101010,
6795 	DRM_FORMAT_ABGR2101010,
6796 	DRM_FORMAT_XBGR8888,
6797 	DRM_FORMAT_ABGR8888,
6798 	DRM_FORMAT_RGB565,
6799 };
6800 
6801 static const uint32_t overlay_formats[] = {
6802 	DRM_FORMAT_XRGB8888,
6803 	DRM_FORMAT_ARGB8888,
6804 	DRM_FORMAT_RGBA8888,
6805 	DRM_FORMAT_XBGR8888,
6806 	DRM_FORMAT_ABGR8888,
6807 	DRM_FORMAT_RGB565
6808 };
6809 
6810 static const u32 cursor_formats[] = {
6811 	DRM_FORMAT_ARGB8888
6812 };
6813 
6814 static int get_plane_formats(const struct drm_plane *plane,
6815 			     const struct dc_plane_cap *plane_cap,
6816 			     uint32_t *formats, int max_formats)
6817 {
6818 	int i, num_formats = 0;
6819 
6820 	/*
6821 	 * TODO: Query support for each group of formats directly from
6822 	 * DC plane caps. This will require adding more formats to the
6823 	 * caps list.
6824 	 */
6825 
6826 	switch (plane->type) {
6827 	case DRM_PLANE_TYPE_PRIMARY:
6828 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6829 			if (num_formats >= max_formats)
6830 				break;
6831 
6832 			formats[num_formats++] = rgb_formats[i];
6833 		}
6834 
6835 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6836 			formats[num_formats++] = DRM_FORMAT_NV12;
6837 		if (plane_cap && plane_cap->pixel_format_support.p010)
6838 			formats[num_formats++] = DRM_FORMAT_P010;
6839 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6840 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6841 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6842 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6843 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6844 		}
6845 		break;
6846 
6847 	case DRM_PLANE_TYPE_OVERLAY:
6848 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6849 			if (num_formats >= max_formats)
6850 				break;
6851 
6852 			formats[num_formats++] = overlay_formats[i];
6853 		}
6854 		break;
6855 
6856 	case DRM_PLANE_TYPE_CURSOR:
6857 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6858 			if (num_formats >= max_formats)
6859 				break;
6860 
6861 			formats[num_formats++] = cursor_formats[i];
6862 		}
6863 		break;
6864 	}
6865 
6866 	return num_formats;
6867 }
6868 
6869 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6870 				struct drm_plane *plane,
6871 				unsigned long possible_crtcs,
6872 				const struct dc_plane_cap *plane_cap)
6873 {
6874 	uint32_t formats[32];
6875 	int num_formats;
6876 	int res = -EPERM;
6877 	unsigned int supported_rotations;
6878 	uint64_t *modifiers = NULL;
6879 
6880 	num_formats = get_plane_formats(plane, plane_cap, formats,
6881 					ARRAY_SIZE(formats));
6882 
6883 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6884 	if (res)
6885 		return res;
6886 
6887 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6888 				       &dm_plane_funcs, formats, num_formats,
6889 				       modifiers, plane->type, NULL);
6890 	kfree(modifiers);
6891 	if (res)
6892 		return res;
6893 
6894 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6895 	    plane_cap && plane_cap->per_pixel_alpha) {
6896 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6897 					  BIT(DRM_MODE_BLEND_PREMULTI);
6898 
6899 		drm_plane_create_alpha_property(plane);
6900 		drm_plane_create_blend_mode_property(plane, blend_caps);
6901 	}
6902 
6903 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6904 	    plane_cap &&
6905 	    (plane_cap->pixel_format_support.nv12 ||
6906 	     plane_cap->pixel_format_support.p010)) {
6907 		/* This only affects YUV formats. */
6908 		drm_plane_create_color_properties(
6909 			plane,
6910 			BIT(DRM_COLOR_YCBCR_BT601) |
6911 			BIT(DRM_COLOR_YCBCR_BT709) |
6912 			BIT(DRM_COLOR_YCBCR_BT2020),
6913 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6914 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6915 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6916 	}
6917 
6918 	supported_rotations =
6919 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6920 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6921 
6922 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
6923 	    plane->type != DRM_PLANE_TYPE_CURSOR)
6924 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6925 						   supported_rotations);
6926 
6927 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6928 
6929 	/* Create (reset) the plane state */
6930 	if (plane->funcs->reset)
6931 		plane->funcs->reset(plane);
6932 
6933 	return 0;
6934 }
6935 
6936 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6937 			       struct drm_plane *plane,
6938 			       uint32_t crtc_index)
6939 {
6940 	struct amdgpu_crtc *acrtc = NULL;
6941 	struct drm_plane *cursor_plane;
6942 
6943 	int res = -ENOMEM;
6944 
6945 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6946 	if (!cursor_plane)
6947 		goto fail;
6948 
6949 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
6951 
6952 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6953 	if (!acrtc)
6954 		goto fail;
6955 
6956 	res = drm_crtc_init_with_planes(
6957 			dm->ddev,
6958 			&acrtc->base,
6959 			plane,
6960 			cursor_plane,
6961 			&amdgpu_dm_crtc_funcs, NULL);
6962 
6963 	if (res)
6964 		goto fail;
6965 
6966 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6967 
	/* Create (reset) the crtc state */
6969 	if (acrtc->base.funcs->reset)
6970 		acrtc->base.funcs->reset(&acrtc->base);
6971 
6972 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6973 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6974 
6975 	acrtc->crtc_id = crtc_index;
6976 	acrtc->base.enabled = false;
6977 	acrtc->otg_inst = -1;
6978 
6979 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6980 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6981 				   true, MAX_COLOR_LUT_ENTRIES);
6982 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6983 
6984 	return 0;
6985 
6986 fail:
6987 	kfree(acrtc);
6988 	kfree(cursor_plane);
6989 	return res;
6990 }
6991 
6992 
6993 static int to_drm_connector_type(enum signal_type st)
6994 {
6995 	switch (st) {
6996 	case SIGNAL_TYPE_HDMI_TYPE_A:
6997 		return DRM_MODE_CONNECTOR_HDMIA;
6998 	case SIGNAL_TYPE_EDP:
6999 		return DRM_MODE_CONNECTOR_eDP;
7000 	case SIGNAL_TYPE_LVDS:
7001 		return DRM_MODE_CONNECTOR_LVDS;
7002 	case SIGNAL_TYPE_RGB:
7003 		return DRM_MODE_CONNECTOR_VGA;
7004 	case SIGNAL_TYPE_DISPLAY_PORT:
7005 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7006 		return DRM_MODE_CONNECTOR_DisplayPort;
7007 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7008 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7009 		return DRM_MODE_CONNECTOR_DVID;
7010 	case SIGNAL_TYPE_VIRTUAL:
7011 		return DRM_MODE_CONNECTOR_VIRTUAL;
7012 
7013 	default:
7014 		return DRM_MODE_CONNECTOR_Unknown;
7015 	}
7016 }
7017 
7018 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7019 {
7020 	struct drm_encoder *encoder;
7021 
7022 	/* There is only one encoder per connector */
7023 	drm_connector_for_each_possible_encoder(connector, encoder)
7024 		return encoder;
7025 
7026 	return NULL;
7027 }
7028 
7029 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7030 {
7031 	struct drm_encoder *encoder;
7032 	struct amdgpu_encoder *amdgpu_encoder;
7033 
7034 	encoder = amdgpu_dm_connector_to_encoder(connector);
7035 
7036 	if (encoder == NULL)
7037 		return;
7038 
7039 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7040 
7041 	amdgpu_encoder->native_mode.clock = 0;
7042 
7043 	if (!list_empty(&connector->probed_modes)) {
7044 		struct drm_display_mode *preferred_mode = NULL;
7045 
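		/*
		 * Only the first entry in the probed list is examined; the
		 * caller sorted the list (see
		 * amdgpu_dm_connector_ddc_get_modes()), so the first entry is
		 * the highest-resolution mode, and it is only taken as the
		 * native mode if it is flagged preferred.
		 */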
7046 		list_for_each_entry(preferred_mode,
7047 				    &connector->probed_modes,
7048 				    head) {
7049 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7050 				amdgpu_encoder->native_mode = *preferred_mode;
7051 
7052 			break;
7053 		}
7054 
7055 	}
7056 }
7057 
7058 static struct drm_display_mode *
7059 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7060 			     char *name,
7061 			     int hdisplay, int vdisplay)
7062 {
7063 	struct drm_device *dev = encoder->dev;
7064 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7065 	struct drm_display_mode *mode = NULL;
7066 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7067 
7068 	mode = drm_mode_duplicate(dev, native_mode);
7069 
7070 	if (mode == NULL)
7071 		return NULL;
7072 
7073 	mode->hdisplay = hdisplay;
7074 	mode->vdisplay = vdisplay;
7075 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7076 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7077 
7078 	return mode;
7079 
7080 }
7081 
7082 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7083 						 struct drm_connector *connector)
7084 {
7085 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7086 	struct drm_display_mode *mode = NULL;
7087 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7088 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7089 				to_amdgpu_dm_connector(connector);
7090 	int i;
7091 	int n;
7092 	struct mode_size {
7093 		char name[DRM_DISPLAY_MODE_LEN];
7094 		int w;
7095 		int h;
7096 	} common_modes[] = {
7097 		{  "640x480",  640,  480},
7098 		{  "800x600",  800,  600},
7099 		{ "1024x768", 1024,  768},
7100 		{ "1280x720", 1280,  720},
7101 		{ "1280x800", 1280,  800},
7102 		{"1280x1024", 1280, 1024},
7103 		{ "1440x900", 1440,  900},
7104 		{"1680x1050", 1680, 1050},
7105 		{"1600x1200", 1600, 1200},
7106 		{"1920x1080", 1920, 1080},
7107 		{"1920x1200", 1920, 1200}
7108 	};
7109 
7110 	n = ARRAY_SIZE(common_modes);
7111 
7112 	for (i = 0; i < n; i++) {
7113 		struct drm_display_mode *curmode = NULL;
7114 		bool mode_existed = false;
7115 
7116 		if (common_modes[i].w > native_mode->hdisplay ||
7117 		    common_modes[i].h > native_mode->vdisplay ||
7118 		   (common_modes[i].w == native_mode->hdisplay &&
7119 		    common_modes[i].h == native_mode->vdisplay))
7120 			continue;
7121 
7122 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7123 			if (common_modes[i].w == curmode->hdisplay &&
7124 			    common_modes[i].h == curmode->vdisplay) {
7125 				mode_existed = true;
7126 				break;
7127 			}
7128 		}
7129 
7130 		if (mode_existed)
7131 			continue;
7132 
7133 		mode = amdgpu_dm_create_common_mode(encoder,
7134 				common_modes[i].name, common_modes[i].w,
7135 				common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
7137 		amdgpu_dm_connector->num_modes++;
7138 	}
7139 }
7140 
7141 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7142 					      struct edid *edid)
7143 {
7144 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7145 			to_amdgpu_dm_connector(connector);
7146 
7147 	if (edid) {
7148 		/* empty probed_modes */
7149 		INIT_LIST_HEAD(&connector->probed_modes);
7150 		amdgpu_dm_connector->num_modes =
7151 				drm_add_edid_modes(connector, edid);
7152 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. Modes later in the
		 * probed mode list could be of higher, preferred
		 * resolution. For example, 3840x2160 in the base EDID
		 * preferred timing and 4096x2160 in a DID extension
		 * block later.
		 */
7161 		drm_mode_sort(&connector->probed_modes);
7162 		amdgpu_dm_get_native_mode(connector);
7163 
7164 		/* Freesync capabilities are reset by calling
7165 		 * drm_add_edid_modes() and need to be
7166 		 * restored here.
7167 		 */
7168 		amdgpu_dm_update_freesync_caps(connector, edid);
7169 	} else {
7170 		amdgpu_dm_connector->num_modes = 0;
7171 	}
7172 }
7173 
7174 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7175 			      struct drm_display_mode *mode)
7176 {
7177 	struct drm_display_mode *m;
7178 
	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7180 		if (drm_mode_equal(m, mode))
7181 			return true;
7182 	}
7183 
7184 	return false;
7185 }
7186 
7187 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7188 {
7189 	const struct drm_display_mode *m;
7190 	struct drm_display_mode *new_mode;
7191 	uint i;
7192 	uint32_t new_modes_count = 0;
7193 
7194 	/* Standard FPS values
7195 	 *
7196 	 * 23.976   - TV/NTSC
7197 	 * 24 	    - Cinema
7198 	 * 25 	    - TV/PAL
7199 	 * 29.97    - TV/NTSC
7200 	 * 30 	    - TV/NTSC
7201 	 * 48 	    - Cinema HFR
7202 	 * 50 	    - TV/PAL
7203 	 * 60 	    - Commonly used
7204 	 * 48,72,96 - Multiples of 24
7205 	 */
7206 	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7207 					 48000, 50000, 60000, 72000, 96000 };
7208 
7209 	/*
7210 	 * Find mode with highest refresh rate with the same resolution
7211 	 * as the preferred mode. Some monitors report a preferred mode
7212 	 * with lower resolution than the highest refresh rate supported.
7213 	 */
7214 
7215 	m = get_highest_refresh_rate_mode(aconnector, true);
7216 	if (!m)
7217 		return 0;
7218 
7219 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7220 		uint64_t target_vtotal, target_vtotal_diff;
7221 		uint64_t num, den;
7222 
7223 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7224 			continue;
7225 
7226 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7227 		    common_rates[i] > aconnector->max_vfreq * 1000)
7228 			continue;
7229 
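		/*
		 * Retarget the mode's vtotal to hit the desired rate:
		 * vtotal = pixel clock / (rate * htotal). m->clock is in kHz
		 * and common_rates[] in mHz, hence the extra 10^6 factor.
		 * Example: 148500 kHz with htotal 2200 at 60000 gives
		 * 148500 * 10^6 / (60000 * 2200) = 1125, the 1080p60 vtotal.
		 */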
7230 		num = (unsigned long long)m->clock * 1000 * 1000;
7231 		den = common_rates[i] * (unsigned long long)m->htotal;
7232 		target_vtotal = div_u64(num, den);
7233 		target_vtotal_diff = target_vtotal - m->vtotal;
7234 
7235 		/* Check for illegal modes */
7236 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7237 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7238 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7239 			continue;
7240 
7241 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7242 		if (!new_mode)
7243 			goto out;
7244 
7245 		new_mode->vtotal += (u16)target_vtotal_diff;
7246 		new_mode->vsync_start += (u16)target_vtotal_diff;
7247 		new_mode->vsync_end += (u16)target_vtotal_diff;
7248 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7249 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7250 
7251 		if (!is_duplicate_mode(aconnector, new_mode)) {
7252 			drm_mode_probed_add(&aconnector->base, new_mode);
7253 			new_modes_count += 1;
7254 		} else
7255 			drm_mode_destroy(aconnector->base.dev, new_mode);
7256 	}
7257  out:
7258 	return new_modes_count;
7259 }
7260 
7261 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7262 						   struct edid *edid)
7263 {
7264 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7265 		to_amdgpu_dm_connector(connector);
7266 
7267 	if (!(amdgpu_freesync_vid_mode && edid))
7268 		return;
7269 
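	/* Only add the extra modes when the VRR range spans more than 10 Hz. */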
7270 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7271 		amdgpu_dm_connector->num_modes +=
7272 			add_fs_modes(amdgpu_dm_connector);
7273 }
7274 
7275 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7276 {
7277 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7278 			to_amdgpu_dm_connector(connector);
7279 	struct drm_encoder *encoder;
7280 	struct edid *edid = amdgpu_dm_connector->edid;
7281 
7282 	encoder = amdgpu_dm_connector_to_encoder(connector);
7283 
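	/*
	 * Without a valid EDID, fall back to the set of built-in modes that
	 * fit within 640x480.
	 */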
7284 	if (!drm_edid_is_valid(edid)) {
7285 		amdgpu_dm_connector->num_modes =
7286 				drm_add_modes_noedid(connector, 640, 480);
7287 	} else {
7288 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7289 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7290 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7291 	}
7292 	amdgpu_dm_fbc_init(connector);
7293 
7294 	return amdgpu_dm_connector->num_modes;
7295 }
7296 
7297 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7298 				     struct amdgpu_dm_connector *aconnector,
7299 				     int connector_type,
7300 				     struct dc_link *link,
7301 				     int link_index)
7302 {
7303 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7304 
7305 	/*
7306 	 * Some of the properties below require access to state, like bpc.
7307 	 * Allocate some default initial connector state with our reset helper.
7308 	 */
7309 	if (aconnector->base.funcs->reset)
7310 		aconnector->base.funcs->reset(&aconnector->base);
7311 
7312 	aconnector->connector_id = link_index;
7313 	aconnector->dc_link = link;
7314 	aconnector->base.interlace_allowed = false;
7315 	aconnector->base.doublescan_allowed = false;
7316 	aconnector->base.stereo_allowed = false;
7317 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7318 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7319 	aconnector->audio_inst = -1;
7320 	mutex_init(&aconnector->hpd_lock);
7321 
7322 	/*
7323 	 * configure support HPD hot plug connector_>polled default value is 0
7324 	 * which means HPD hot plug not supported
7325 	 */
7326 	switch (connector_type) {
7327 	case DRM_MODE_CONNECTOR_HDMIA:
7328 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7329 		aconnector->base.ycbcr_420_allowed =
7330 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7331 		break;
7332 	case DRM_MODE_CONNECTOR_DisplayPort:
7333 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7334 		aconnector->base.ycbcr_420_allowed =
7335 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
7336 		break;
7337 	case DRM_MODE_CONNECTOR_DVID:
7338 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7339 		break;
7340 	default:
7341 		break;
7342 	}
7343 
7344 	drm_object_attach_property(&aconnector->base.base,
7345 				dm->ddev->mode_config.scaling_mode_property,
7346 				DRM_MODE_SCALE_NONE);
7347 
7348 	drm_object_attach_property(&aconnector->base.base,
7349 				adev->mode_info.underscan_property,
7350 				UNDERSCAN_OFF);
7351 	drm_object_attach_property(&aconnector->base.base,
7352 				adev->mode_info.underscan_hborder_property,
7353 				0);
7354 	drm_object_attach_property(&aconnector->base.base,
7355 				adev->mode_info.underscan_vborder_property,
7356 				0);
7357 
7358 	if (!aconnector->mst_port)
7359 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7360 
7361 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7362 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7363 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7364 
7365 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7366 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7367 		drm_object_attach_property(&aconnector->base.base,
7368 				adev->mode_info.abm_level_property, 0);
7369 	}
7370 
7371 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7372 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7373 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7374 		drm_object_attach_property(
7375 			&aconnector->base.base,
7376 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7377 
7378 		if (!aconnector->mst_port)
7379 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7380 
7381 #ifdef CONFIG_DRM_AMD_DC_HDCP
7382 		if (adev->dm.hdcp_workqueue)
7383 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7384 #endif
7385 	}
7386 }
7387 
7388 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7389 			      struct i2c_msg *msgs, int num)
7390 {
7391 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7392 	struct ddc_service *ddc_service = i2c->ddc_service;
7393 	struct i2c_command cmd;
7394 	int i;
7395 	int result = -EIO;
7396 
7397 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7398 
7399 	if (!cmd.payloads)
7400 		return result;
7401 
7402 	cmd.number_of_payloads = num;
7403 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7404 	cmd.speed = 100;
7405 
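	/*
	 * Translate each i2c_msg 1:1 into a DC i2c_payload; the transfer
	 * direction is taken from the I2C_M_RD flag.
	 */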
7406 	for (i = 0; i < num; i++) {
7407 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7408 		cmd.payloads[i].address = msgs[i].addr;
7409 		cmd.payloads[i].length = msgs[i].len;
7410 		cmd.payloads[i].data = msgs[i].buf;
7411 	}
7412 
7413 	if (dc_submit_i2c(
7414 			ddc_service->ctx->dc,
7415 			ddc_service->ddc_pin->hw_info.ddc_channel,
7416 			&cmd))
7417 		result = num;
7418 
7419 	kfree(cmd.payloads);
7420 	return result;
7421 }
7422 
7423 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7424 {
7425 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7426 }
7427 
7428 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7429 	.master_xfer = amdgpu_dm_i2c_xfer,
7430 	.functionality = amdgpu_dm_i2c_func,
7431 };
7432 
7433 static struct amdgpu_i2c_adapter *
7434 create_i2c(struct ddc_service *ddc_service,
7435 	   int link_index,
7436 	   int *res)
7437 {
7438 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7439 	struct amdgpu_i2c_adapter *i2c;
7440 
7441 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7442 	if (!i2c)
7443 		return NULL;
7444 	i2c->base.owner = THIS_MODULE;
7445 	i2c->base.class = I2C_CLASS_DDC;
7446 	i2c->base.dev.parent = &adev->pdev->dev;
7447 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7448 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7449 	i2c_set_adapdata(&i2c->base, i2c);
7450 	i2c->ddc_service = ddc_service;
7451 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7452 
7453 	return i2c;
7454 }
7455 
7456 
7457 /*
7458  * Note: this function assumes that dc_link_detect() was called for the
7459  * dc_link which will be represented by this aconnector.
7460  */
7461 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7462 				    struct amdgpu_dm_connector *aconnector,
7463 				    uint32_t link_index,
7464 				    struct amdgpu_encoder *aencoder)
7465 {
7466 	int res = 0;
7467 	int connector_type;
7468 	struct dc *dc = dm->dc;
7469 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7470 	struct amdgpu_i2c_adapter *i2c;
7471 
7472 	link->priv = aconnector;
7473 
7474 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7475 
7476 	i2c = create_i2c(link->ddc, link->link_index, &res);
7477 	if (!i2c) {
7478 		DRM_ERROR("Failed to create i2c adapter data\n");
7479 		return -ENOMEM;
7480 	}
7481 
7482 	aconnector->i2c = i2c;
7483 	res = i2c_add_adapter(&i2c->base);
7484 
7485 	if (res) {
7486 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7487 		goto out_free;
7488 	}
7489 
7490 	connector_type = to_drm_connector_type(link->connector_signal);
7491 
7492 	res = drm_connector_init_with_ddc(
7493 			dm->ddev,
7494 			&aconnector->base,
7495 			&amdgpu_dm_connector_funcs,
7496 			connector_type,
7497 			&i2c->base);
7498 
7499 	if (res) {
7500 		DRM_ERROR("connector_init failed\n");
7501 		aconnector->connector_id = -1;
7502 		goto out_free;
7503 	}
7504 
7505 	drm_connector_helper_add(
7506 			&aconnector->base,
7507 			&amdgpu_dm_connector_helper_funcs);
7508 
7509 	amdgpu_dm_connector_init_helper(
7510 		dm,
7511 		aconnector,
7512 		connector_type,
7513 		link,
7514 		link_index);
7515 
7516 	drm_connector_attach_encoder(
7517 		&aconnector->base, &aencoder->base);
7518 
7519 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7520 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7521 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7522 
7523 out_free:
7524 	if (res) {
7525 		kfree(i2c);
7526 		aconnector->i2c = NULL;
7527 	}
7528 	return res;
7529 }
7530 
7531 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7532 {
7533 	switch (adev->mode_info.num_crtc) {
7534 	case 1:
7535 		return 0x1;
7536 	case 2:
7537 		return 0x3;
7538 	case 3:
7539 		return 0x7;
7540 	case 4:
7541 		return 0xf;
7542 	case 5:
7543 		return 0x1f;
7544 	case 6:
7545 	default:
7546 		return 0x3f;
7547 	}
7548 }
7549 
7550 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7551 				  struct amdgpu_encoder *aencoder,
7552 				  uint32_t link_index)
7553 {
7554 	struct amdgpu_device *adev = drm_to_adev(dev);
7555 
7556 	int res = drm_encoder_init(dev,
7557 				   &aencoder->base,
7558 				   &amdgpu_dm_encoder_funcs,
7559 				   DRM_MODE_ENCODER_TMDS,
7560 				   NULL);
7561 
7562 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7563 
7564 	if (!res)
7565 		aencoder->encoder_id = link_index;
7566 	else
7567 		aencoder->encoder_id = -1;
7568 
7569 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7570 
7571 	return res;
7572 }
7573 
7574 static void manage_dm_interrupts(struct amdgpu_device *adev,
7575 				 struct amdgpu_crtc *acrtc,
7576 				 bool enable)
7577 {
7578 	/*
7579 	 * We have no guarantee that the frontend index maps to the same
7580 	 * backend index - some even map to more than one.
7581 	 *
7582 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7583 	 */
7584 	int irq_type =
7585 		amdgpu_display_crtc_idx_to_irq_type(
7586 			adev,
7587 			acrtc->crtc_id);
7588 
7589 	if (enable) {
7590 		drm_crtc_vblank_on(&acrtc->base);
7591 		amdgpu_irq_get(
7592 			adev,
7593 			&adev->pageflip_irq,
7594 			irq_type);
7595 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7596 		amdgpu_irq_get(
7597 			adev,
7598 			&adev->vline0_irq,
7599 			irq_type);
7600 #endif
7601 	} else {
7602 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7603 		amdgpu_irq_put(
7604 			adev,
7605 			&adev->vline0_irq,
7606 			irq_type);
7607 #endif
7608 		amdgpu_irq_put(
7609 			adev,
7610 			&adev->pageflip_irq,
7611 			irq_type);
7612 		drm_crtc_vblank_off(&acrtc->base);
7613 	}
7614 }
7615 
7616 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7617 				      struct amdgpu_crtc *acrtc)
7618 {
7619 	int irq_type =
7620 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7621 
7622 	/**
7623 	 * This reads the current state for the IRQ and force reapplies
7624 	 * the setting to hardware.
7625 	 */
7626 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7627 }
7628 
7629 static bool
7630 is_scaling_state_different(const struct dm_connector_state *dm_state,
7631 			   const struct dm_connector_state *old_dm_state)
7632 {
7633 	if (dm_state->scaling != old_dm_state->scaling)
7634 		return true;
7635 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7636 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7637 			return true;
7638 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7639 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7640 			return true;
7641 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7642 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7643 		return true;
7644 	return false;
7645 }
7646 
7647 #ifdef CONFIG_DRM_AMD_DC_HDCP
7648 static bool is_content_protection_different(struct drm_connector_state *state,
7649 					    const struct drm_connector_state *old_state,
7650 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7651 {
7652 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7653 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7654 
7655 	/* Handle: Type0/1 change */
7656 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7657 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7658 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7659 		return true;
7660 	}
7661 
	/* CP is being re-enabled, ignore this.
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
7666 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7667 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7668 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7669 		return false;
7670 	}
7671 
7672 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7673 	 *
7674 	 * Handles:	UNDESIRED -> ENABLED
7675 	 */
7676 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7677 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7678 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7679 
	/* Check if something is connected and enabled; otherwise we would
	 * start hdcp with nothing connected/enabled (hot-plug, headless s3,
	 * dpms).
	 *
	 * Handles:	DESIRED -> DESIRED (Special case)
	 */
7685 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7686 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7687 		dm_con_state->update_hdcp = false;
7688 		return true;
7689 	}
7690 
7691 	/*
7692 	 * Handles:	UNDESIRED -> UNDESIRED
7693 	 *		DESIRED -> DESIRED
7694 	 *		ENABLED -> ENABLED
7695 	 */
7696 	if (old_state->content_protection == state->content_protection)
7697 		return false;
7698 
7699 	/*
7700 	 * Handles:	UNDESIRED -> DESIRED
7701 	 *		DESIRED -> UNDESIRED
7702 	 *		ENABLED -> UNDESIRED
7703 	 */
7704 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7705 		return true;
7706 
7707 	/*
7708 	 * Handles:	DESIRED -> ENABLED
7709 	 */
7710 	return false;
7711 }
7712 
7713 #endif
7714 static void remove_stream(struct amdgpu_device *adev,
7715 			  struct amdgpu_crtc *acrtc,
7716 			  struct dc_stream_state *stream)
7717 {
	/* This is the update mode case: just reset the CRTC's stream tracking state. */
7719 
7720 	acrtc->otg_inst = -1;
7721 	acrtc->enabled = false;
7722 }
7723 
7724 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7725 			       struct dc_cursor_position *position)
7726 {
7727 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7728 	int x, y;
7729 	int xorigin = 0, yorigin = 0;
7730 
7731 	position->enable = false;
7732 	position->x = 0;
7733 	position->y = 0;
7734 
7735 	if (!crtc || !plane->state->fb)
7736 		return 0;
7737 
7738 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7739 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7740 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7741 			  __func__,
7742 			  plane->state->crtc_w,
7743 			  plane->state->crtc_h);
7744 		return -EINVAL;
7745 	}
7746 
7747 	x = plane->state->crtc_x;
7748 	y = plane->state->crtc_y;
7749 
7750 	if (x <= -amdgpu_crtc->max_cursor_width ||
7751 	    y <= -amdgpu_crtc->max_cursor_height)
7752 		return 0;
7753 
7754 	if (x < 0) {
7755 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7756 		x = 0;
7757 	}
7758 	if (y < 0) {
7759 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7760 		y = 0;
7761 	}
7762 	position->enable = true;
7763 	position->translate_by_source = true;
7764 	position->x = x;
7765 	position->y = y;
7766 	position->x_hotspot = xorigin;
7767 	position->y_hotspot = yorigin;
7768 
7769 	return 0;
7770 }
7771 
7772 static void handle_cursor_update(struct drm_plane *plane,
7773 				 struct drm_plane_state *old_plane_state)
7774 {
7775 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7776 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7777 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7778 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7779 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7780 	uint64_t address = afb ? afb->address : 0;
7781 	struct dc_cursor_position position;
7782 	struct dc_cursor_attributes attributes;
7783 	int ret;
7784 
7785 	if (!plane->state->fb && !old_plane_state->fb)
7786 		return;
7787 
7788 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7789 			 __func__,
7790 			 amdgpu_crtc->crtc_id,
7791 			 plane->state->crtc_w,
7792 			 plane->state->crtc_h);
7793 
7794 	ret = get_cursor_position(plane, crtc, &position);
7795 	if (ret)
7796 		return;
7797 
7798 	if (!position.enable) {
7799 		/* turn off cursor */
7800 		if (crtc_state && crtc_state->stream) {
7801 			mutex_lock(&adev->dm.dc_lock);
7802 			dc_stream_set_cursor_position(crtc_state->stream,
7803 						      &position);
7804 			mutex_unlock(&adev->dm.dc_lock);
7805 		}
7806 		return;
7807 	}
7808 
7809 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7810 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7811 
7812 	memset(&attributes, 0, sizeof(attributes));
7813 	attributes.address.high_part = upper_32_bits(address);
7814 	attributes.address.low_part  = lower_32_bits(address);
7815 	attributes.width             = plane->state->crtc_w;
7816 	attributes.height            = plane->state->crtc_h;
7817 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7818 	attributes.rotation_angle    = 0;
7819 	attributes.attribute_flags.value = 0;
7820 
7821 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7822 
7823 	if (crtc_state->stream) {
7824 		mutex_lock(&adev->dm.dc_lock);
7825 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7826 							 &attributes))
7827 			DRM_ERROR("DC failed to set cursor attributes\n");
7828 
7829 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7830 						   &position))
7831 			DRM_ERROR("DC failed to set cursor position\n");
7832 		mutex_unlock(&adev->dm.dc_lock);
7833 	}
7834 }
7835 
7836 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7837 {
7838 
7839 	assert_spin_locked(&acrtc->base.dev->event_lock);
7840 	WARN_ON(acrtc->event);
7841 
7842 	acrtc->event = acrtc->base.state->event;
7843 
7844 	/* Set the flip status */
7845 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7846 
7847 	/* Mark this event as consumed */
7848 	acrtc->base.state->event = NULL;
7849 
7850 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7851 						 acrtc->crtc_id);
7852 }
7853 
7854 static void update_freesync_state_on_stream(
7855 	struct amdgpu_display_manager *dm,
7856 	struct dm_crtc_state *new_crtc_state,
7857 	struct dc_stream_state *new_stream,
7858 	struct dc_plane_state *surface,
7859 	u32 flip_timestamp_in_us)
7860 {
7861 	struct mod_vrr_params vrr_params;
7862 	struct dc_info_packet vrr_infopacket = {0};
7863 	struct amdgpu_device *adev = dm->adev;
7864 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7865 	unsigned long flags;
7866 	bool pack_sdp_v1_3 = false;
7867 
7868 	if (!new_stream)
7869 		return;
7870 
7871 	/*
7872 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7873 	 * For now it's sufficient to just guard against these conditions.
7874 	 */
7875 
7876 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7877 		return;
7878 
7879 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
7881 
7882 	if (surface) {
7883 		mod_freesync_handle_preflip(
7884 			dm->freesync_module,
7885 			surface,
7886 			new_stream,
7887 			flip_timestamp_in_us,
7888 			&vrr_params);
7889 
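		/*
		 * Only pre-AI (DCE) parts take this path: they handle the
		 * VRR v_update here in software on each flip, while newer
		 * ASICs do not need it done here.
		 */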
7890 		if (adev->family < AMDGPU_FAMILY_AI &&
7891 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7892 			mod_freesync_handle_v_update(dm->freesync_module,
7893 						     new_stream, &vrr_params);
7894 
7895 			/* Need to call this before the frame ends. */
7896 			dc_stream_adjust_vmin_vmax(dm->dc,
7897 						   new_crtc_state->stream,
7898 						   &vrr_params.adjust);
7899 		}
7900 	}
7901 
7902 	mod_freesync_build_vrr_infopacket(
7903 		dm->freesync_module,
7904 		new_stream,
7905 		&vrr_params,
7906 		PACKET_TYPE_VRR,
7907 		TRANSFER_FUNC_UNKNOWN,
7908 		&vrr_infopacket,
7909 		pack_sdp_v1_3);
7910 
7911 	new_crtc_state->freesync_timing_changed |=
7912 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7913 			&vrr_params.adjust,
7914 			sizeof(vrr_params.adjust)) != 0);
7915 
7916 	new_crtc_state->freesync_vrr_info_changed |=
7917 		(memcmp(&new_crtc_state->vrr_infopacket,
7918 			&vrr_infopacket,
7919 			sizeof(vrr_infopacket)) != 0);
7920 
7921 	acrtc->dm_irq_params.vrr_params = vrr_params;
7922 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7923 
7924 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7925 	new_stream->vrr_infopacket = vrr_infopacket;
7926 
7927 	if (new_crtc_state->freesync_vrr_info_changed)
7928 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7929 			      new_crtc_state->base.crtc->base.id,
7930 			      (int)new_crtc_state->base.vrr_enabled,
7931 			      (int)vrr_params.state);
7932 
7933 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7934 }
7935 
7936 static void update_stream_irq_parameters(
7937 	struct amdgpu_display_manager *dm,
7938 	struct dm_crtc_state *new_crtc_state)
7939 {
7940 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7941 	struct mod_vrr_params vrr_params;
7942 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7943 	struct amdgpu_device *adev = dm->adev;
7944 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7945 	unsigned long flags;
7946 
7947 	if (!new_stream)
7948 		return;
7949 
7950 	/*
7951 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7952 	 * For now it's sufficient to just guard against these conditions.
7953 	 */
7954 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7955 		return;
7956 
7957 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7958 	vrr_params = acrtc->dm_irq_params.vrr_params;
7959 
7960 	if (new_crtc_state->vrr_supported &&
7961 	    config.min_refresh_in_uhz &&
7962 	    config.max_refresh_in_uhz) {
7963 		/*
7964 		 * if freesync compatible mode was set, config.state will be set
7965 		 * in atomic check
7966 		 */
7967 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7968 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7969 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7970 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7971 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7972 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7973 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7974 		} else {
7975 			config.state = new_crtc_state->base.vrr_enabled ?
7976 						     VRR_STATE_ACTIVE_VARIABLE :
7977 						     VRR_STATE_INACTIVE;
7978 		}
7979 	} else {
7980 		config.state = VRR_STATE_UNSUPPORTED;
7981 	}
7982 
7983 	mod_freesync_build_vrr_params(dm->freesync_module,
7984 				      new_stream,
7985 				      &config, &vrr_params);
7986 
7987 	new_crtc_state->freesync_timing_changed |=
7988 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7989 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7990 
7991 	new_crtc_state->freesync_config = config;
7992 	/* Copy state for access from DM IRQ handler */
7993 	acrtc->dm_irq_params.freesync_config = config;
7994 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7995 	acrtc->dm_irq_params.vrr_params = vrr_params;
7996 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7997 }
7998 
7999 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8000 					    struct dm_crtc_state *new_state)
8001 {
8002 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8003 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8004 
8005 	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable the vblank irq, as
		 * a reenable after disable would compute bogus vblank/pflip
		 * timestamps if the reenable happens inside the display
		 * front porch.
		 *
		 * We also need the vupdate irq for the actual core vblank
		 * handling at end of vblank.
		 */
8014 		dm_set_vupdate_irq(new_state->base.crtc, true);
8015 		drm_crtc_vblank_get(new_state->base.crtc);
8016 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8017 				 __func__, new_state->base.crtc->base.id);
8018 	} else if (old_vrr_active && !new_vrr_active) {
8019 		/* Transition VRR active -> inactive:
8020 		 * Allow vblank irq disable again for fixed refresh rate.
8021 		 */
8022 		dm_set_vupdate_irq(new_state->base.crtc, false);
8023 		drm_crtc_vblank_put(new_state->base.crtc);
8024 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8025 				 __func__, new_state->base.crtc->base.id);
8026 	}
8027 }
8028 
8029 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8030 {
8031 	struct drm_plane *plane;
8032 	struct drm_plane_state *old_plane_state, *new_plane_state;
8033 	int i;
8034 
8035 	/*
8036 	 * TODO: Make this per-stream so we don't issue redundant updates for
8037 	 * commits with multiple streams.
8038 	 */
8039 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8040 				       new_plane_state, i)
8041 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8042 			handle_cursor_update(plane, old_plane_state);
8043 }
8044 
8045 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8046 				    struct dc_state *dc_state,
8047 				    struct drm_device *dev,
8048 				    struct amdgpu_display_manager *dm,
8049 				    struct drm_crtc *pcrtc,
8050 				    bool wait_for_vblank)
8051 {
8052 	uint32_t i;
8053 	uint64_t timestamp_ns;
8054 	struct drm_plane *plane;
8055 	struct drm_plane_state *old_plane_state, *new_plane_state;
8056 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8057 	struct drm_crtc_state *new_pcrtc_state =
8058 			drm_atomic_get_new_crtc_state(state, pcrtc);
8059 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8060 	struct dm_crtc_state *dm_old_crtc_state =
8061 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8062 	int planes_count = 0, vpos, hpos;
8063 	long r;
8064 	unsigned long flags;
8065 	struct amdgpu_bo *abo;
8066 	uint32_t target_vblank, last_flip_vblank;
8067 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8068 	bool pflip_present = false;
8069 	struct {
8070 		struct dc_surface_update surface_updates[MAX_SURFACES];
8071 		struct dc_plane_info plane_infos[MAX_SURFACES];
8072 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8073 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8074 		struct dc_stream_update stream_update;
8075 	} *bundle;
8076 
8077 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8078 
8079 	if (!bundle) {
8080 		dm_error("Failed to allocate update bundle\n");
8081 		goto cleanup;
8082 	}
8083 
8084 	/*
8085 	 * Disable the cursor first if we're disabling all the planes.
8086 	 * It'll remain on the screen after the planes are re-enabled
8087 	 * if we don't.
8088 	 */
8089 	if (acrtc_state->active_planes == 0)
8090 		amdgpu_dm_commit_cursors(state);
8091 
8092 	/* update planes when needed */
8093 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8094 		struct drm_crtc *crtc = new_plane_state->crtc;
8095 		struct drm_crtc_state *new_crtc_state;
8096 		struct drm_framebuffer *fb = new_plane_state->fb;
8097 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8098 		bool plane_needs_flip;
8099 		struct dc_plane_state *dc_plane;
8100 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8101 
8102 		/* Cursor plane is handled after stream updates */
8103 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8104 			continue;
8105 
8106 		if (!fb || !crtc || pcrtc != crtc)
8107 			continue;
8108 
8109 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8110 		if (!new_crtc_state->active)
8111 			continue;
8112 
8113 		dc_plane = dm_new_plane_state->dc_state;
8114 
8115 		bundle->surface_updates[planes_count].surface = dc_plane;
8116 		if (new_pcrtc_state->color_mgmt_changed) {
8117 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8118 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8119 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8120 		}
8121 
8122 		fill_dc_scaling_info(new_plane_state,
8123 				     &bundle->scaling_infos[planes_count]);
8124 
8125 		bundle->surface_updates[planes_count].scaling_info =
8126 			&bundle->scaling_infos[planes_count];
8127 
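		/*
		 * Only a change from one fb to another counts as a page flip;
		 * plane enables/disables go through the full update path.
		 */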
8128 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8129 
8130 		pflip_present = pflip_present || plane_needs_flip;
8131 
8132 		if (!plane_needs_flip) {
8133 			planes_count += 1;
8134 			continue;
8135 		}
8136 
8137 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8138 
8139 		/*
8140 		 * Wait for all fences on this FB. Do limited wait to avoid
8141 		 * deadlock during GPU reset when this fence will not signal
8142 		 * but we hold reservation lock for the BO.
8143 		 */
8144 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8145 							false,
8146 							msecs_to_jiffies(5000));
8147 		if (unlikely(r <= 0))
8148 			DRM_ERROR("Waiting for fences timed out!");
8149 
8150 		fill_dc_plane_info_and_addr(
8151 			dm->adev, new_plane_state,
8152 			afb->tiling_flags,
8153 			&bundle->plane_infos[planes_count],
8154 			&bundle->flip_addrs[planes_count].address,
8155 			afb->tmz_surface, false);
8156 
8157 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
8158 				 new_plane_state->plane->index,
8159 				 bundle->plane_infos[planes_count].dcc.enable);
8160 
8161 		bundle->surface_updates[planes_count].plane_info =
8162 			&bundle->plane_infos[planes_count];
8163 
8164 		/*
8165 		 * Only allow immediate flips for fast updates that don't
8166 		 * change FB pitch, DCC state, rotation or mirroing.
8167 		 */
8168 		bundle->flip_addrs[planes_count].flip_immediate =
8169 			crtc->state->async_flip &&
8170 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8171 
8172 		timestamp_ns = ktime_get_ns();
8173 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8174 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8175 		bundle->surface_updates[planes_count].surface = dc_plane;
8176 
8177 		if (!bundle->surface_updates[planes_count].surface) {
8178 			DRM_ERROR("No surface for CRTC: id=%d\n",
8179 					acrtc_attach->crtc_id);
8180 			continue;
8181 		}
8182 
8183 		if (plane == pcrtc->primary)
8184 			update_freesync_state_on_stream(
8185 				dm,
8186 				acrtc_state,
8187 				acrtc_state->stream,
8188 				dc_plane,
8189 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8190 
8191 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
8192 				 __func__,
8193 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8194 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8195 
8196 		planes_count += 1;
8197 
8198 	}
8199 
8200 	if (pflip_present) {
8201 		if (!vrr_active) {
8202 			/* Use old throttling in non-vrr fixed refresh rate mode
8203 			 * to keep flip scheduling based on target vblank counts
8204 			 * working in a backwards compatible way, e.g., for
8205 			 * clients using the GLX_OML_sync_control extension or
8206 			 * DRI3/Present extension with defined target_msc.
8207 			 */
8208 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8209 		}
8210 		else {
8211 			/* For variable refresh rate mode only:
8212 			 * Get vblank of last completed flip to avoid > 1 vrr
8213 			 * flips per video frame by use of throttling, but allow
8214 			 * flip programming anywhere in the possibly large
8215 			 * variable vrr vblank interval for fine-grained flip
8216 			 * timing control and more opportunity to avoid stutter
8217 			 * on late submission of flips.
8218 			 */
8219 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8220 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8221 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8222 		}
8223 
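		/*
		 * wait_for_vblank is 0 or 1 here, so the target is either the
		 * last completed flip's vblank or the one after it.
		 */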
8224 		target_vblank = last_flip_vblank + wait_for_vblank;
8225 
8226 		/*
8227 		 * Wait until we're out of the vertical blank period before the one
8228 		 * targeted by the flip
8229 		 */
8230 		while ((acrtc_attach->enabled &&
8231 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8232 							    0, &vpos, &hpos, NULL,
8233 							    NULL, &pcrtc->hwmode)
8234 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8235 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8236 			(int)(target_vblank -
8237 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8238 			usleep_range(1000, 1100);
8239 		}
8240 
8241 		/**
8242 		 * Prepare the flip event for the pageflip interrupt to handle.
8243 		 *
8244 		 * This only works in the case where we've already turned on the
8245 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
8246 		 * from 0 -> n planes we have to skip a hardware generated event
8247 		 * and rely on sending it from software.
8248 		 */
8249 		if (acrtc_attach->base.state->event &&
8250 		    acrtc_state->active_planes > 0) {
8251 			drm_crtc_vblank_get(pcrtc);
8252 
8253 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8254 
8255 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8256 			prepare_flip_isr(acrtc_attach);
8257 
8258 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8259 		}
8260 
8261 		if (acrtc_state->stream) {
8262 			if (acrtc_state->freesync_vrr_info_changed)
8263 				bundle->stream_update.vrr_infopacket =
8264 					&acrtc_state->stream->vrr_infopacket;
8265 		}
8266 	}
8267 
8268 	/* Update the planes if changed or disable if we don't have any. */
8269 	if ((planes_count || acrtc_state->active_planes == 0) &&
8270 		acrtc_state->stream) {
8271 		bundle->stream_update.stream = acrtc_state->stream;
8272 		if (new_pcrtc_state->mode_changed) {
8273 			bundle->stream_update.src = acrtc_state->stream->src;
8274 			bundle->stream_update.dst = acrtc_state->stream->dst;
8275 		}
8276 
8277 		if (new_pcrtc_state->color_mgmt_changed) {
8278 			/*
8279 			 * TODO: This isn't fully correct since we've actually
8280 			 * already modified the stream in place.
8281 			 */
8282 			bundle->stream_update.gamut_remap =
8283 				&acrtc_state->stream->gamut_remap_matrix;
8284 			bundle->stream_update.output_csc_transform =
8285 				&acrtc_state->stream->csc_color_matrix;
8286 			bundle->stream_update.out_transfer_func =
8287 				acrtc_state->stream->out_transfer_func;
8288 		}
8289 
8290 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8291 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8292 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8293 
8294 		/*
8295 		 * If FreeSync state on the stream has changed then we need to
8296 		 * re-adjust the min/max bounds now that DC doesn't handle this
8297 		 * as part of commit.
8298 		 */
8299 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8300 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8301 			dc_stream_adjust_vmin_vmax(
8302 				dm->dc, acrtc_state->stream,
8303 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8304 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8305 		}
8306 		mutex_lock(&dm->dc_lock);
8307 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8308 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8309 			amdgpu_dm_psr_disable(acrtc_state->stream);
8310 
8311 		dc_commit_updates_for_stream(dm->dc,
8312 						     bundle->surface_updates,
8313 						     planes_count,
8314 						     acrtc_state->stream,
8315 						     &bundle->stream_update,
8316 						     dc_state);
8317 
8318 		/**
8319 		 * Enable or disable the interrupts on the backend.
8320 		 *
8321 		 * Most pipes are put into power gating when unused.
8322 		 *
8323 		 * When power gating is enabled on a pipe we lose the
8324 		 * interrupt enablement state when power gating is disabled.
8325 		 *
8326 		 * So we need to update the IRQ control state in hardware
8327 		 * whenever the pipe turns on (since it could be previously
8328 		 * power gated) or off (since some pipes can't be power gated
8329 		 * on some ASICs).
8330 		 */
8331 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8332 			dm_update_pflip_irq_state(drm_to_adev(dev),
8333 						  acrtc_attach);
8334 
8335 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8336 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8337 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8338 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8339 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8340 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8341 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8342 			amdgpu_dm_psr_enable(acrtc_state->stream);
8343 		}
8344 
8345 		mutex_unlock(&dm->dc_lock);
8346 	}
8347 
8348 	/*
8349 	 * Update cursor state *after* programming all the planes.
8350 	 * This avoids redundant programming in the case where we're going
8351 	 * to be disabling a single plane - those pipes are being disabled.
8352 	 */
8353 	if (acrtc_state->active_planes)
8354 		amdgpu_dm_commit_cursors(state);
8355 
8356 cleanup:
8357 	kfree(bundle);
8358 }
8359 
8360 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8361 				   struct drm_atomic_state *state)
8362 {
8363 	struct amdgpu_device *adev = drm_to_adev(dev);
8364 	struct amdgpu_dm_connector *aconnector;
8365 	struct drm_connector *connector;
8366 	struct drm_connector_state *old_con_state, *new_con_state;
8367 	struct drm_crtc_state *new_crtc_state;
8368 	struct dm_crtc_state *new_dm_crtc_state;
8369 	const struct dc_stream_status *status;
8370 	int i, inst;
8371 
8372 	/* Notify device removals. */
8373 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8374 		if (old_con_state->crtc != new_con_state->crtc) {
8375 			/* CRTC changes require notification. */
8376 			goto notify;
8377 		}
8378 
8379 		if (!new_con_state->crtc)
8380 			continue;
8381 
8382 		new_crtc_state = drm_atomic_get_new_crtc_state(
8383 			state, new_con_state->crtc);
8384 
8385 		if (!new_crtc_state)
8386 			continue;
8387 
8388 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8389 			continue;
8390 
8391 	notify:
8392 		aconnector = to_amdgpu_dm_connector(connector);
8393 
8394 		mutex_lock(&adev->dm.audio_lock);
8395 		inst = aconnector->audio_inst;
8396 		aconnector->audio_inst = -1;
8397 		mutex_unlock(&adev->dm.audio_lock);
8398 
8399 		amdgpu_dm_audio_eld_notify(adev, inst);
8400 	}
8401 
8402 	/* Notify audio device additions. */
8403 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8404 		if (!new_con_state->crtc)
8405 			continue;
8406 
8407 		new_crtc_state = drm_atomic_get_new_crtc_state(
8408 			state, new_con_state->crtc);
8409 
8410 		if (!new_crtc_state)
8411 			continue;
8412 
8413 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8414 			continue;
8415 
8416 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8417 		if (!new_dm_crtc_state->stream)
8418 			continue;
8419 
8420 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8421 		if (!status)
8422 			continue;
8423 
8424 		aconnector = to_amdgpu_dm_connector(connector);
8425 
8426 		mutex_lock(&adev->dm.audio_lock);
8427 		inst = status->audio_inst;
8428 		aconnector->audio_inst = inst;
8429 		mutex_unlock(&adev->dm.audio_lock);
8430 
8431 		amdgpu_dm_audio_eld_notify(adev, inst);
8432 	}
8433 }
8434 
8435 /*
8436  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8437  * @crtc_state: the DRM CRTC state
8438  * @stream_state: the DC stream state.
8439  *
8440  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8441  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8442  */
8443 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8444 						struct dc_stream_state *stream_state)
8445 {
8446 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8447 }
8448 
8449 /**
8450  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8451  * @state: The atomic state to commit
8452  *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure,
 * since atomic check should have filtered out anything non-kosher.
8456  */
8457 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8458 {
8459 	struct drm_device *dev = state->dev;
8460 	struct amdgpu_device *adev = drm_to_adev(dev);
8461 	struct amdgpu_display_manager *dm = &adev->dm;
8462 	struct dm_atomic_state *dm_state;
8463 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8464 	uint32_t i, j;
8465 	struct drm_crtc *crtc;
8466 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8467 	unsigned long flags;
8468 	bool wait_for_vblank = true;
8469 	struct drm_connector *connector;
8470 	struct drm_connector_state *old_con_state, *new_con_state;
8471 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8472 	int crtc_disable_count = 0;
8473 	bool mode_set_reset_required = false;
8474 
8475 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8476 
8477 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8478 
8479 	dm_state = dm_atomic_get_new_state(state);
8480 	if (dm_state && dm_state->context) {
8481 		dc_state = dm_state->context;
8482 	} else {
8483 		/* No state changes, retain current state. */
8484 		dc_state_temp = dc_create_state(dm->dc);
8485 		ASSERT(dc_state_temp);
8486 		dc_state = dc_state_temp;
8487 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8488 	}
8489 
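	/*
	 * Disable interrupts and release the old stream for any CRTC that is
	 * being turned off or going through a full modeset, before DC
	 * reprograms the hardware, so the IRQ handlers never observe a
	 * half-torn-down stream.
	 */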
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
8492 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8493 
8494 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8495 
8496 		if (old_crtc_state->active &&
8497 		    (!new_crtc_state->active ||
8498 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8499 			manage_dm_interrupts(adev, acrtc, false);
8500 			dc_stream_release(dm_old_crtc_state->stream);
8501 		}
8502 	}
8503 
8504 	drm_atomic_helper_calc_timestamping_constants(state);
8505 
8506 	/* update changed items */
8507 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8508 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8509 
8510 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8511 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8512 
8513 		DRM_DEBUG_DRIVER(
8514 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8515 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8516 			"connectors_changed:%d\n",
8517 			acrtc->crtc_id,
8518 			new_crtc_state->enable,
8519 			new_crtc_state->active,
8520 			new_crtc_state->planes_changed,
8521 			new_crtc_state->mode_changed,
8522 			new_crtc_state->active_changed,
8523 			new_crtc_state->connectors_changed);
8524 
8525 		/* Disable cursor if disabling crtc */
8526 		if (old_crtc_state->active && !new_crtc_state->active) {
8527 			struct dc_cursor_position position;
8528 
8529 			memset(&position, 0, sizeof(position));
8530 			mutex_lock(&dm->dc_lock);
8531 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8532 			mutex_unlock(&dm->dc_lock);
8533 		}
8534 
8535 		/* Copy all transient state flags into dc state */
8536 		if (dm_new_crtc_state->stream) {
8537 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8538 							    dm_new_crtc_state->stream);
8539 		}
8540 
		/*
		 * Handle the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
8544 
8545 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8546 
8547 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8548 
8549 			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with the
				 * delivery of userspace notifications.
				 * In that case userspace tries to set a mode on
				 * a display which is in fact disconnected;
				 * dc_sink is NULL on the aconnector here.
				 * We expect a mode-reset to come soon.
				 *
				 * This can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In this case we want to pretend we still
				 * have a sink to keep the pipe running, so that
				 * hw state stays consistent with the sw state.
				 */
8565 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8566 						__func__, acrtc->base.base.id);
8567 				continue;
8568 			}
8569 
8570 			if (dm_old_crtc_state->stream)
8571 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8572 
8573 			pm_runtime_get_noresume(dev->dev);
8574 
8575 			acrtc->enabled = true;
8576 			acrtc->hw_mode = new_crtc_state->mode;
8577 			crtc->hwmode = new_crtc_state->mode;
8578 			mode_set_reset_required = true;
8579 		} else if (modereset_required(new_crtc_state)) {
8580 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8581 			/* i.e. reset mode */
8582 			if (dm_old_crtc_state->stream)
8583 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8584 
8585 			mode_set_reset_required = true;
8586 		}
8587 	} /* for_each_crtc_in_state() */
8588 
8589 	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
8591 		if (mode_set_reset_required)
8592 			amdgpu_dm_psr_disable_all(dm);
8593 
8594 		dm_enable_per_frame_crtc_master_sync(dc_state);
8595 		mutex_lock(&dm->dc_lock);
8596 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8597 		mutex_unlock(&dm->dc_lock);
8598 	}
8599 
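	/*
	 * After the commit, record which OTG instance DC assigned to each
	 * enabled stream; it is used later to route OTG interrupts back to
	 * the right CRTC.
	 */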
8600 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8601 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8602 
8603 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8604 
8605 		if (dm_new_crtc_state->stream != NULL) {
8606 			const struct dc_stream_status *status =
8607 					dc_stream_get_status(dm_new_crtc_state->stream);
8608 
8609 			if (!status)
8610 				status = dc_stream_get_status_from_state(dc_state,
8611 									 dm_new_crtc_state->stream);
8612 			if (!status)
8613 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8614 			else
8615 				acrtc->otg_inst = status->primary_otg_inst;
8616 		}
8617 	}
8618 #ifdef CONFIG_DRM_AMD_DC_HDCP
8619 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8620 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8621 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8622 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8623 
8624 		new_crtc_state = NULL;
8625 
8626 		if (acrtc)
8627 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8628 
8629 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8630 
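		/*
		 * If the stream was removed while content protection was still
		 * enabled on the connector, reset HDCP on the link and drop
		 * back to DESIRED so protection gets re-requested once a
		 * stream is attached again.
		 */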
8631 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8632 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8633 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8634 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8635 			dm_new_con_state->update_hdcp = true;
8636 			continue;
8637 		}
8638 
8639 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8640 			hdcp_update_display(
8641 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8642 				new_con_state->hdcp_content_type,
8643 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8644 	}
8645 #endif
8646 
8647 	/* Handle connector state changes */
8648 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8649 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8650 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8651 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8652 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8653 		struct dc_stream_update stream_update;
8654 		struct dc_info_packet hdr_packet;
8655 		struct dc_stream_status *status = NULL;
8656 		bool abm_changed, hdr_changed, scaling_changed;
8657 
8658 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8659 		memset(&stream_update, 0, sizeof(stream_update));
8660 
8661 		if (acrtc) {
8662 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8663 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8664 		}
8665 
8666 		/* Skip any modesets/resets */
8667 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8668 			continue;
8669 
8670 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8671 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8672 
8673 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8674 							     dm_old_con_state);
8675 
8676 		abm_changed = dm_new_crtc_state->abm_level !=
8677 			      dm_old_crtc_state->abm_level;
8678 
8679 		hdr_changed =
8680 			is_hdr_metadata_different(old_con_state, new_con_state);
8681 
8682 		if (!scaling_changed && !abm_changed && !hdr_changed)
8683 			continue;
8684 
8685 		stream_update.stream = dm_new_crtc_state->stream;
8686 		if (scaling_changed) {
8687 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8688 					dm_new_con_state, dm_new_crtc_state->stream);
8689 
8690 			stream_update.src = dm_new_crtc_state->stream->src;
8691 			stream_update.dst = dm_new_crtc_state->stream->dst;
8692 		}
8693 
8694 		if (abm_changed) {
8695 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8696 
8697 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8698 		}
8699 
8700 		if (hdr_changed) {
8701 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8702 			stream_update.hdr_static_metadata = &hdr_packet;
8703 		}
8704 
8705 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8706 		WARN_ON(!status);
8707 		WARN_ON(!status->plane_count);
8708 
8709 		/*
8710 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8711 		 * Here we create an empty update on each plane.
8712 		 * To fix this, DC should permit updating only stream properties.
8713 		 */
8714 		for (j = 0; j < status->plane_count; j++)
8715 			dummy_updates[j].surface = status->plane_states[0];
8716 
8718 		mutex_lock(&dm->dc_lock);
8719 		dc_commit_updates_for_stream(dm->dc,
8720 						     dummy_updates,
8721 						     status->plane_count,
8722 						     dm_new_crtc_state->stream,
8723 						     &stream_update,
8724 						     dc_state);
8725 		mutex_unlock(&dm->dc_lock);
8726 	}
8727 
8728 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8729 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8730 				      new_crtc_state, i) {
8731 		if (old_crtc_state->active && !new_crtc_state->active)
8732 			crtc_disable_count++;
8733 
8734 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8735 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8736 
8737 		/* For freesync config update on crtc state and params for irq */
8738 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8739 
8740 		/* Handle vrr on->off / off->on transitions */
8741 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8742 						dm_new_crtc_state);
8743 	}
8744 
	/*
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. This is deliberately deferred until after the front end
	 * state has been committed, so that the OTG is running and the IRQ
	 * handlers cannot access stale or invalid state.
	 */
8751 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8752 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8753 #ifdef CONFIG_DEBUG_FS
8754 		bool configure_crc = false;
8755 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
8756 #endif
8757 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8758 
8759 		if (new_crtc_state->active &&
8760 		    (!old_crtc_state->active ||
8761 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8762 			dc_stream_retain(dm_new_crtc_state->stream);
8763 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8764 			manage_dm_interrupts(adev, acrtc, true);
8765 
8766 #ifdef CONFIG_DEBUG_FS
			/*
			 * The front end may have changed, so reapply the CRC
			 * capture settings for the stream.
			 */
8771 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8772 			spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8773 			cur_crc_src = acrtc->dm_irq_params.crc_src;
8774 			spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8775 
8776 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8777 				configure_crc = true;
8778 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8779 				if (amdgpu_dm_crc_window_is_activated(crtc))
8780 					configure_crc = false;
8781 #endif
8782 			}
8783 
8784 			if (configure_crc)
8785 				amdgpu_dm_crtc_configure_crc_source(
8786 					crtc, dm_new_crtc_state, cur_crc_src);
8787 #endif
8788 		}
8789 	}
8790 
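	/* If any CRTC requested an async flip, don't stall the commit on vblank. */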
8791 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8792 		if (new_crtc_state->async_flip)
8793 			wait_for_vblank = false;
8794 
8795 	/* update planes when needed per crtc*/
8796 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8797 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8798 
8799 		if (dm_new_crtc_state->stream)
8800 			amdgpu_dm_commit_planes(state, dc_state, dev,
8801 						dm, crtc, wait_for_vblank);
8802 	}
8803 
8804 	/* Update audio instances for each connector. */
8805 	amdgpu_dm_commit_audio(dev, state);
8806 
8807 	/*
8808 	 * send vblank event on all events not handled in flip and
8809 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8810 	 */
8811 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8812 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8813 
8814 		if (new_crtc_state->event)
8815 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8816 
8817 		new_crtc_state->event = NULL;
8818 	}
8819 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8820 
8821 	/* Signal HW programming completion */
8822 	drm_atomic_helper_commit_hw_done(state);
8823 
8824 	if (wait_for_vblank)
8825 		drm_atomic_helper_wait_for_flip_done(dev, state);
8826 
8827 	drm_atomic_helper_cleanup_planes(dev, state);
8828 
8829 	/* return the stolen vga memory back to VRAM */
8830 	if (!adev->mman.keep_stolen_vga_memory)
8831 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8832 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8833 
8834 	/*
8835 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8836 	 * so we can put the GPU into runtime suspend if we're not driving any
8837 	 * displays anymore
8838 	 */
8839 	for (i = 0; i < crtc_disable_count; i++)
8840 		pm_runtime_put_autosuspend(dev->dev);
8841 	pm_runtime_mark_last_busy(dev->dev);
8842 
8843 	if (dc_state_temp)
8844 		dc_release_state(dc_state_temp);
8845 }
8846 
8848 static int dm_force_atomic_commit(struct drm_connector *connector)
8849 {
8850 	int ret = 0;
8851 	struct drm_device *ddev = connector->dev;
8852 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8853 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8854 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8855 	struct drm_connector_state *conn_state;
8856 	struct drm_crtc_state *crtc_state;
8857 	struct drm_plane_state *plane_state;
8858 
8859 	if (!state)
8860 		return -ENOMEM;
8861 
8862 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8863 
8864 	/* Construct an atomic state to restore previous display setting */
8865 
8866 	/*
8867 	 * Attach connectors to drm_atomic_state
8868 	 */
8869 	conn_state = drm_atomic_get_connector_state(state, connector);
8870 
8871 	ret = PTR_ERR_OR_ZERO(conn_state);
8872 	if (ret)
8873 		goto out;
8874 
8875 	/* Attach crtc to drm_atomic_state*/
8876 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8877 
8878 	ret = PTR_ERR_OR_ZERO(crtc_state);
8879 	if (ret)
8880 		goto out;
8881 
8882 	/* force a restore */
8883 	crtc_state->mode_changed = true;
8884 
8885 	/* Attach plane to drm_atomic_state */
8886 	plane_state = drm_atomic_get_plane_state(state, plane);
8887 
8888 	ret = PTR_ERR_OR_ZERO(plane_state);
8889 	if (ret)
8890 		goto out;
8891 
8892 	/* Call commit internally with the state we just constructed */
8893 	ret = drm_atomic_commit(state);
8894 
8895 out:
8896 	drm_atomic_state_put(state);
8897 	if (ret)
8898 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8899 
8900 	return ret;
8901 }
8902 
/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
8908 void dm_restore_drm_connector_state(struct drm_device *dev,
8909 				    struct drm_connector *connector)
8910 {
8911 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8912 	struct amdgpu_crtc *disconnected_acrtc;
8913 	struct dm_crtc_state *acrtc_state;
8914 
8915 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8916 		return;
8917 
8918 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8919 	if (!disconnected_acrtc)
8920 		return;
8921 
8922 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8923 	if (!acrtc_state->stream)
8924 		return;
8925 
	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce that we cannot rely on a usermode call to
	 * turn on the display, so we do it here.
	 */
8931 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8932 		dm_force_atomic_commit(&aconnector->base);
8933 }
8934 
8935 /*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
8938  */
static int do_acquire_global_lock(struct drm_device *dev,
8940 				 struct drm_atomic_state *state)
8941 {
8942 	struct drm_crtc *crtc;
8943 	struct drm_crtc_commit *commit;
8944 	long ret;
8945 
	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases it, the extra locks we took here are released
	 * too.
	 */
8951 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8952 	if (ret)
8953 		return ret;
8954 
8955 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8956 		spin_lock(&crtc->commit_lock);
8957 		commit = list_first_entry_or_null(&crtc->commit_list,
8958 				struct drm_crtc_commit, commit_entry);
8959 		if (commit)
8960 			drm_crtc_commit_get(commit);
8961 		spin_unlock(&crtc->commit_lock);
8962 
8963 		if (!commit)
8964 			continue;
8965 
8966 		/*
8967 		 * Make sure all pending HW programming completed and
8968 		 * page flips done
8969 		 */
8970 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8971 
8972 		if (ret > 0)
8973 			ret = wait_for_completion_interruptible_timeout(
8974 					&commit->flip_done, 10*HZ);
8975 
8976 		if (ret == 0)
8977 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8978 				  "timed out\n", crtc->base.id, crtc->name);
8979 
8980 		drm_crtc_commit_put(commit);
8981 	}
8982 
8983 	return ret < 0 ? ret : 0;
8984 }
8985 
8986 static void get_freesync_config_for_crtc(
8987 	struct dm_crtc_state *new_crtc_state,
8988 	struct dm_connector_state *new_con_state)
8989 {
8990 	struct mod_freesync_config config = {0};
8991 	struct amdgpu_dm_connector *aconnector =
8992 			to_amdgpu_dm_connector(new_con_state->base.connector);
8993 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8994 	int vrefresh = drm_mode_vrefresh(mode);
8995 	bool fs_vid_mode = false;
8996 
8997 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8998 					vrefresh >= aconnector->min_vfreq &&
8999 					vrefresh <= aconnector->max_vfreq;
9000 
9001 	if (new_crtc_state->vrr_supported) {
9002 		new_crtc_state->stream->ignore_msa_timing_param = true;
9003 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9004 
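		/* DC expects the refresh range in micro-Hz: 1 Hz = 1,000,000 uHz. */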
9005 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9006 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9007 		config.vsif_supported = true;
9008 		config.btr = true;
9009 
9010 		if (fs_vid_mode) {
9011 			config.state = VRR_STATE_ACTIVE_FIXED;
9012 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9013 			goto out;
9014 		} else if (new_crtc_state->base.vrr_enabled) {
9015 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9016 		} else {
9017 			config.state = VRR_STATE_INACTIVE;
9018 		}
9019 	}
9020 out:
9021 	new_crtc_state->freesync_config = config;
9022 }
9023 
9024 static void reset_freesync_config_for_crtc(
9025 	struct dm_crtc_state *new_crtc_state)
9026 {
9027 	new_crtc_state->vrr_supported = false;
9028 
9029 	memset(&new_crtc_state->vrr_infopacket, 0,
9030 	       sizeof(new_crtc_state->vrr_infopacket));
9031 }
9032 
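/*
 * Returns true when the two modes differ only in their vertical blanking
 * (vtotal / vsync position) while every horizontal parameter and the vsync
 * width stay the same - i.e. only the vertical front porch changed, which
 * is how freesync video modes vary the refresh rate without a full modeset.
 */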
9033 static bool
9034 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9035 				 struct drm_crtc_state *new_crtc_state)
9036 {
9037 	struct drm_display_mode old_mode, new_mode;
9038 
9039 	if (!old_crtc_state || !new_crtc_state)
9040 		return false;
9041 
9042 	old_mode = old_crtc_state->mode;
9043 	new_mode = new_crtc_state->mode;
9044 
9045 	if (old_mode.clock       == new_mode.clock &&
9046 	    old_mode.hdisplay    == new_mode.hdisplay &&
9047 	    old_mode.vdisplay    == new_mode.vdisplay &&
9048 	    old_mode.htotal      == new_mode.htotal &&
9049 	    old_mode.vtotal      != new_mode.vtotal &&
9050 	    old_mode.hsync_start == new_mode.hsync_start &&
9051 	    old_mode.vsync_start != new_mode.vsync_start &&
9052 	    old_mode.hsync_end   == new_mode.hsync_end &&
9053 	    old_mode.vsync_end   != new_mode.vsync_end &&
9054 	    old_mode.hskew       == new_mode.hskew &&
9055 	    old_mode.vscan       == new_mode.vscan &&
9056 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9057 	    (new_mode.vsync_end - new_mode.vsync_start))
9058 		return true;
9059 
9060 	return false;
9061 }
9062 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
	uint64_t num, den, res;
9065 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9066 
9067 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9068 
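	/*
	 * mode.clock is in kHz; scaling by 1000 * 1000000 makes the division
	 * below yield the fixed refresh rate in uHz:
	 * refresh = pixel_clock / (htotal * vtotal).
	 */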
9069 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9070 	den = (unsigned long long)new_crtc_state->mode.htotal *
9071 	      (unsigned long long)new_crtc_state->mode.vtotal;
9072 
9073 	res = div_u64(num, den);
9074 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9075 }
9076 
9077 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9078 				struct drm_atomic_state *state,
9079 				struct drm_crtc *crtc,
9080 				struct drm_crtc_state *old_crtc_state,
9081 				struct drm_crtc_state *new_crtc_state,
9082 				bool enable,
9083 				bool *lock_and_validation_needed)
9084 {
9085 	struct dm_atomic_state *dm_state = NULL;
9086 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9087 	struct dc_stream_state *new_stream;
9088 	int ret = 0;
9089 
9090 	/*
9091 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9092 	 * update changed items
9093 	 */
9094 	struct amdgpu_crtc *acrtc = NULL;
9095 	struct amdgpu_dm_connector *aconnector = NULL;
9096 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9097 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9098 
9099 	new_stream = NULL;
9100 
9101 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9102 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9103 	acrtc = to_amdgpu_crtc(crtc);
9104 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9105 
9106 	/* TODO This hack should go away */
9107 	if (aconnector && enable) {
9108 		/* Make sure fake sink is created in plug-in scenario */
9109 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9110 							    &aconnector->base);
9111 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9112 							    &aconnector->base);
9113 
9114 		if (IS_ERR(drm_new_conn_state)) {
9115 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9116 			goto fail;
9117 		}
9118 
9119 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9120 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9121 
9122 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9123 			goto skip_modeset;
9124 
9125 		new_stream = create_validate_stream_for_sink(aconnector,
9126 							     &new_crtc_state->mode,
9127 							     dm_new_conn_state,
9128 							     dm_old_crtc_state->stream);
9129 
		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */
9136 
9137 		if (!new_stream) {
9138 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9139 					__func__, acrtc->base.base.id);
9140 			ret = -ENOMEM;
9141 			goto fail;
9142 		}
9143 
9144 		/*
9145 		 * TODO: Check VSDB bits to decide whether this should
9146 		 * be enabled or not.
9147 		 */
9148 		new_stream->triggered_crtc_reset.enabled =
9149 			dm->force_timing_sync;
9150 
9151 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9152 
9153 		ret = fill_hdr_info_packet(drm_new_conn_state,
9154 					   &new_stream->hdr_static_metadata);
9155 		if (ret)
9156 			goto fail;
9157 
9158 		/*
9159 		 * If we already removed the old stream from the context
9160 		 * (and set the new stream to NULL) then we can't reuse
9161 		 * the old stream even if the stream and scaling are unchanged.
9162 		 * We'll hit the BUG_ON and black screen.
9163 		 *
9164 		 * TODO: Refactor this function to allow this check to work
9165 		 * in all conditions.
9166 		 */
9167 		if (amdgpu_freesync_vid_mode &&
9168 		    dm_new_crtc_state->stream &&
9169 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9170 			goto skip_modeset;
9171 
9172 		if (dm_new_crtc_state->stream &&
9173 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9174 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9175 			new_crtc_state->mode_changed = false;
9176 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9177 					 new_crtc_state->mode_changed);
9178 		}
9179 	}
9180 
9181 	/* mode_changed flag may get updated above, need to check again */
9182 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9183 		goto skip_modeset;
9184 
9185 	DRM_DEBUG_DRIVER(
9186 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9187 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9188 		"connectors_changed:%d\n",
9189 		acrtc->crtc_id,
9190 		new_crtc_state->enable,
9191 		new_crtc_state->active,
9192 		new_crtc_state->planes_changed,
9193 		new_crtc_state->mode_changed,
9194 		new_crtc_state->active_changed,
9195 		new_crtc_state->connectors_changed);
9196 
9197 	/* Remove stream for any changed/disabled CRTC */
9198 	if (!enable) {
9199 
9200 		if (!dm_old_crtc_state->stream)
9201 			goto skip_modeset;
9202 
9203 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9204 		    is_timing_unchanged_for_freesync(new_crtc_state,
9205 						     old_crtc_state)) {
9206 			new_crtc_state->mode_changed = false;
9207 			DRM_DEBUG_DRIVER(
9208 				"Mode change not required for front porch change, "
9209 				"setting mode_changed to %d",
9210 				new_crtc_state->mode_changed);
9211 
9212 			set_freesync_fixed_config(dm_new_crtc_state);
9213 
9214 			goto skip_modeset;
9215 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9216 			   is_freesync_video_mode(&new_crtc_state->mode,
9217 						  aconnector)) {
9218 			set_freesync_fixed_config(dm_new_crtc_state);
9219 		}
9220 
9221 		ret = dm_atomic_get_state(state, &dm_state);
9222 		if (ret)
9223 			goto fail;
9224 
9225 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9226 				crtc->base.id);
9227 
9228 		/* i.e. reset mode */
9229 		if (dc_remove_stream_from_ctx(
9230 				dm->dc,
9231 				dm_state->context,
9232 				dm_old_crtc_state->stream) != DC_OK) {
9233 			ret = -EINVAL;
9234 			goto fail;
9235 		}
9236 
9237 		dc_stream_release(dm_old_crtc_state->stream);
9238 		dm_new_crtc_state->stream = NULL;
9239 
9240 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9241 
9242 		*lock_and_validation_needed = true;
9243 
9244 	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream
		 * when MST connectors were added but are not found in the
		 * existing crtc_state in chained mode.
		 * TODO: dig out the root cause of this.
		 */
9250 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9251 			goto skip_modeset;
9252 
9253 		if (modereset_required(new_crtc_state))
9254 			goto skip_modeset;
9255 
9256 		if (modeset_required(new_crtc_state, new_stream,
9257 				     dm_old_crtc_state->stream)) {
9258 
9259 			WARN_ON(dm_new_crtc_state->stream);
9260 
9261 			ret = dm_atomic_get_state(state, &dm_state);
9262 			if (ret)
9263 				goto fail;
9264 
9265 			dm_new_crtc_state->stream = new_stream;
9266 
9267 			dc_stream_retain(new_stream);
9268 
9269 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
9270 						crtc->base.id);
9271 
9272 			if (dc_add_stream_to_ctx(
9273 					dm->dc,
9274 					dm_state->context,
9275 					dm_new_crtc_state->stream) != DC_OK) {
9276 				ret = -EINVAL;
9277 				goto fail;
9278 			}
9279 
9280 			*lock_and_validation_needed = true;
9281 		}
9282 	}
9283 
9284 skip_modeset:
9285 	/* Release extra reference */
9286 	if (new_stream)
		dc_stream_release(new_stream);
9288 
9289 	/*
9290 	 * We want to do dc stream updates that do not require a
9291 	 * full modeset below.
9292 	 */
9293 	if (!(enable && aconnector && new_crtc_state->active))
9294 		return 0;
9295 	/*
	 * Given the above conditions, the dc stream cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (it has just been added
	 *    to the dc context, or is already on the context),
	 * 2. it has a valid connector attached, and
	 * 3. it is currently active and enabled.
9301 	 * => The dc stream state currently exists.
9302 	 */
9303 	BUG_ON(dm_new_crtc_state->stream == NULL);
9304 
9305 	/* Scaling or underscan settings */
9306 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9307 		update_stream_scaling_settings(
9308 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9309 
9310 	/* ABM settings */
9311 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9312 
9313 	/*
9314 	 * Color management settings. We also update color properties
9315 	 * when a modeset is needed, to ensure it gets reprogrammed.
9316 	 */
9317 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9318 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9319 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9320 		if (ret)
9321 			goto fail;
9322 	}
9323 
9324 	/* Update Freesync settings. */
9325 	get_freesync_config_for_crtc(dm_new_crtc_state,
9326 				     dm_new_conn_state);
9327 
9328 	return ret;
9329 
9330 fail:
9331 	if (new_stream)
9332 		dc_stream_release(new_stream);
9333 	return ret;
9334 }
9335 
9336 static bool should_reset_plane(struct drm_atomic_state *state,
9337 			       struct drm_plane *plane,
9338 			       struct drm_plane_state *old_plane_state,
9339 			       struct drm_plane_state *new_plane_state)
9340 {
9341 	struct drm_plane *other;
9342 	struct drm_plane_state *old_other_state, *new_other_state;
9343 	struct drm_crtc_state *new_crtc_state;
9344 	int i;
9345 
	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
	 * the stream.
	 */
9351 	if (state->allow_modeset)
9352 		return true;
9353 
9354 	/* Exit early if we know that we're adding or removing the plane. */
9355 	if (old_plane_state->crtc != new_plane_state->crtc)
9356 		return true;
9357 
9358 	/* old crtc == new_crtc == NULL, plane not in context. */
9359 	if (!new_plane_state->crtc)
9360 		return false;
9361 
9362 	new_crtc_state =
9363 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9364 
9365 	if (!new_crtc_state)
9366 		return true;
9367 
9368 	/* CRTC Degamma changes currently require us to recreate planes. */
9369 	if (new_crtc_state->color_mgmt_changed)
9370 		return true;
9371 
9372 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9373 		return true;
9374 
9375 	/*
9376 	 * If there are any new primary or overlay planes being added or
9377 	 * removed then the z-order can potentially change. To ensure
9378 	 * correct z-order and pipe acquisition the current DC architecture
9379 	 * requires us to remove and recreate all existing planes.
9380 	 *
9381 	 * TODO: Come up with a more elegant solution for this.
9382 	 */
9383 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
9386 			continue;
9387 
9388 		if (old_other_state->crtc != new_plane_state->crtc &&
9389 		    new_other_state->crtc != new_plane_state->crtc)
9390 			continue;
9391 
9392 		if (old_other_state->crtc != new_other_state->crtc)
9393 			return true;
9394 
9395 		/* Src/dst size and scaling updates. */
9396 		if (old_other_state->src_w != new_other_state->src_w ||
9397 		    old_other_state->src_h != new_other_state->src_h ||
9398 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9399 		    old_other_state->crtc_h != new_other_state->crtc_h)
9400 			return true;
9401 
9402 		/* Rotation / mirroring updates. */
9403 		if (old_other_state->rotation != new_other_state->rotation)
9404 			return true;
9405 
9406 		/* Blending updates. */
9407 		if (old_other_state->pixel_blend_mode !=
9408 		    new_other_state->pixel_blend_mode)
9409 			return true;
9410 
9411 		/* Alpha updates. */
9412 		if (old_other_state->alpha != new_other_state->alpha)
9413 			return true;
9414 
9415 		/* Colorspace changes. */
9416 		if (old_other_state->color_range != new_other_state->color_range ||
9417 		    old_other_state->color_encoding != new_other_state->color_encoding)
9418 			return true;
9419 
9420 		/* Framebuffer checks fall at the end. */
9421 		if (!old_other_state->fb || !new_other_state->fb)
9422 			continue;
9423 
9424 		/* Pixel format changes can require bandwidth updates. */
9425 		if (old_other_state->fb->format != new_other_state->fb->format)
9426 			return true;
9427 
9428 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9429 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9430 
9431 		/* Tiling and DCC changes also require bandwidth updates. */
9432 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9433 		    old_afb->base.modifier != new_afb->base.modifier)
9434 			return true;
9435 	}
9436 
9437 	return false;
9438 }
9439 
9440 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9441 			      struct drm_plane_state *new_plane_state,
9442 			      struct drm_framebuffer *fb)
9443 {
9444 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9445 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9446 	unsigned int pitch;
9447 	bool linear;
9448 
9449 	if (fb->width > new_acrtc->max_cursor_width ||
9450 	    fb->height > new_acrtc->max_cursor_height) {
9451 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9452 				 new_plane_state->fb->width,
9453 				 new_plane_state->fb->height);
9454 		return -EINVAL;
9455 	}
9456 	if (new_plane_state->src_w != fb->width << 16 ||
9457 	    new_plane_state->src_h != fb->height << 16) {
9458 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9459 		return -EINVAL;
9460 	}
9461 
9462 	/* Pitch in pixels */
9463 	pitch = fb->pitches[0] / fb->format->cpp[0];
9464 
9465 	if (fb->width != pitch) {
9466 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9467 				 fb->width, pitch);
9468 		return -EINVAL;
9469 	}
9470 
9471 	switch (pitch) {
9472 	case 64:
9473 	case 128:
9474 	case 256:
9475 		/* FB pitch is supported by cursor plane */
9476 		break;
9477 	default:
9478 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9479 		return -EINVAL;
9480 	}
9481 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
9484 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9485 		if (adev->family < AMDGPU_FAMILY_AI) {
9486 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9487 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9488 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9489 		} else {
9490 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9491 		}
9492 		if (!linear) {
9493 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9494 			return -EINVAL;
9495 		}
9496 	}
9497 
9498 	return 0;
9499 }
9500 
9501 static int dm_update_plane_state(struct dc *dc,
9502 				 struct drm_atomic_state *state,
9503 				 struct drm_plane *plane,
9504 				 struct drm_plane_state *old_plane_state,
9505 				 struct drm_plane_state *new_plane_state,
9506 				 bool enable,
9507 				 bool *lock_and_validation_needed)
9508 {
9510 	struct dm_atomic_state *dm_state = NULL;
9511 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9512 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9513 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9514 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9515 	struct amdgpu_crtc *new_acrtc;
9516 	bool needs_reset;
9517 	int ret = 0;
9518 
9520 	new_plane_crtc = new_plane_state->crtc;
9521 	old_plane_crtc = old_plane_state->crtc;
9522 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9523 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9524 
9525 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
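		/*
		 * Cursor planes are never handed to DC as regular planes; they
		 * are programmed through the dc_stream cursor calls at commit
		 * time, so only basic framebuffer validation is done here.
		 */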
9526 		if (!enable || !new_plane_crtc ||
9527 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9528 			return 0;
9529 
9530 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9531 
9532 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9533 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9534 			return -EINVAL;
9535 		}
9536 
9537 		if (new_plane_state->fb) {
9538 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9539 						 new_plane_state->fb);
9540 			if (ret)
9541 				return ret;
9542 		}
9543 
9544 		return 0;
9545 	}
9546 
9547 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9548 					 new_plane_state);
9549 
9550 	/* Remove any changed/removed planes */
9551 	if (!enable) {
9552 		if (!needs_reset)
9553 			return 0;
9554 
9555 		if (!old_plane_crtc)
9556 			return 0;
9557 
9558 		old_crtc_state = drm_atomic_get_old_crtc_state(
9559 				state, old_plane_crtc);
9560 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9561 
9562 		if (!dm_old_crtc_state->stream)
9563 			return 0;
9564 
9565 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9566 				plane->base.id, old_plane_crtc->base.id);
9567 
9568 		ret = dm_atomic_get_state(state, &dm_state);
9569 		if (ret)
9570 			return ret;
9571 
9572 		if (!dc_remove_plane_from_context(
9573 				dc,
9574 				dm_old_crtc_state->stream,
9575 				dm_old_plane_state->dc_state,
9576 				dm_state->context)) {
			return -EINVAL;
		}

9582 		dc_plane_state_release(dm_old_plane_state->dc_state);
9583 		dm_new_plane_state->dc_state = NULL;
9584 
9585 		*lock_and_validation_needed = true;
9586 
9587 	} else { /* Add new planes */
9588 		struct dc_plane_state *dc_new_plane_state;
9589 
9590 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9591 			return 0;
9592 
9593 		if (!new_plane_crtc)
9594 			return 0;
9595 
9596 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9597 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9598 
9599 		if (!dm_new_crtc_state->stream)
9600 			return 0;
9601 
9602 		if (!needs_reset)
9603 			return 0;
9604 
9605 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9606 		if (ret)
9607 			return ret;
9608 
9609 		WARN_ON(dm_new_plane_state->dc_state);
9610 
9611 		dc_new_plane_state = dc_create_plane_state(dc);
9612 		if (!dc_new_plane_state)
9613 			return -ENOMEM;
9614 
9615 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9616 				plane->base.id, new_plane_crtc->base.id);
9617 
9618 		ret = fill_dc_plane_attributes(
9619 			drm_to_adev(new_plane_crtc->dev),
9620 			dc_new_plane_state,
9621 			new_plane_state,
9622 			new_crtc_state);
9623 		if (ret) {
9624 			dc_plane_state_release(dc_new_plane_state);
9625 			return ret;
9626 		}
9627 
9628 		ret = dm_atomic_get_state(state, &dm_state);
9629 		if (ret) {
9630 			dc_plane_state_release(dc_new_plane_state);
9631 			return ret;
9632 		}
9633 
9634 		/*
9635 		 * Any atomic check errors that occur after this will
9636 		 * not need a release. The plane state will be attached
9637 		 * to the stream, and therefore part of the atomic
9638 		 * state. It'll be released when the atomic state is
9639 		 * cleaned.
9640 		 */
9641 		if (!dc_add_plane_to_context(
9642 				dc,
9643 				dm_new_crtc_state->stream,
9644 				dc_new_plane_state,
9645 				dm_state->context)) {
9646 
9647 			dc_plane_state_release(dc_new_plane_state);
9648 			return -EINVAL;
9649 		}
9650 
9651 		dm_new_plane_state->dc_state = dc_new_plane_state;
9652 
9653 		/* Tell DC to do a full surface update every time there
9654 		 * is a plane change. Inefficient, but works for now.
9655 		 */
9656 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9657 
9658 		*lock_and_validation_needed = true;
9659 	}
9660 
9662 	return ret;
9663 }
9664 
9665 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9666 				struct drm_crtc *crtc,
9667 				struct drm_crtc_state *new_crtc_state)
9668 {
9669 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9670 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9671 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe, but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * blending properties match the primary plane's.
	 */
9676 
9677 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9678 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb)
		return 0;
9682 
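	/*
	 * Compare scale factors in 1/1000 units: crtc_w/h are in whole pixels
	 * while src_w/h are 16.16 fixed point, hence the >> 16.
	 */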
9683 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9684 			 (new_cursor_state->src_w >> 16);
9685 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9686 			 (new_cursor_state->src_h >> 16);
9687 
9688 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9689 			 (new_primary_state->src_w >> 16);
9690 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9691 			 (new_primary_state->src_h >> 16);
9692 
9693 	if (cursor_scale_w != primary_scale_w ||
9694 	    cursor_scale_h != primary_scale_h) {
9695 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9696 		return -EINVAL;
9697 	}
9698 
9699 	return 0;
9700 }
9701 
9702 #if defined(CONFIG_DRM_AMD_DC_DCN)
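/*
 * When a CRTC driving an MST connector goes through a modeset, the DSC
 * bandwidth split on the shared topology may change, so pull every CRTC on
 * that MST manager into the atomic state for revalidation.
 */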
9703 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9704 {
9705 	struct drm_connector *connector;
9706 	struct drm_connector_state *conn_state;
9707 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
9710 		if (conn_state->crtc != crtc)
9711 			continue;
9712 
9713 		aconnector = to_amdgpu_dm_connector(connector);
9714 		if (!aconnector->port || !aconnector->mst_port)
9715 			aconnector = NULL;
9716 		else
9717 			break;
9718 	}
9719 
9720 	if (!aconnector)
9721 		return 0;
9722 
9723 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9724 }
9725 #endif
9726 
9727 /**
9728  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9729  * @dev: The DRM device
9730  * @state: The atomic state to commit
9731  *
9732  * Validate that the given atomic state is programmable by DC into hardware.
9733  * This involves constructing a &struct dc_state reflecting the new hardware
9734  * state we wish to commit, then querying DC to see if it is programmable. It's
9735  * important not to modify the existing DC state. Otherwise, atomic_check
9736  * may unexpectedly commit hardware changes.
9737  *
9738  * When validating the DC state, it's important that the right locks are
9739  * acquired. For full updates case which removes/adds/updates streams on one
9740  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9741  * that any such full update commit will wait for completion of any outstanding
9742  * flip using DRMs synchronization events.
9743  *
9744  * Note that DM adds the affected connectors for all CRTCs in state, when that
9745  * might not seem necessary. This is because DC stream creation requires the
9746  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9747  * be possible but non-trivial - a possible TODO item.
9748  *
 * Return: 0 on success, or a negative error code if validation failed.
9750  */
9751 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9752 				  struct drm_atomic_state *state)
9753 {
9754 	struct amdgpu_device *adev = drm_to_adev(dev);
9755 	struct dm_atomic_state *dm_state = NULL;
9756 	struct dc *dc = adev->dm.dc;
9757 	struct drm_connector *connector;
9758 	struct drm_connector_state *old_con_state, *new_con_state;
9759 	struct drm_crtc *crtc;
9760 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9761 	struct drm_plane *plane;
9762 	struct drm_plane_state *old_plane_state, *new_plane_state;
9763 	enum dc_status status;
9764 	int ret, i;
9765 	bool lock_and_validation_needed = false;
9766 	struct dm_crtc_state *dm_old_crtc_state;
9767 
9768 	trace_amdgpu_dm_atomic_check_begin(state);
9769 
9770 	ret = drm_atomic_helper_check_modeset(dev, state);
9771 	if (ret)
9772 		goto fail;
9773 
9774 	/* Check connector changes */
9775 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9776 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9777 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9778 
9779 		/* Skip connectors that are disabled or part of modeset already. */
9780 		if (!old_con_state->crtc && !new_con_state->crtc)
9781 			continue;
9782 
9783 		if (!new_con_state->crtc)
9784 			continue;
9785 
9786 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9787 		if (IS_ERR(new_crtc_state)) {
9788 			ret = PTR_ERR(new_crtc_state);
9789 			goto fail;
9790 		}
9791 
9792 		if (dm_old_con_state->abm_level !=
9793 		    dm_new_con_state->abm_level)
9794 			new_crtc_state->connectors_changed = true;
9795 	}
9796 
9797 #if defined(CONFIG_DRM_AMD_DC_DCN)
9798 	if (dc_resource_is_dsc_encoding_supported(dc)) {
9799 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9800 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9801 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9802 				if (ret)
9803 					goto fail;
9804 			}
9805 		}
9806 	}
9807 #endif
9808 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9809 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9810 
9811 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9812 		    !new_crtc_state->color_mgmt_changed &&
9813 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9814 			dm_old_crtc_state->dsc_force_changed == false)
9815 			continue;
9816 
9817 		if (!new_crtc_state->enable)
9818 			continue;
9819 
9820 		ret = drm_atomic_add_affected_connectors(state, crtc);
9821 		if (ret)
			goto fail;
9823 
9824 		ret = drm_atomic_add_affected_planes(state, crtc);
9825 		if (ret)
9826 			goto fail;
9827 
9828 		if (dm_old_crtc_state->dsc_force_changed)
9829 			new_crtc_state->mode_changed = true;
9830 	}
9831 
9832 	/*
9833 	 * Add all primary and overlay planes on the CRTC to the state
9834 	 * whenever a plane is enabled to maintain correct z-ordering
9835 	 * and to enable fast surface updates.
9836 	 */
9837 	drm_for_each_crtc(crtc, dev) {
9838 		bool modified = false;
9839 
9840 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9841 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9842 				continue;
9843 
9844 			if (new_plane_state->crtc == crtc ||
9845 			    old_plane_state->crtc == crtc) {
9846 				modified = true;
9847 				break;
9848 			}
9849 		}
9850 
9851 		if (!modified)
9852 			continue;
9853 
9854 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9855 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9856 				continue;
9857 
9858 			new_plane_state =
9859 				drm_atomic_get_plane_state(state, plane);
9860 
9861 			if (IS_ERR(new_plane_state)) {
9862 				ret = PTR_ERR(new_plane_state);
9863 				goto fail;
9864 			}
9865 		}
9866 	}
9867 
	/* Remove existing planes if they are modified */
9869 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9870 		ret = dm_update_plane_state(dc, state, plane,
9871 					    old_plane_state,
9872 					    new_plane_state,
9873 					    false,
9874 					    &lock_and_validation_needed);
9875 		if (ret)
9876 			goto fail;
9877 	}
9878 
9879 	/* Disable all crtcs which require disable */
9880 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9881 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9882 					   old_crtc_state,
9883 					   new_crtc_state,
9884 					   false,
9885 					   &lock_and_validation_needed);
9886 		if (ret)
9887 			goto fail;
9888 	}
9889 
9890 	/* Enable all crtcs which require enable */
9891 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9892 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9893 					   old_crtc_state,
9894 					   new_crtc_state,
9895 					   true,
9896 					   &lock_and_validation_needed);
9897 		if (ret)
9898 			goto fail;
9899 	}
9900 
9901 	/* Add new/modified planes */
9902 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9903 		ret = dm_update_plane_state(dc, state, plane,
9904 					    old_plane_state,
9905 					    new_plane_state,
9906 					    true,
9907 					    &lock_and_validation_needed);
9908 		if (ret)
9909 			goto fail;
9910 	}
9911 
9912 	/* Run this here since we want to validate the streams we created */
9913 	ret = drm_atomic_helper_check_planes(dev, state);
9914 	if (ret)
9915 		goto fail;
9916 
9917 	/* Check cursor planes scaling */
9918 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9919 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9920 		if (ret)
9921 			goto fail;
9922 	}
9923 
9924 	if (state->legacy_cursor_update) {
9925 		/*
9926 		 * This is a fast cursor update coming from the plane update
9927 		 * helper, check if it can be done asynchronously for better
9928 		 * performance.
9929 		 */
9930 		state->async_update =
9931 			!drm_atomic_helper_async_check(dev, state);
9932 
9933 		/*
9934 		 * Skip the remaining global validation if this is an async
9935 		 * update. Cursor updates can be done without affecting
9936 		 * state or bandwidth calcs and this avoids the performance
9937 		 * penalty of locking the private state object and
9938 		 * allocating a new dc_state.
9939 		 */
9940 		if (state->async_update)
9941 			return 0;
9942 	}
9943 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into the context w/o causing a full reset. Need to
	 * decide how to handle this.
	 */
9949 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9950 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9951 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9952 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9953 
9954 		/* Skip any modesets/resets */
9955 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9956 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9957 			continue;
9958 
		/* Skip anything that is not a scaling or underscan change */
9960 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9961 			continue;
9962 
9963 		lock_and_validation_needed = true;
9964 	}
9965 
	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We currently have to stall here in atomic_check for outstanding
	 * commits to finish, because our IRQ handlers reference DRM state
	 * directly - we can end up disabling interrupts too early if we
	 * don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
9979 	if (lock_and_validation_needed) {
9980 		ret = dm_atomic_get_state(state, &dm_state);
9981 		if (ret)
9982 			goto fail;
9983 
		ret = do_acquire_global_lock(dev, state);
9985 		if (ret)
9986 			goto fail;
9987 
9988 #if defined(CONFIG_DRM_AMD_DC_DCN)
9989 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9990 			goto fail;
9991 
9992 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9993 		if (ret)
9994 			goto fail;
9995 #endif
9996 
9997 		/*
9998 		 * Perform validation of MST topology in the state:
9999 		 * We need to perform MST atomic check before calling
10000 		 * dc_validate_global_state(), or there is a chance
10001 		 * to get stuck in an infinite loop and hang eventually.
10002 		 */
10003 		ret = drm_dp_mst_atomic_check(state);
10004 		if (ret)
10005 			goto fail;
10006 		status = dc_validate_global_state(dc, dm_state->context, false);
10007 		if (status != DC_OK) {
10008 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10009 				       dc_status_to_str(status), status);
10010 			ret = -EINVAL;
10011 			goto fail;
10012 		}
10013 	} else {
10014 		/*
10015 		 * The commit is a fast update. Fast updates shouldn't change
10016 		 * the DC context, affect global validation, and can have their
10017 		 * commit work done in parallel with other commits not touching
10018 		 * the same resource. If we have a new DC context as part of
10019 		 * the DM atomic state from validation we need to free it and
10020 		 * retain the existing one instead.
10021 		 *
10022 		 * Furthermore, since the DM atomic state only contains the DC
10023 		 * context and can safely be annulled, we can free the state
10024 		 * and clear the associated private object now to free
10025 		 * some memory and avoid a possible use-after-free later.
10026 		 */
10027 
10028 		for (i = 0; i < state->num_private_objs; i++) {
10029 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10030 
10031 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
10032 				int j = state->num_private_objs-1;
10033 
10034 				dm_atomic_destroy_state(obj,
10035 						state->private_objs[i].state);
10036 
10037 				/* If i is not at the end of the array then the
10038 				 * last element needs to be moved to where i was
10039 				 * before the array can safely be truncated.
10040 				 */
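				/* e.g. with private objs [A, B, C] and a
				 * match at i == 0: C is copied into slot 0,
				 * slot 2 is cleared and num_private_objs
				 * drops from 3 to 2.
				 */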
10041 				if (i != j)
10042 					state->private_objs[i] =
10043 						state->private_objs[j];
10044 
10045 				state->private_objs[j].ptr = NULL;
10046 				state->private_objs[j].state = NULL;
10047 				state->private_objs[j].old_state = NULL;
10048 				state->private_objs[j].new_state = NULL;
10049 
10050 				state->num_private_objs = j;
10051 				break;
10052 			}
10053 		}
10054 	}
10055 
10056 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10058 		struct dm_crtc_state *dm_new_crtc_state =
10059 			to_dm_crtc_state(new_crtc_state);
10060 
10061 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10062 							 UPDATE_TYPE_FULL :
10063 							 UPDATE_TYPE_FAST;
10064 	}
10065 
	/* ret must be 0 (success) at this point */
10067 	WARN_ON(ret);
10068 
10069 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10070 
10071 	return ret;
10072 
10073 fail:
10074 	if (ret == -EDEADLK)
10075 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10076 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10077 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10078 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10080 
10081 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10082 
10083 	return ret;
10084 }
10085 
10086 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10087 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10088 {
10089 	uint8_t dpcd_data;
10090 	bool capable = false;
10091 
10092 	if (amdgpu_dm_connector->dc_link &&
10093 		dm_helpers_dp_read_dpcd(
10094 				NULL,
10095 				amdgpu_dm_connector->dc_link,
10096 				DP_DOWN_STREAM_PORT_COUNT,
10097 				&dpcd_data,
10098 				sizeof(dpcd_data))) {
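		/* DP_MSA_TIMING_PAR_IGNORED in DOWN_STREAM_PORT_COUNT means
		 * the sink can be driven with the MSA timing parameters
		 * ignored - a prerequisite for the variable refresh timings
		 * FreeSync generates.
		 */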
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10100 	}
10101 
10102 	return capable;
10103 }
10104 
10105 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10106 		uint8_t *edid_ext, int len,
10107 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10108 {
10109 	int i;
10110 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10111 	struct dc *dc = adev->dm.dc;
10112 
10113 	/* send extension block to DMCU for parsing */
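	/*
	 * Every chunk except the last is acknowledged with the next expected
	 * offset; the final chunk completes the block and yields the AMD
	 * VSDB parse result instead of an ack.
	 */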
10114 	for (i = 0; i < len; i += 8) {
10115 		bool res;
10116 		int offset;
10117 
		/* send 8 bytes at a time */
10119 		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10120 			return false;
10121 
		if (i + 8 == len) {
			/* EDID block fully sent; expect the parse result */
10124 			int version, min_rate, max_rate;
10125 
10126 			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10127 			if (res) {
10128 				/* amd vsdb found */
10129 				vsdb_info->freesync_supported = 1;
10130 				vsdb_info->amd_vsdb_version = version;
10131 				vsdb_info->min_refresh_rate_hz = min_rate;
10132 				vsdb_info->max_refresh_rate_hz = max_rate;
10133 				return true;
10134 			}
10135 			/* not amd vsdb */
10136 			return false;
10137 		}
10138 
		/* check for ack */
10140 		res = dc_edid_parser_recv_cea_ack(dc, &offset);
10141 		if (!res)
10142 			return false;
10143 	}
10144 
10145 	return false;
10146 }
10147 
10148 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10149 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10150 {
10151 	uint8_t *edid_ext = NULL;
10152 	int i;
10153 	bool valid_vsdb_found = false;
10154 
10155 	/*----- drm_find_cea_extension() -----*/
10156 	/* No EDID or EDID extensions */
10157 	if (edid == NULL || edid->extensions == 0)
10158 		return -ENODEV;
10159 
10160 	/* Find CEA extension */
10161 	for (i = 0; i < edid->extensions; i++) {
10162 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10163 		if (edid_ext[0] == CEA_EXT)
10164 			break;
10165 	}
10166 
10167 	if (i == edid->extensions)
10168 		return -ENODEV;
10169 
10170 	/*----- cea_db_offsets() -----*/
10171 	if (edid_ext[0] != CEA_EXT)
10172 		return -ENODEV;
10173 
10174 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10175 
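	/* On success, return the index of the CEA extension block that was
	 * parsed; callers only test for a non-negative result.
	 */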
10176 	return valid_vsdb_found ? i : -ENODEV;
10177 }
10178 
10179 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10180 					struct edid *edid)
10181 {
10182 	int i = 0;
10183 	struct detailed_timing *timing;
10184 	struct detailed_non_pixel *data;
10185 	struct detailed_data_monitor_range *range;
10186 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10187 			to_amdgpu_dm_connector(connector);
10188 	struct dm_connector_state *dm_con_state = NULL;
10189 
10190 	struct drm_device *dev = connector->dev;
10191 	struct amdgpu_device *adev = drm_to_adev(dev);
10192 	bool freesync_capable = false;
10193 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10194 
10195 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
10197 		goto update;
10198 	}
10199 
10200 	if (!edid) {
10201 		dm_con_state = to_dm_connector_state(connector->state);
10202 
10203 		amdgpu_dm_connector->min_vfreq = 0;
10204 		amdgpu_dm_connector->max_vfreq = 0;
10205 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10206 
10207 		goto update;
10208 	}
10209 
10210 	dm_con_state = to_dm_connector_state(connector->state);
10211 
10212 	if (!amdgpu_dm_connector->dc_sink) {
10213 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10214 		goto update;
10215 	}
10216 	if (!adev->dm.freesync_module)
		goto update;

10220 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10221 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10222 		bool edid_check_required = false;
10223 
10224 		if (edid) {
10225 			edid_check_required = is_dp_capable_without_timing_msa(
10226 						adev->dm.dc,
10227 						amdgpu_dm_connector);
10228 		}
10229 
		if (edid_check_required && (edid->version > 1 ||
		   (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
10234 				timing	= &edid->detailed_timings[i];
10235 				data	= &timing->data.other_data;
10236 				range	= &data->data.range;
10237 				/*
10238 				 * Check if monitor has continuous frequency mode
10239 				 */
10240 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10241 					continue;
10242 				/*
10243 				 * Check for flag range limits only. If flag == 1 then
10244 				 * no additional timing information provided.
10245 				 * Default GTF, GTF Secondary curve and CVT are not
10246 				 * supported
10247 				 */
10248 				if (range->flags != 1)
10249 					continue;
10250 
10251 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10252 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
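				/* the raw EDID range descriptor stores the
				 * maximum pixel clock in units of 10 MHz, so
				 * scale it to MHz here
				 */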
10253 				amdgpu_dm_connector->pixel_clock_mhz =
10254 					range->pixel_clock_mhz * 10;
10255 
10256 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10257 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10258 
10259 				break;
10260 			}
10261 
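			/* A VRR window of 10 Hz or less is too narrow to be
			 * useful, so only report FreeSync capability above
			 * that.
			 */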
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
10267 		}
10268 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10269 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10270 		if (i >= 0 && vsdb_info.freesync_supported) {
10274 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10275 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10276 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10277 				freesync_capable = true;
10278 
10279 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10280 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10281 		}
10282 	}
10283 
10284 update:
10285 	if (dm_con_state)
10286 		dm_con_state->freesync_capable = freesync_capable;
10287 
10288 	if (connector->vrr_capable_property)
10289 		drm_connector_set_vrr_capable_property(connector,
10290 						       freesync_capable);
10291 }
10292 
10293 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10294 {
10295 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10296 
10297 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10298 		return;
10299 	if (link->type == dc_connection_none)
10300 		return;
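	/* DPCD register DP_PSR_SUPPORT reports the sink's PSR version;
	 * 0 means the sink does not support PSR.
	 */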
10301 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10302 					dpcd_data, sizeof(dpcd_data))) {
10303 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10304 
10305 		if (dpcd_data[0] == 0) {
10306 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10307 			link->psr_settings.psr_feature_enabled = false;
10308 		} else {
10309 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
10310 			link->psr_settings.psr_feature_enabled = true;
10311 		}
10312 
		DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
10314 	}
10315 }
10316 
/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success
 */
10323 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10324 {
10325 	struct dc_link *link = NULL;
10326 	struct psr_config psr_config = {0};
10327 	struct psr_context psr_context = {0};
10328 	bool ret = false;
10329 
10330 	if (stream == NULL)
10331 		return false;
10332 
10333 	link = stream->link;
10334 
10335 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10336 
10337 	if (psr_config.psr_version > 0) {
10338 		psr_config.psr_exit_link_training_required = 0x1;
10339 		psr_config.psr_frame_capture_indication_req = 0;
10340 		psr_config.psr_rfb_setup_time = 0x37;
10341 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10342 		psr_config.allow_smu_optimizations = 0x0;
10343 
		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10348 
10349 	return ret;
10350 }
10351 
/*
 * amdgpu_dm_psr_enable() - enable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
10358 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10359 {
10360 	struct dc_link *link = stream->link;
10361 	unsigned int vsync_rate_hz = 0;
10362 	struct dc_static_screen_params params = {0};
	/* Calculate the number of static frames before generating an
	 * interrupt to enter PSR. Initialize to a fail-safe default of
	 * 2 static frames.
	 */
	unsigned int num_frames_static = 2;
10368 
10369 	DRM_DEBUG_DRIVER("Enabling psr...\n");
10370 
10371 	vsync_rate_hz = div64_u64(div64_u64((
10372 			stream->timing.pix_clk_100hz * 100),
10373 			stream->timing.v_total),
10374 			stream->timing.h_total);
10375 
	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
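	/* e.g. a 148.5 MHz pixel clock with a 2200x1125 total (1080p at
	 * 60 Hz) gives a 16666 us frame time, so num_frames_static
	 * becomes 30000 / 16666 + 1 = 2.
	 */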
10384 
10385 	params.triggers.cursor_update = true;
10386 	params.triggers.overlay_update = true;
10387 	params.triggers.surface_update = true;
10388 	params.num_frames = num_frames_static;
10389 
10390 	dc_stream_set_static_screen_params(link->ctx->dc,
10391 					   &stream, 1,
10392 					   &params);
10393 
10394 	return dc_link_set_psr_allow_active(link, true, false, false);
10395 }
10396 
/*
 * amdgpu_dm_psr_disable() - disable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
10403 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");
10407 
10408 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
10409 }
10410 
/*
 * amdgpu_dm_psr_disable_all() - disable the PSR firmware for all streams,
 * if PSR is enabled on any stream
 *
 * Return: true on success
 */
10417 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10418 {
10419 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10420 	return dc_set_psr_allow_active(dm->dc, false);
10421 }
10422 
10423 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10424 {
10425 	struct amdgpu_device *adev = drm_to_adev(dev);
10426 	struct dc *dc = adev->dm.dc;
10427 	int i;
10428 
10429 	mutex_lock(&adev->dm.dc_lock);
10430 	if (dc->current_state) {
10431 		for (i = 0; i < dc->current_state->stream_count; ++i)
10432 			dc->current_state->streams[i]
10433 				->triggered_crtc_reset.enabled =
10434 				adev->dm.force_timing_sync;
10435 
10436 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10437 		dc_trigger_sync(dc, dc->current_state);
10438 	}
10439 	mutex_unlock(&adev->dm.dc_lock);
10440 }
10441 
10442 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10443 		       uint32_t value, const char *func_name)
10444 {
10445 #ifdef DM_CHECK_ADDR_0
10446 	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
10448 		return;
10449 	}
10450 #endif
10451 	cgs_write_register(ctx->cgs_device, address, value);
10452 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10453 }
10454 
10455 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10456 			  const char *func_name)
10457 {
10458 	uint32_t value;
10459 #ifdef DM_CHECK_ADDR_0
10460 	if (address == 0) {
10461 		DC_ERR("invalid register read; address = 0\n");
10462 		return 0;
10463 	}
10464 #endif
10465 
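	/* Register reads cannot be serviced while accesses are being
	 * gathered for DMUB offload, unless the gather is a pure burst
	 * write; treat such a read as a programming error.
	 */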
10466 	if (ctx->dmub_srv &&
10467 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10468 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10469 		ASSERT(false);
10470 		return 0;
10471 	}
10472 
10473 	value = cgs_read_register(ctx->cgs_device, address);
10474 
10475 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10476 
10477 	return value;
10478 }
10479