1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
38 
39 #include "vid.h"
40 #include "amdgpu.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
43 #include "atom.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
48 #endif
49 #include "amdgpu_pm.h"
50 
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
57 #endif
58 
59 #include "ivsrcid/ivsrcid_vislands30.h"
60 
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107 
108 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110 
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113 
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116 
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119 
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
129 
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
135 {
136 	switch (link->dpcd_caps.dongle_type) {
137 	case DISPLAY_DONGLE_NONE:
138 		return DRM_MODE_SUBCONNECTOR_Native;
139 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140 		return DRM_MODE_SUBCONNECTOR_VGA;
141 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
143 		return DRM_MODE_SUBCONNECTOR_DVID;
144 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146 		return DRM_MODE_SUBCONNECTOR_HDMIA;
147 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
148 	default:
149 		return DRM_MODE_SUBCONNECTOR_Unknown;
150 	}
151 }
152 
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
154 {
155 	struct dc_link *link = aconnector->dc_link;
156 	struct drm_connector *connector = &aconnector->base;
157 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
158 
159 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
160 		return;
161 
162 	if (aconnector->dc_sink)
163 		subconnector = get_subconnector_type(link);
164 
165 	drm_object_property_set_value(&connector->base,
166 			connector->dev->mode_config.dp_subconnector_property,
167 			subconnector);
168 }
169 
170 /*
 * Initializes drm_device display-related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
174  *
175  * Returns 0 on success
176  */
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
180 
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182 				struct drm_plane *plane,
183 				unsigned long possible_crtcs,
184 				const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186 			       struct drm_plane *plane,
187 			       uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
190 				    uint32_t link_index,
191 				    struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193 				  struct amdgpu_encoder *aencoder,
194 				  uint32_t link_index);
195 
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
197 
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
199 
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 				  struct drm_atomic_state *state);
202 
203 static void handle_cursor_update(struct drm_plane *plane,
204 				 struct drm_plane_state *old_plane_state);
205 
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
211 
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
214 
215 /*
216  * dm_vblank_get_counter
217  *
218  * @brief
219  * Get counter for number of vertical blanks
220  *
221  * @param
222  * struct amdgpu_device *adev - [in] desired amdgpu device
223  * int disp_idx - [in] which CRTC to get the counter from
224  *
225  * @return
226  * Counter for vertical blanks
227  */
228 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
229 {
230 	if (crtc >= adev->mode_info.num_crtc)
231 		return 0;
232 	else {
233 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
234 
235 		if (acrtc->dm_irq_params.stream == NULL) {
236 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
237 				  crtc);
238 			return 0;
239 		}
240 
241 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
242 	}
243 }
244 
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO: rework base driver to use values directly.
	 * For now parse it back into reg-format.
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}
277 
278 static bool dm_is_idle(void *handle)
279 {
280 	/* XXX todo */
281 	return true;
282 }
283 
284 static int dm_wait_for_idle(void *handle)
285 {
286 	/* XXX todo */
287 	return 0;
288 }
289 
290 static bool dm_check_soft_reset(void *handle)
291 {
292 	return false;
293 }
294 
295 static int dm_soft_reset(void *handle)
296 {
297 	/* XXX todo */
298 	return 0;
299 }
300 
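/* Map an OTG (output timing generator) instance back to the CRTC it drives. */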
301 static struct amdgpu_crtc *
302 get_crtc_by_otg_inst(struct amdgpu_device *adev,
303 		     int otg_inst)
304 {
305 	struct drm_device *dev = adev_to_drm(adev);
306 	struct drm_crtc *crtc;
307 	struct amdgpu_crtc *amdgpu_crtc;
308 
309 	if (otg_inst == -1) {
310 		WARN_ON(1);
311 		return adev->mode_info.crtcs[0];
312 	}
313 
314 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
315 		amdgpu_crtc = to_amdgpu_crtc(crtc);
316 
317 		if (amdgpu_crtc->otg_inst == otg_inst)
318 			return amdgpu_crtc;
319 	}
320 
321 	return NULL;
322 }
323 
324 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
325 {
326 	return acrtc->dm_irq_params.freesync_config.state ==
327 		       VRR_STATE_ACTIVE_VARIABLE ||
328 	       acrtc->dm_irq_params.freesync_config.state ==
329 		       VRR_STATE_ACTIVE_FIXED;
330 }
331 
332 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
333 {
334 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
335 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
336 }
337 
338 /**
339  * dm_pflip_high_irq() - Handle pageflip interrupt
340  * @interrupt_params: ignored
341  *
342  * Handles the pageflip interrupt by notifying all interested parties
343  * that the pageflip has been completed.
344  */
345 static void dm_pflip_high_irq(void *interrupt_params)
346 {
347 	struct amdgpu_crtc *amdgpu_crtc;
348 	struct common_irq_params *irq_params = interrupt_params;
349 	struct amdgpu_device *adev = irq_params->adev;
350 	unsigned long flags;
351 	struct drm_pending_vblank_event *e;
352 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
353 	bool vrr_active;
354 
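	/*
	 * IRQ sources are registered per OTG instance, so subtracting the
	 * pageflip base source recovers the OTG instance that fired.
	 */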
355 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
356 
357 	/* IRQ could occur when in initial stage */
358 	/* TODO work and BO cleanup */
359 	if (amdgpu_crtc == NULL) {
360 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
361 		return;
362 	}
363 
364 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
365 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
372 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
373 		return;
374 	}
375 
376 	/* page flip completed. */
377 	e = amdgpu_crtc->event;
378 	amdgpu_crtc->event = NULL;
379 
	WARN_ON(!e);
382 
383 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
384 
385 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
386 	if (!vrr_active ||
387 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
388 				      &v_blank_end, &hpos, &vpos) ||
389 	    (vpos < v_blank_start)) {
390 		/* Update to correct count and vblank timestamp if racing with
391 		 * vblank irq. This also updates to the correct vblank timestamp
392 		 * even in VRR mode, as scanout is past the front-porch atm.
393 		 */
394 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
395 
396 		/* Wake up userspace by sending the pageflip event with proper
397 		 * count and timestamp of vblank of flip completion.
398 		 */
399 		if (e) {
400 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
401 
402 			/* Event sent, so done with vblank for this flip */
403 			drm_crtc_vblank_put(&amdgpu_crtc->base);
404 		}
405 	} else if (e) {
406 		/* VRR active and inside front-porch: vblank count and
407 		 * timestamp for pageflip event will only be up to date after
408 		 * drm_crtc_handle_vblank() has been executed from late vblank
409 		 * irq handler after start of back-porch (vline 0). We queue the
410 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
411 		 * updated timestamp and count, once it runs after us.
412 		 *
413 		 * We need to open-code this instead of using the helper
414 		 * drm_crtc_arm_vblank_event(), as that helper would
415 		 * call drm_crtc_accurate_vblank_count(), which we must
416 		 * not call in VRR mode while we are in front-porch!
417 		 */
418 
419 		/* sequence will be replaced by real count during send-out. */
420 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
421 		e->pipe = amdgpu_crtc->crtc_id;
422 
423 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
424 		e = NULL;
425 	}
426 
427 	/* Keep track of vblank of this flip for flip throttling. We use the
428 	 * cooked hw counter, as that one incremented at start of this vblank
429 	 * of pageflip completion, so last_flip_vblank is the forbidden count
430 	 * for queueing new pageflips if vsync + VRR is enabled.
431 	 */
432 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
433 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
434 
435 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
436 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
437 
438 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
439 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
440 			 vrr_active, (int) !e);
441 }
442 
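/*
 * Handler for the VUPDATE interrupt, which fires after the end of the
 * front-porch. In VRR mode this is the earliest point at which vblank
 * timestamps are valid, so core vblank handling is deferred to here.
 */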
443 static void dm_vupdate_high_irq(void *interrupt_params)
444 {
445 	struct common_irq_params *irq_params = interrupt_params;
446 	struct amdgpu_device *adev = irq_params->adev;
447 	struct amdgpu_crtc *acrtc;
448 	unsigned long flags;
449 	int vrr_active;
450 
451 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
452 
453 	if (acrtc) {
454 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
455 
456 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
457 			      acrtc->crtc_id,
458 			      vrr_active);
459 
460 		/* Core vblank handling is done here after end of front-porch in
461 		 * vrr mode, as vblank timestamping will give valid results
462 		 * while now done after front-porch. This will also deliver
463 		 * page-flip completion events that have been queued to us
464 		 * if a pageflip happened inside front-porch.
465 		 */
466 		if (vrr_active) {
467 			drm_crtc_handle_vblank(&acrtc->base);
468 
469 			/* BTR processing for pre-DCE12 ASICs */
470 			if (acrtc->dm_irq_params.stream &&
471 			    adev->family < AMDGPU_FAMILY_AI) {
472 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
473 				mod_freesync_handle_v_update(
474 				    adev->dm.freesync_module,
475 				    acrtc->dm_irq_params.stream,
476 				    &acrtc->dm_irq_params.vrr_params);
477 
478 				dc_stream_adjust_vmin_vmax(
479 				    adev->dm.dc,
480 				    acrtc->dm_irq_params.stream,
481 				    &acrtc->dm_irq_params.vrr_params.adjust);
482 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
483 			}
484 		}
485 	}
486 }
487 
488 /**
489  * dm_crtc_high_irq() - Handles CRTC interrupt
490  * @interrupt_params: used for determining the CRTC instance
491  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
493  * event handler.
494  */
495 static void dm_crtc_high_irq(void *interrupt_params)
496 {
497 	struct common_irq_params *irq_params = interrupt_params;
498 	struct amdgpu_device *adev = irq_params->adev;
499 	struct amdgpu_crtc *acrtc;
500 	unsigned long flags;
501 	int vrr_active;
502 
503 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
504 	if (!acrtc)
505 		return;
506 
507 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
508 
509 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
510 		      vrr_active, acrtc->dm_irq_params.active_planes);
511 
512 	/**
513 	 * Core vblank handling at start of front-porch is only possible
514 	 * in non-vrr mode, as only there vblank timestamping will give
515 	 * valid results while done in front-porch. Otherwise defer it
516 	 * to dm_vupdate_high_irq after end of front-porch.
517 	 */
518 	if (!vrr_active)
519 		drm_crtc_handle_vblank(&acrtc->base);
520 
521 	/**
522 	 * Following stuff must happen at start of vblank, for crc
523 	 * computation and below-the-range btr support in vrr mode.
524 	 */
525 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
526 
527 	/* BTR updates need to happen before VUPDATE on Vega and above. */
528 	if (adev->family < AMDGPU_FAMILY_AI)
529 		return;
530 
531 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
532 
533 	if (acrtc->dm_irq_params.stream &&
534 	    acrtc->dm_irq_params.vrr_params.supported &&
535 	    acrtc->dm_irq_params.freesync_config.state ==
536 		    VRR_STATE_ACTIVE_VARIABLE) {
537 		mod_freesync_handle_v_update(adev->dm.freesync_module,
538 					     acrtc->dm_irq_params.stream,
539 					     &acrtc->dm_irq_params.vrr_params);
540 
541 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
542 					   &acrtc->dm_irq_params.vrr_params.adjust);
543 	}
544 
545 	/*
	 * If there aren't any active_planes then DCN HUBP may be clock-gated.
547 	 * In that case, pageflip completion interrupts won't fire and pageflip
548 	 * completion events won't get delivered. Prevent this by sending
549 	 * pending pageflip events from here if a flip is still pending.
550 	 *
551 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
552 	 * avoid race conditions between flip programming and completion,
553 	 * which could cause too early flip completion events.
554 	 */
555 	if (adev->family >= AMDGPU_FAMILY_RV &&
556 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
557 	    acrtc->dm_irq_params.active_planes == 0) {
558 		if (acrtc->event) {
559 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
560 			acrtc->event = NULL;
561 			drm_crtc_vblank_put(&acrtc->base);
562 		}
563 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
564 	}
565 
566 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
567 }
568 
569 static int dm_set_clockgating_state(void *handle,
570 		  enum amd_clockgating_state state)
571 {
572 	return 0;
573 }
574 
575 static int dm_set_powergating_state(void *handle,
576 		  enum amd_powergating_state state)
577 {
578 	return 0;
579 }
580 
581 /* Prototypes of private functions */
static int dm_early_init(void *handle);
583 
/* Allocate memory for FBC compressed data */
585 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
586 {
587 	struct drm_device *dev = connector->dev;
588 	struct amdgpu_device *adev = drm_to_adev(dev);
589 	struct dm_compressor_info *compressor = &adev->dm.compressor;
590 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
591 	struct drm_display_mode *mode;
592 	unsigned long max_size = 0;
593 
594 	if (adev->dm.dc->fbc_compressor == NULL)
595 		return;
596 
597 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
598 		return;
599 
	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
624 
625 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
626 					  int pipe, bool *enabled,
627 					  unsigned char *buf, int max_bytes)
628 {
629 	struct drm_device *dev = dev_get_drvdata(kdev);
630 	struct amdgpu_device *adev = drm_to_adev(dev);
631 	struct drm_connector *connector;
632 	struct drm_connector_list_iter conn_iter;
633 	struct amdgpu_dm_connector *aconnector;
634 	int ret = 0;
635 
636 	*enabled = false;
637 
638 	mutex_lock(&adev->dm.audio_lock);
639 
640 	drm_connector_list_iter_begin(dev, &conn_iter);
641 	drm_for_each_connector_iter(connector, &conn_iter) {
642 		aconnector = to_amdgpu_dm_connector(connector);
643 		if (aconnector->audio_inst != port)
644 			continue;
645 
646 		*enabled = true;
647 		ret = drm_eld_size(connector->eld);
648 		memcpy(buf, connector->eld, min(max_bytes, ret));
649 
650 		break;
651 	}
652 	drm_connector_list_iter_end(&conn_iter);
653 
654 	mutex_unlock(&adev->dm.audio_lock);
655 
656 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
657 
658 	return ret;
659 }
660 
661 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
662 	.get_eld = amdgpu_dm_audio_component_get_eld,
663 };
664 
665 static int amdgpu_dm_audio_component_bind(struct device *kdev,
666 				       struct device *hda_kdev, void *data)
667 {
668 	struct drm_device *dev = dev_get_drvdata(kdev);
669 	struct amdgpu_device *adev = drm_to_adev(dev);
670 	struct drm_audio_component *acomp = data;
671 
672 	acomp->ops = &amdgpu_dm_audio_component_ops;
673 	acomp->dev = kdev;
674 	adev->dm.audio_component = acomp;
675 
676 	return 0;
677 }
678 
679 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
680 					  struct device *hda_kdev, void *data)
681 {
682 	struct drm_device *dev = dev_get_drvdata(kdev);
683 	struct amdgpu_device *adev = drm_to_adev(dev);
684 	struct drm_audio_component *acomp = data;
685 
686 	acomp->ops = NULL;
687 	acomp->dev = NULL;
688 	adev->dm.audio_component = NULL;
689 }
690 
691 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
692 	.bind	= amdgpu_dm_audio_component_bind,
693 	.unbind	= amdgpu_dm_audio_component_unbind,
694 };
695 
696 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
697 {
698 	int i, ret;
699 
700 	if (!amdgpu_audio)
701 		return 0;
702 
703 	adev->mode_info.audio.enabled = true;
704 
705 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
706 
707 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
708 		adev->mode_info.audio.pin[i].channels = -1;
709 		adev->mode_info.audio.pin[i].rate = -1;
710 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
711 		adev->mode_info.audio.pin[i].status_bits = 0;
712 		adev->mode_info.audio.pin[i].category_code = 0;
713 		adev->mode_info.audio.pin[i].connected = false;
714 		adev->mode_info.audio.pin[i].id =
715 			adev->dm.dc->res_pool->audios[i]->inst;
716 		adev->mode_info.audio.pin[i].offset = 0;
717 	}
718 
719 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
720 	if (ret < 0)
721 		return ret;
722 
723 	adev->dm.audio_registered = true;
724 
725 	return 0;
726 }
727 
728 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
729 {
730 	if (!amdgpu_audio)
731 		return;
732 
733 	if (!adev->mode_info.audio.enabled)
734 		return;
735 
736 	if (adev->dm.audio_registered) {
737 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
738 		adev->dm.audio_registered = false;
739 	}
740 
741 	/* TODO: Disable audio? */
742 
743 	adev->mode_info.audio.enabled = false;
744 }
745 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
747 {
748 	struct drm_audio_component *acomp = adev->dm.audio_component;
749 
750 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
751 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
752 
753 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
754 						 pin, -1);
755 	}
756 }
757 
758 static int dm_dmub_hw_init(struct amdgpu_device *adev)
759 {
760 	const struct dmcub_firmware_header_v1_0 *hdr;
761 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
762 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
763 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
764 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
765 	struct abm *abm = adev->dm.dc->res_pool->abm;
766 	struct dmub_srv_hw_params hw_params;
767 	enum dmub_status status;
768 	const unsigned char *fw_inst_const, *fw_bss_data;
769 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
770 	bool has_hw_support;
771 
772 	if (!dmub_srv)
773 		/* DMUB isn't supported on the ASIC. */
774 		return 0;
775 
776 	if (!fb_info) {
777 		DRM_ERROR("No framebuffer info for DMUB service.\n");
778 		return -EINVAL;
779 	}
780 
781 	if (!dmub_fw) {
782 		/* Firmware required for DMUB support. */
783 		DRM_ERROR("No firmware provided for DMUB.\n");
784 		return -EINVAL;
785 	}
786 
787 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
788 	if (status != DMUB_STATUS_OK) {
789 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
790 		return -EINVAL;
791 	}
792 
793 	if (!has_hw_support) {
794 		DRM_INFO("DMUB unsupported on ASIC\n");
795 		return 0;
796 	}
797 
798 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
799 
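	/*
	 * The DMUB binary is laid out as PSP header, instruction/constant
	 * section, PSP footer and then bss/data; the header and footer are
	 * only meaningful to PSP, so they are skipped for backdoor loads.
	 */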
800 	fw_inst_const = dmub_fw->data +
801 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
802 			PSP_HEADER_BYTES;
803 
804 	fw_bss_data = dmub_fw->data +
805 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 		      le32_to_cpu(hdr->inst_const_bytes);
807 
808 	/* Copy firmware and bios info into FB memory. */
809 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
810 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
811 
812 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
813 
814 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
815 	 * amdgpu_ucode_init_single_fw will load dmub firmware
816 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
817 	 * will be done by dm_dmub_hw_init
818 	 */
819 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
820 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
821 				fw_inst_const_size);
822 	}
823 
824 	if (fw_bss_data_size)
825 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
826 		       fw_bss_data, fw_bss_data_size);
827 
828 	/* Copy firmware bios info into FB memory. */
829 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
830 	       adev->bios_size);
831 
832 	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
835 
836 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
837 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
838 
839 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
840 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
841 
842 	/* Initialize hardware. */
843 	memset(&hw_params, 0, sizeof(hw_params));
844 	hw_params.fb_base = adev->gmc.fb_start;
845 	hw_params.fb_offset = adev->gmc.aper_base;
846 
847 	/* backdoor load firmware and trigger dmub running */
848 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
849 		hw_params.load_inst_const = true;
850 
851 	if (dmcu)
852 		hw_params.psp_version = dmcu->psp_version;
853 
854 	for (i = 0; i < fb_info->num_fb; ++i)
855 		hw_params.fb[i] = &fb_info->fb[i];
856 
857 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
858 	if (status != DMUB_STATUS_OK) {
859 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
860 		return -EINVAL;
861 	}
862 
863 	/* Wait for firmware load to finish. */
864 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
865 	if (status != DMUB_STATUS_OK)
866 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
867 
868 	/* Init DMCU and ABM if available. */
869 	if (dmcu && abm) {
870 		dmcu->funcs->dmcu_init(dmcu);
871 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
872 	}
873 
874 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
875 	if (!adev->dm.dc->ctx->dmub_srv) {
876 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
877 		return -ENOMEM;
878 	}
879 
880 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
881 		 adev->dm.dmcub_fw_version);
882 
883 	return 0;
884 }
885 
886 #if defined(CONFIG_DRM_AMD_DC_DCN)
887 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
888 {
889 	uint64_t pt_base;
890 	uint32_t logical_addr_low;
891 	uint32_t logical_addr_high;
892 	uint32_t agp_base, agp_bot, agp_top;
893 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
894 
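	/*
	 * The shifts below convert byte addresses into the units the hardware
	 * expects: 256KB granularity (>> 18) for the system aperture, 16MB
	 * (>> 24) for the AGP aperture and 4KB pages (>> 12) for the GART
	 * page tables.
	 */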
895 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
896 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
897 
	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue where it cannot use VRAM that lies
		 * above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
		 * raise the system aperture high address by one to avoid
		 * the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
906 	else
907 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
908 
	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

914 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
915 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
916 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
917 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
918 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
919 	page_table_base.low_part = lower_32_bits(pt_base);
920 
921 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
922 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
923 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
925 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
926 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
927 
928 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
929 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
930 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
931 
932 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
933 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
934 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
935 
	pa_config->is_hvm_enabled = 0;
}
939 #endif
940 
941 static int amdgpu_dm_init(struct amdgpu_device *adev)
942 {
943 	struct dc_init_data init_data;
944 #ifdef CONFIG_DRM_AMD_DC_HDCP
945 	struct dc_callback_init init_params;
946 #endif
947 	int r;
948 
949 	adev->dm.ddev = adev_to_drm(adev);
950 	adev->dm.adev = adev;
951 
952 	/* Zero all the fields */
953 	memset(&init_data, 0, sizeof(init_data));
954 #ifdef CONFIG_DRM_AMD_DC_HDCP
955 	memset(&init_params, 0, sizeof(init_params));
956 #endif
957 
958 	mutex_init(&adev->dm.dc_lock);
959 	mutex_init(&adev->dm.audio_lock);
960 
	if (amdgpu_dm_irq_init(adev)) {
962 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
963 		goto error;
964 	}
965 
966 	init_data.asic_id.chip_family = adev->family;
967 
968 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
969 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
970 
971 	init_data.asic_id.vram_width = adev->gmc.vram_width;
972 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
973 	init_data.asic_id.atombios_base_address =
974 		adev->mode_info.atom_context->bios;
975 
976 	init_data.driver = adev;
977 
978 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
979 
980 	if (!adev->dm.cgs_device) {
981 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
982 		goto error;
983 	}
984 
985 	init_data.cgs_device = adev->dm.cgs_device;
986 
987 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
988 
989 	switch (adev->asic_type) {
990 	case CHIP_CARRIZO:
991 	case CHIP_STONEY:
992 	case CHIP_RAVEN:
993 	case CHIP_RENOIR:
994 		init_data.flags.gpu_vm_support = true;
995 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
996 			init_data.flags.disable_dmcu = true;
997 		break;
998 #if defined(CONFIG_DRM_AMD_DC_DCN)
999 	case CHIP_VANGOGH:
1000 		init_data.flags.gpu_vm_support = true;
1001 		break;
1002 #endif
1003 	default:
1004 		break;
1005 	}
1006 
1007 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1008 		init_data.flags.fbc_support = true;
1009 
1010 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1011 		init_data.flags.multi_mon_pp_mclk_switch = true;
1012 
1013 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1014 		init_data.flags.disable_fractional_pwm = true;
1015 
1016 	init_data.flags.power_down_display_on_boot = true;
1017 
1018 	/* Display Core create. */
1019 	adev->dm.dc = dc_create(&init_data);
1020 
1021 	if (adev->dm.dc) {
1022 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1023 	} else {
1024 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1025 		goto error;
1026 	}
1027 
1028 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1029 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1030 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1031 	}
1032 
	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1035 
1036 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1037 		adev->dm.dc->debug.disable_stutter = true;
1038 
1039 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1040 		adev->dm.dc->debug.disable_dsc = true;
1041 
1042 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1043 		adev->dm.dc->debug.disable_clock_gate = true;
1044 
1045 	r = dm_dmub_hw_init(adev);
1046 	if (r) {
1047 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1048 		goto error;
1049 	}
1050 
1051 	dc_hardware_init(adev->dm.dc);
1052 
1053 #if defined(CONFIG_DRM_AMD_DC_DCN)
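	/*
	 * On APUs, DCN shares the physical address space with the CPU, so
	 * describe the aperture and GART layout to DC up front.
	 */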
1054 	if (adev->apu_flags) {
1055 		struct dc_phy_addr_space_config pa_config;
1056 
1057 		mmhub_read_system_context(adev, &pa_config);
1058 
		/* Call the DC init_memory func */
1060 		dc_setup_system_context(adev->dm.dc, &pa_config);
1061 	}
1062 #endif
1063 
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}
1071 
1072 	amdgpu_dm_init_color_mod();
1073 
1074 #ifdef CONFIG_DRM_AMD_DC_HDCP
1075 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1076 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1077 
1078 		if (!adev->dm.hdcp_workqueue)
1079 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1080 		else
1081 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1082 
1083 		dc_init_callbacks(adev->dm.dc, &init_params);
1084 	}
1085 #endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}
1091 
1092 	/* create fake encoders for MST */
1093 	dm_dp_create_fake_mst_encoders(adev);
1094 
1095 	/* TODO: Add_display_info? */
1096 
1097 	/* TODO use dynamic cursor width */
1098 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1099 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1100 
	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}

1108 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1109 
1110 	return 0;
1111 error:
1112 	amdgpu_dm_fini(adev);
1113 
1114 	return -EINVAL;
1115 }
1116 
1117 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1118 {
1119 	int i;
1120 
	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1124 
1125 	amdgpu_dm_audio_fini(adev);
1126 
1127 	amdgpu_dm_destroy_drm_device(&adev->dm);
1128 
1129 #ifdef CONFIG_DRM_AMD_DC_HDCP
1130 	if (adev->dm.hdcp_workqueue) {
1131 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1132 		adev->dm.hdcp_workqueue = NULL;
1133 	}
1134 
1135 	if (adev->dm.dc)
1136 		dc_deinit_callbacks(adev->dm.dc);
1137 #endif
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1139 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1140 		adev->dm.dc->ctx->dmub_srv = NULL;
1141 	}
1142 
1143 	if (adev->dm.dmub_bo)
1144 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1145 				      &adev->dm.dmub_bo_gpu_addr,
1146 				      &adev->dm.dmub_bo_cpu_addr);
1147 
1148 	/* DC Destroy TODO: Replace destroy DAL */
1149 	if (adev->dm.dc)
1150 		dc_destroy(&adev->dm.dc);
1151 	/*
1152 	 * TODO: pageflip, vlank interrupt
1153 	 *
1154 	 * amdgpu_dm_irq_fini(adev);
1155 	 */
1156 
1157 	if (adev->dm.cgs_device) {
1158 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1159 		adev->dm.cgs_device = NULL;
1160 	}
1161 	if (adev->dm.freesync_module) {
1162 		mod_freesync_destroy(adev->dm.freesync_module);
1163 		adev->dm.freesync_module = NULL;
1164 	}
1165 
	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
1170 }
1171 
1172 static int load_dmcu_fw(struct amdgpu_device *adev)
1173 {
1174 	const char *fw_name_dmcu = NULL;
1175 	int r;
1176 	const struct dmcu_firmware_header_v1_0 *hdr;
1177 
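	/*
	 * The ASICs below either have no DMCU or do not need a separately
	 * loaded DMCU firmware, so there is nothing to fetch for them.
	 */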
	switch (adev->asic_type) {
1179 #if defined(CONFIG_DRM_AMD_DC_SI)
1180 	case CHIP_TAHITI:
1181 	case CHIP_PITCAIRN:
1182 	case CHIP_VERDE:
1183 	case CHIP_OLAND:
1184 #endif
1185 	case CHIP_BONAIRE:
1186 	case CHIP_HAWAII:
1187 	case CHIP_KAVERI:
1188 	case CHIP_KABINI:
1189 	case CHIP_MULLINS:
1190 	case CHIP_TONGA:
1191 	case CHIP_FIJI:
1192 	case CHIP_CARRIZO:
1193 	case CHIP_STONEY:
1194 	case CHIP_POLARIS11:
1195 	case CHIP_POLARIS10:
1196 	case CHIP_POLARIS12:
1197 	case CHIP_VEGAM:
1198 	case CHIP_VEGA10:
1199 	case CHIP_VEGA12:
1200 	case CHIP_VEGA20:
1201 	case CHIP_NAVI10:
1202 	case CHIP_NAVI14:
1203 	case CHIP_RENOIR:
1204 	case CHIP_SIENNA_CICHLID:
1205 	case CHIP_NAVY_FLOUNDER:
1206 	case CHIP_DIMGREY_CAVEFISH:
1207 	case CHIP_VANGOGH:
1208 		return 0;
1209 	case CHIP_NAVI12:
1210 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1211 		break;
1212 	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
1219 		break;
1220 	default:
1221 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1222 		return -EINVAL;
1223 	}
1224 
1225 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1226 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1227 		return 0;
1228 	}
1229 
1230 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1231 	if (r == -ENOENT) {
1232 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1233 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1234 		adev->dm.fw_dmcu = NULL;
1235 		return 0;
1236 	}
1237 	if (r) {
1238 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1239 			fw_name_dmcu);
1240 		return r;
1241 	}
1242 
1243 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1244 	if (r) {
1245 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1246 			fw_name_dmcu);
1247 		release_firmware(adev->dm.fw_dmcu);
1248 		adev->dm.fw_dmcu = NULL;
1249 		return r;
1250 	}
1251 
1252 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1253 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1254 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1255 	adev->firmware.fw_size +=
1256 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1257 
1258 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1259 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1260 	adev->firmware.fw_size +=
1261 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1262 
1263 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1264 
1265 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1266 
1267 	return 0;
1268 }
1269 
1270 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1271 {
1272 	struct amdgpu_device *adev = ctx;
1273 
1274 	return dm_read_reg(adev->dm.dc->ctx, address);
1275 }
1276 
1277 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1278 				     uint32_t value)
1279 {
1280 	struct amdgpu_device *adev = ctx;
1281 
1282 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1283 }
1284 
1285 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1286 {
1287 	struct dmub_srv_create_params create_params;
1288 	struct dmub_srv_region_params region_params;
1289 	struct dmub_srv_region_info region_info;
1290 	struct dmub_srv_fb_params fb_params;
1291 	struct dmub_srv_fb_info *fb_info;
1292 	struct dmub_srv *dmub_srv;
1293 	const struct dmcub_firmware_header_v1_0 *hdr;
1294 	const char *fw_name_dmub;
1295 	enum dmub_asic dmub_asic;
1296 	enum dmub_status status;
1297 	int r;
1298 
1299 	switch (adev->asic_type) {
1300 	case CHIP_RENOIR:
1301 		dmub_asic = DMUB_ASIC_DCN21;
1302 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1303 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1304 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1305 		break;
1306 	case CHIP_SIENNA_CICHLID:
1307 		dmub_asic = DMUB_ASIC_DCN30;
1308 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1309 		break;
1310 	case CHIP_NAVY_FLOUNDER:
1311 		dmub_asic = DMUB_ASIC_DCN30;
1312 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1313 		break;
1314 	case CHIP_VANGOGH:
1315 		dmub_asic = DMUB_ASIC_DCN301;
1316 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1317 		break;
1318 	case CHIP_DIMGREY_CAVEFISH:
1319 		dmub_asic = DMUB_ASIC_DCN302;
1320 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	default:
1324 		/* ASIC doesn't support DMUB. */
1325 		return 0;
1326 	}
1327 
1328 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1329 	if (r) {
1330 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1331 		return 0;
1332 	}
1333 
1334 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1335 	if (r) {
1336 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1337 		return 0;
1338 	}
1339 
	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	/* Read the version before logging it below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}
1355 
1356 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1357 	dmub_srv = adev->dm.dmub_srv;
1358 
1359 	if (!dmub_srv) {
1360 		DRM_ERROR("Failed to allocate DMUB service!\n");
1361 		return -ENOMEM;
1362 	}
1363 
1364 	memset(&create_params, 0, sizeof(create_params));
1365 	create_params.user_ctx = adev;
1366 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1367 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1368 	create_params.asic = dmub_asic;
1369 
1370 	/* Create the DMUB service. */
1371 	status = dmub_srv_create(dmub_srv, &create_params);
1372 	if (status != DMUB_STATUS_OK) {
1373 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1374 		return -EINVAL;
1375 	}
1376 
1377 	/* Calculate the size of all the regions for the DMUB service. */
1378 	memset(&region_params, 0, sizeof(region_params));
1379 
1380 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1381 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1382 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1383 	region_params.vbios_size = adev->bios_size;
1384 	region_params.fw_bss_data = region_params.bss_data_size ?
1385 		adev->dm.dmub_fw->data +
1386 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1387 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1388 	region_params.fw_inst_const =
1389 		adev->dm.dmub_fw->data +
1390 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1391 		PSP_HEADER_BYTES;
1392 
1393 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1394 					   &region_info);
1395 
1396 	if (status != DMUB_STATUS_OK) {
1397 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1398 		return -EINVAL;
1399 	}
1400 
1401 	/*
1402 	 * Allocate a framebuffer based on the total size of all the regions.
1403 	 * TODO: Move this into GART.
1404 	 */
1405 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1406 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1407 				    &adev->dm.dmub_bo_gpu_addr,
1408 				    &adev->dm.dmub_bo_cpu_addr);
1409 	if (r)
1410 		return r;
1411 
1412 	/* Rebase the regions on the framebuffer address. */
1413 	memset(&fb_params, 0, sizeof(fb_params));
1414 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1415 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1416 	fb_params.region_info = &region_info;
1417 
1418 	adev->dm.dmub_fb_info =
1419 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1420 	fb_info = adev->dm.dmub_fb_info;
1421 
1422 	if (!fb_info) {
1423 		DRM_ERROR(
1424 			"Failed to allocate framebuffer info for DMUB service!\n");
1425 		return -ENOMEM;
1426 	}
1427 
1428 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1429 	if (status != DMUB_STATUS_OK) {
1430 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1431 		return -EINVAL;
1432 	}
1433 
1434 	return 0;
1435 }
1436 
1437 static int dm_sw_init(void *handle)
1438 {
1439 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1440 	int r;
1441 
1442 	r = dm_dmub_sw_init(adev);
1443 	if (r)
1444 		return r;
1445 
1446 	return load_dmcu_fw(adev);
1447 }
1448 
1449 static int dm_sw_fini(void *handle)
1450 {
1451 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1452 
1453 	kfree(adev->dm.dmub_fb_info);
1454 	adev->dm.dmub_fb_info = NULL;
1455 
1456 	if (adev->dm.dmub_srv) {
1457 		dmub_srv_destroy(adev->dm.dmub_srv);
1458 		adev->dm.dmub_srv = NULL;
1459 	}
1460 
1461 	release_firmware(adev->dm.dmub_fw);
1462 	adev->dm.dmub_fw = NULL;
1463 
1464 	release_firmware(adev->dm.fw_dmcu);
1465 	adev->dm.fw_dmcu = NULL;
1466 
1467 	return 0;
1468 }
1469 
1470 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1471 {
1472 	struct amdgpu_dm_connector *aconnector;
1473 	struct drm_connector *connector;
1474 	struct drm_connector_list_iter iter;
1475 	int ret = 0;
1476 
1477 	drm_connector_list_iter_begin(dev, &iter);
1478 	drm_for_each_connector_iter(connector, &iter) {
1479 		aconnector = to_amdgpu_dm_connector(connector);
1480 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1481 		    aconnector->mst_mgr.aux) {
1482 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1483 					 aconnector,
1484 					 aconnector->base.base.id);
1485 
1486 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1487 			if (ret < 0) {
1488 				DRM_ERROR("DM_MST: Failed to start MST\n");
1489 				aconnector->dc_link->type =
1490 					dc_connection_single;
1491 				break;
1492 			}
1493 		}
1494 	}
1495 	drm_connector_list_iter_end(&iter);
1496 
1497 	return ret;
1498 }
1499 
1500 static int dm_late_init(void *handle)
1501 {
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
1505 	unsigned int linear_lut[16];
1506 	int i;
1507 	struct dmcu *dmcu = NULL;
1508 	bool ret = true;
1509 
1510 	dmcu = adev->dm.dc->res_pool->dmcu;
1511 
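	/* Build a 16-point linear backlight LUT spanning 0..0xFFFF. */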
1512 	for (i = 0; i < 16; i++)
1513 		linear_lut[i] = 0xFFFF * i / 15;
1514 
1515 	params.set = 0;
1516 	params.backlight_ramping_start = 0xCCCC;
1517 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1518 	params.backlight_lut_array_size = 16;
1519 	params.backlight_lut_array = linear_lut;
1520 
	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;
1525 
	/* If ABM is implemented on DMCUB, the DMCU object will be NULL.
	 * ABM 2.4 and up are implemented on DMCUB.
	 */
1530 	if (dmcu)
1531 		ret = dmcu_load_iram(dmcu, params);
1532 	else if (adev->dm.dc->ctx->dmub_srv)
1533 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1534 
1535 	if (!ret)
1536 		return -EINVAL;
1537 
1538 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1539 }
1540 
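/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * If a manager fails to resume, tear its topology down and send a hotplug
 * event so that userspace re-probes the connectors.
 */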
1541 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1542 {
1543 	struct amdgpu_dm_connector *aconnector;
1544 	struct drm_connector *connector;
1545 	struct drm_connector_list_iter iter;
1546 	struct drm_dp_mst_topology_mgr *mgr;
1547 	int ret;
1548 	bool need_hotplug = false;
1549 
1550 	drm_connector_list_iter_begin(dev, &iter);
1551 	drm_for_each_connector_iter(connector, &iter) {
1552 		aconnector = to_amdgpu_dm_connector(connector);
1553 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1554 		    aconnector->mst_port)
1555 			continue;
1556 
1557 		mgr = &aconnector->mst_mgr;
1558 
1559 		if (suspend) {
1560 			drm_dp_mst_topology_mgr_suspend(mgr);
1561 		} else {
1562 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1563 			if (ret < 0) {
1564 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1565 				need_hotplug = true;
1566 			}
1567 		}
1568 	}
1569 	drm_connector_list_iter_end(&iter);
1570 
1571 	if (need_hotplug)
1572 		drm_kms_helper_hotplug_event(dev);
1573 }
1574 
1575 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1576 {
1577 	struct smu_context *smu = &adev->smu;
1578 	int ret = 0;
1579 
1580 	if (!is_support_sw_smu(adev))
1581 		return 0;
1582 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's DC implementation.
	 * For Navi1x, the clock settings of the DCN watermarks are fixed. The
	 * settings should be passed to smu during boot up and resume from s3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, then calls the pplib functions
	 * below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
	 */
	switch (adev->asic_type) {
1614 	case CHIP_NAVI10:
1615 	case CHIP_NAVI14:
1616 	case CHIP_NAVI12:
1617 		break;
1618 	default:
1619 		return 0;
1620 	}
1621 
1622 	ret = smu_write_watermarks_table(smu);
1623 	if (ret) {
1624 		DRM_ERROR("Failed to update WMTABLE!\n");
1625 		return ret;
1626 	}
1627 
1628 	return 0;
1629 }
1630 
1631 /**
1632  * dm_hw_init() - Initialize DC device
1633  * @handle: The base driver device containing the amdgpu_dm device.
1634  *
1635  * Initialize the &struct amdgpu_display_manager device. This involves calling
1636  * the initializers of each DM component, then populating the struct with them.
1637  *
1638  * Although the function implies hardware initialization, both hardware and
1639  * software are initialized here. Splitting them out to their relevant init
1640  * hooks is a future TODO item.
1641  *
1642  * Some notable things that are initialized here:
1643  *
1644  * - Display Core, both software and hardware
1645  * - DC modules that we need (freesync and color management)
1646  * - DRM software states
1647  * - Interrupt sources and handlers
1648  * - Vblank support
1649  * - Debug FS entries, if enabled
1650  */
1651 static int dm_hw_init(void *handle)
1652 {
1653 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1654 	/* Create DAL display manager */
1655 	amdgpu_dm_init(adev);
1656 	amdgpu_dm_hpd_init(adev);
1657 
1658 	return 0;
1659 }
1660 
1661 /**
1662  * dm_hw_fini() - Teardown DC device
1663  * @handle: The base driver device containing the amdgpu_dm device.
1664  *
1665  * Teardown components within &struct amdgpu_display_manager that require
1666  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1667  * were loaded. Also flush IRQ workqueues and disable them.
1668  */
1669 static int dm_hw_fini(void *handle)
1670 {
1671 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1672 
1673 	amdgpu_dm_hpd_fini(adev);
1674 
1675 	amdgpu_dm_irq_fini(adev);
1676 	amdgpu_dm_fini(adev);
1677 	return 0;
1678 }
1679 
1680 
1681 static int dm_enable_vblank(struct drm_crtc *crtc);
1682 static void dm_disable_vblank(struct drm_crtc *crtc);
1683 
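/*
 * Toggle the pageflip and vblank interrupts of every stream that has active
 * planes; used to quiesce and restore display IRQs across a GPU reset.
 */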
1684 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1685 				 struct dc_state *state, bool enable)
1686 {
1687 	enum dc_irq_source irq_source;
1688 	struct amdgpu_crtc *acrtc;
1689 	int rc = -EBUSY;
1690 	int i = 0;
1691 
1692 	for (i = 0; i < state->stream_count; i++) {
1693 		acrtc = get_crtc_by_otg_inst(
1694 				adev, state->stream_status[i].primary_otg_inst);
1695 
1696 		if (acrtc && state->stream_status[i].plane_count != 0) {
1697 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1698 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1699 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1700 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1701 			if (rc)
1702 				DRM_WARN("Failed to %s pflip interrupts\n",
1703 					 enable ? "enable" : "disable");
1704 
1705 			if (enable) {
1706 				rc = dm_enable_vblank(&acrtc->base);
1707 				if (rc)
1708 					DRM_WARN("Failed to enable vblank interrupts\n");
1709 			} else {
1710 				dm_disable_vblank(&acrtc->base);
1711 			}
1712 
1713 		}
1714 	}
1715 
1716 }
1717 
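/*
 * Commit a DC state with all streams (and their planes) removed, leaving the
 * hardware driving no displays. Used to quiesce DC, e.g. across a GPU reset.
 */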
1718 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1719 {
1720 	struct dc_state *context = NULL;
1721 	enum dc_status res = DC_ERROR_UNEXPECTED;
1722 	int i;
1723 	struct dc_stream_state *del_streams[MAX_PIPES];
1724 	int del_streams_count = 0;
1725 
1726 	memset(del_streams, 0, sizeof(del_streams));
1727 
1728 	context = dc_create_state(dc);
1729 	if (context == NULL)
1730 		goto context_alloc_fail;
1731 
1732 	dc_resource_state_copy_construct_current(dc, context);
1733 
1734 	/* First remove from context all streams */
1735 	for (i = 0; i < context->stream_count; i++) {
1736 		struct dc_stream_state *stream = context->streams[i];
1737 
1738 		del_streams[del_streams_count++] = stream;
1739 	}
1740 
1741 	/* Remove all planes for removed streams and then remove the streams */
1742 	for (i = 0; i < del_streams_count; i++) {
1743 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1744 			res = DC_FAIL_DETACH_SURFACES;
1745 			goto fail;
1746 		}
1747 
1748 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1749 		if (res != DC_OK)
1750 			goto fail;
1751 	}
1752 
1754 	res = dc_validate_global_state(dc, context, false);
1755 
1756 	if (res != DC_OK) {
1757 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1758 		goto fail;
1759 	}
1760 
1761 	res = dc_commit_state(dc, context);
1762 
1763 fail:
1764 	dc_release_state(context);
1765 
1766 context_alloc_fail:
1767 	return res;
1768 }
1769 
1770 static int dm_suspend(void *handle)
1771 {
1772 	struct amdgpu_device *adev = handle;
1773 	struct amdgpu_display_manager *dm = &adev->dm;
1774 	int ret = 0;
1775 
1776 	if (amdgpu_in_reset(adev)) {
1777 		mutex_lock(&dm->dc_lock);
1778 
1779 #if defined(CONFIG_DRM_AMD_DC_DCN)
1780 		dc_allow_idle_optimizations(adev->dm.dc, false);
1781 #endif
1782 
1783 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1784 
1785 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1786 
1787 		amdgpu_dm_commit_zero_streams(dm->dc);
1788 
1789 		amdgpu_dm_irq_suspend(adev);
1790 
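		/*
		 * Note: dm->dc_lock stays held on purpose; the GPU-reset path
		 * in dm_resume() releases it after the cached state has been
		 * re-committed.
		 */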
1791 		return ret;
1792 	}
1793 
1794 	WARN_ON(adev->dm.cached_state);
1795 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1796 
1797 	s3_handle_mst(adev_to_drm(adev), true);
1798 
1799 	amdgpu_dm_irq_suspend(adev);
1800 
1801 
1802 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1803 
1804 	return 0;
1805 }
1806 
1807 static struct amdgpu_dm_connector *
1808 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1809 					     struct drm_crtc *crtc)
1810 {
1811 	uint32_t i;
1812 	struct drm_connector_state *new_con_state;
1813 	struct drm_connector *connector;
1814 	struct drm_crtc *crtc_from_state;
1815 
1816 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1817 		crtc_from_state = new_con_state->crtc;
1818 
1819 		if (crtc_from_state == crtc)
1820 			return to_amdgpu_dm_connector(connector);
1821 	}
1822 
1823 	return NULL;
1824 }
1825 
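/*
 * Fabricate a local sink for a forced connector when no physical sink
 * was detected, so the connector still exposes an EDID and usable modes.
 */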
1826 static void emulated_link_detect(struct dc_link *link)
1827 {
1828 	struct dc_sink_init_data sink_init_data = { 0 };
1829 	struct display_sink_capability sink_caps = { 0 };
1830 	enum dc_edid_status edid_status;
1831 	struct dc_context *dc_ctx = link->ctx;
1832 	struct dc_sink *sink = NULL;
1833 	struct dc_sink *prev_sink = NULL;
1834 
1835 	link->type = dc_connection_none;
1836 	prev_sink = link->local_sink;
1837 
1838 	if (prev_sink)
1839 		dc_sink_release(prev_sink);
1840 
1841 	switch (link->connector_signal) {
1842 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1843 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1844 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1845 		break;
1846 	}
1847 
1848 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1849 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1850 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1851 		break;
1852 	}
1853 
1854 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1855 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1856 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1857 		break;
1858 	}
1859 
1860 	case SIGNAL_TYPE_LVDS: {
1861 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1862 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1863 		break;
1864 	}
1865 
1866 	case SIGNAL_TYPE_EDP: {
1867 		sink_caps.transaction_type =
1868 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1869 		sink_caps.signal = SIGNAL_TYPE_EDP;
1870 		break;
1871 	}
1872 
1873 	case SIGNAL_TYPE_DISPLAY_PORT: {
1874 		sink_caps.transaction_type =
1875 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1876 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1877 		break;
1878 	}
1879 
1880 	default:
1881 		DC_ERROR("Invalid connector type! signal:%d\n",
1882 			link->connector_signal);
1883 		return;
1884 	}
1885 
1886 	sink_init_data.link = link;
1887 	sink_init_data.sink_signal = sink_caps.signal;
1888 
1889 	sink = dc_sink_create(&sink_init_data);
1890 	if (!sink) {
1891 		DC_ERROR("Failed to create sink!\n");
1892 		return;
1893 	}
1894 
1895 	/* dc_sink_create returns a new reference */
1896 	link->local_sink = sink;
1897 
1898 	edid_status = dm_helpers_read_local_edid(
1899 			link->ctx,
1900 			link,
1901 			sink);
1902 
1903 	if (edid_status != EDID_OK)
1904 		DC_ERROR("Failed to read EDID\n");
1905 
1906 }
1907 
1908 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1909 				     struct amdgpu_display_manager *dm)
1910 {
1911 	struct {
1912 		struct dc_surface_update surface_updates[MAX_SURFACES];
1913 		struct dc_plane_info plane_infos[MAX_SURFACES];
1914 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1915 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1916 		struct dc_stream_update stream_update;
1917 	} *bundle;
1918 	int k, m;
1919 
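	/* The update bundle is too large for the stack, so heap-allocate it. */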
1920 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1921 
1922 	if (!bundle) {
1923 		dm_error("Failed to allocate update bundle\n");
1924 		goto cleanup;
1925 	}
1926 
1927 	for (k = 0; k < dc_state->stream_count; k++) {
1928 		bundle->stream_update.stream = dc_state->streams[k];
1929 
1930 		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1931 			bundle->surface_updates[m].surface =
1932 				dc_state->stream_status[k].plane_states[m];
1933 			bundle->surface_updates[m].surface->force_full_update =
1934 				true;
1935 		}
1936 		dc_commit_updates_for_stream(
1937 			dm->dc, bundle->surface_updates,
1938 			dc_state->stream_status[k].plane_count,
1939 			dc_state->streams[k], &bundle->stream_update);
1940 	}
1941 
1942 cleanup:
1943 	kfree(bundle);
1944 
1945 	return;
1946 }
1947 
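/* Blank the display on the given link by committing a stream update with dpms_off set. */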
1948 static void dm_set_dpms_off(struct dc_link *link)
1949 {
1950 	struct dc_stream_state *stream_state;
1951 	struct amdgpu_dm_connector *aconnector = link->priv;
1952 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1953 	struct dc_stream_update stream_update;
1954 	bool dpms_off = true;
1955 
1956 	memset(&stream_update, 0, sizeof(stream_update));
1957 	stream_update.dpms_off = &dpms_off;
1958 
1959 	mutex_lock(&adev->dm.dc_lock);
1960 	stream_state = dc_stream_find_from_link(link);
1961 
1962 	if (stream_state == NULL) {
1963 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1964 		mutex_unlock(&adev->dm.dc_lock);
1965 		return;
1966 	}
1967 
1968 	stream_update.stream = stream_state;
1969 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1970 				     stream_state, &stream_update);
1971 	mutex_unlock(&adev->dm.dc_lock);
1972 }
1973 
1974 static int dm_resume(void *handle)
1975 {
1976 	struct amdgpu_device *adev = handle;
1977 	struct drm_device *ddev = adev_to_drm(adev);
1978 	struct amdgpu_display_manager *dm = &adev->dm;
1979 	struct amdgpu_dm_connector *aconnector;
1980 	struct drm_connector *connector;
1981 	struct drm_connector_list_iter iter;
1982 	struct drm_crtc *crtc;
1983 	struct drm_crtc_state *new_crtc_state;
1984 	struct dm_crtc_state *dm_new_crtc_state;
1985 	struct drm_plane *plane;
1986 	struct drm_plane_state *new_plane_state;
1987 	struct dm_plane_state *dm_new_plane_state;
1988 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1989 	enum dc_connection_type new_connection_type = dc_connection_none;
1990 	struct dc_state *dc_state;
1991 	int i, r, j;
1992 
1993 	if (amdgpu_in_reset(adev)) {
1994 		dc_state = dm->cached_dc_state;
1995 
1996 		r = dm_dmub_hw_init(adev);
1997 		if (r)
1998 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1999 
2000 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2001 		dc_resume(dm->dc);
2002 
2003 		amdgpu_dm_irq_resume_early(adev);
2004 
2005 		for (i = 0; i < dc_state->stream_count; i++) {
2006 			dc_state->streams[i]->mode_changed = true;
2007 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2008 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2009 					= 0xffffffff;
2010 			}
2011 		}
2012 
2013 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2014 
2015 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2016 
2017 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2018 
2019 		dc_release_state(dm->cached_dc_state);
2020 		dm->cached_dc_state = NULL;
2021 
2022 		amdgpu_dm_irq_resume_late(adev);
2023 
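		/* Drop dm->dc_lock taken in dm_suspend() when the reset began. */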
2024 		mutex_unlock(&dm->dc_lock);
2025 
2026 		return 0;
2027 	}
2028 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2029 	dc_release_state(dm_state->context);
2030 	dm_state->context = dc_create_state(dm->dc);
2031 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2032 	dc_resource_state_construct(dm->dc, dm_state->context);
2033 
2034 	/* Before powering on DC we need to re-initialize DMUB. */
2035 	r = dm_dmub_hw_init(adev);
2036 	if (r)
2037 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2038 
2039 	/* power on hardware */
2040 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2041 
2042 	/* program HPD filter */
2043 	dc_resume(dm->dc);
2044 
2045 	/*
2046 	 * early enable HPD Rx IRQ, should be done before set mode as short
2047 	 * pulse interrupts are used for MST
2048 	 */
2049 	amdgpu_dm_irq_resume_early(adev);
2050 
2051 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2052 	s3_handle_mst(ddev, false);
2053 
2054 	/* Do detection */
2055 	drm_connector_list_iter_begin(ddev, &iter);
2056 	drm_for_each_connector_iter(connector, &iter) {
2057 		aconnector = to_amdgpu_dm_connector(connector);
2058 
2059 		/*
2060 		 * Connectors that were already created for MST are handled by
2061 		 * the MST topology manager, so skip them here.
2062 		 */
2063 		if (aconnector->mst_port)
2064 			continue;
2065 
2066 		mutex_lock(&aconnector->hpd_lock);
2067 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2068 			DRM_ERROR("KMS: Failed to detect connector\n");
2069 
2070 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2071 			emulated_link_detect(aconnector->dc_link);
2072 		else
2073 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2074 
2075 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2076 			aconnector->fake_enable = false;
2077 
2078 		if (aconnector->dc_sink)
2079 			dc_sink_release(aconnector->dc_sink);
2080 		aconnector->dc_sink = NULL;
2081 		amdgpu_dm_update_connector_after_detect(aconnector);
2082 		mutex_unlock(&aconnector->hpd_lock);
2083 	}
2084 	drm_connector_list_iter_end(&iter);
2085 
2086 	/* Force mode set in atomic commit */
2087 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2088 		new_crtc_state->active_changed = true;
2089 
2090 	/*
2091 	 * atomic_check is expected to create the dc states. We need to release
2092 	 * them here, since they were duplicated as part of the suspend
2093 	 * procedure.
2094 	 */
2095 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2096 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2097 		if (dm_new_crtc_state->stream) {
2098 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2099 			dc_stream_release(dm_new_crtc_state->stream);
2100 			dm_new_crtc_state->stream = NULL;
2101 		}
2102 	}
2103 
2104 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2105 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2106 		if (dm_new_plane_state->dc_state) {
2107 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2108 			dc_plane_state_release(dm_new_plane_state->dc_state);
2109 			dm_new_plane_state->dc_state = NULL;
2110 		}
2111 	}
2112 
2113 	drm_atomic_helper_resume(ddev, dm->cached_state);
2114 
2115 	dm->cached_state = NULL;
2116 
2117 	amdgpu_dm_irq_resume_late(adev);
2118 
2119 	amdgpu_dm_smu_write_watermarks_table(adev);
2120 
2121 	return 0;
2122 }
2123 
2124 /**
2125  * DOC: DM Lifecycle
2126  *
2127  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2128  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2129  * the base driver's device list to be initialized and torn down accordingly.
2130  *
2131  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2132  */
2133 
2134 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2135 	.name = "dm",
2136 	.early_init = dm_early_init,
2137 	.late_init = dm_late_init,
2138 	.sw_init = dm_sw_init,
2139 	.sw_fini = dm_sw_fini,
2140 	.hw_init = dm_hw_init,
2141 	.hw_fini = dm_hw_fini,
2142 	.suspend = dm_suspend,
2143 	.resume = dm_resume,
2144 	.is_idle = dm_is_idle,
2145 	.wait_for_idle = dm_wait_for_idle,
2146 	.check_soft_reset = dm_check_soft_reset,
2147 	.soft_reset = dm_soft_reset,
2148 	.set_clockgating_state = dm_set_clockgating_state,
2149 	.set_powergating_state = dm_set_powergating_state,
2150 };
2151 
2152 const struct amdgpu_ip_block_version dm_ip_block = {
2154 	.type = AMD_IP_BLOCK_TYPE_DCE,
2155 	.major = 1,
2156 	.minor = 0,
2157 	.rev = 0,
2158 	.funcs = &amdgpu_dm_funcs,
2159 };
2160 
2161 
2162 /**
2163  * DOC: atomic
2164  *
2165  * *WIP*
2166  */
2167 
2168 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2169 	.fb_create = amdgpu_display_user_framebuffer_create,
2170 	.get_format_info = amd_get_format_info,
2171 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2172 	.atomic_check = amdgpu_dm_atomic_check,
2173 	.atomic_commit = drm_atomic_helper_commit,
2174 };
2175 
2176 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2177 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2178 };
2179 
2180 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2181 {
2182 	u32 max_cll, min_cll, max, min, q, r;
2183 	struct amdgpu_dm_backlight_caps *caps;
2184 	struct amdgpu_display_manager *dm;
2185 	struct drm_connector *conn_base;
2186 	struct amdgpu_device *adev;
2187 	struct dc_link *link = NULL;
2188 	static const u8 pre_computed_values[] = {
2189 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2190 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2191 
2192 	if (!aconnector || !aconnector->dc_link)
2193 		return;
2194 
2195 	link = aconnector->dc_link;
2196 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2197 		return;
2198 
2199 	conn_base = &aconnector->base;
2200 	adev = drm_to_adev(conn_base->dev);
2201 	dm = &adev->dm;
2202 	caps = &dm->backlight_caps;
2203 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2204 	caps->aux_support = false;
2205 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2206 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2207 
2208 	if (caps->ext_caps->bits.oled == 1 ||
2209 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2210 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2211 		caps->aux_support = true;
2212 
2213 	/* From the specification (CTA-861-G), for calculating the maximum
2214 	 * luminance we need to use:
2215 	 *	Luminance = 50*2**(CV/32)
2216 	 * Where CV is a one-byte value.
2217 	 * Calculating this expression would require floating-point precision;
2218 	 * to avoid that complexity, we take advantage of the fact that CV is
2219 	 * divided by a constant. By Euclid's division algorithm, CV can be
2220 	 * written as CV = 32*q + r. Substituting this into the Luminance
2221 	 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
2222 	 * pre-compute the values of 2**(r/32). They were generated with the
2223 	 * following Ruby line:
2224 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2225 	 * The results can be verified against pre_computed_values above.
2226 	 */
2228 	q = max_cll >> 5;
2229 	r = max_cll % 32;
2230 	max = (1 << q) * pre_computed_values[r];
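	/*
	 * Worked example (hypothetical value): max_cll = 65 gives q = 2 and
	 * r = 1, so max = (1 << 2) * 51 = 204, matching round(50*2**(65/32.0)).
	 */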
2231 
2232 	// min luminance: maxLum * (CV/255)^2 / 100
2233 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2234 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2235 
2236 	caps->aux_max_input_signal = max;
2237 	caps->aux_min_input_signal = min;
2238 }
2239 
2240 void amdgpu_dm_update_connector_after_detect(
2241 		struct amdgpu_dm_connector *aconnector)
2242 {
2243 	struct drm_connector *connector = &aconnector->base;
2244 	struct drm_device *dev = connector->dev;
2245 	struct dc_sink *sink;
2246 
2247 	/* MST handled by drm_mst framework */
2248 	if (aconnector->mst_mgr.mst_state)
2249 		return;
2250 
2251 	sink = aconnector->dc_link->local_sink;
2252 	if (sink)
2253 		dc_sink_retain(sink);
2254 
2255 	/*
2256 	 * EDID-managed connectors get their first update only in the mode_valid
2257 	 * hook; the connector sink is then set to either the fake or the
2258 	 * physical sink, depending on link status. Skip if already done at boot.
2259 	 */
2260 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2261 			&& aconnector->dc_em_sink) {
2262 
2263 		/*
2264 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to
2265 		 * fake a stream, because connector->sink is NULL on resume.
2266 		 */
2267 		mutex_lock(&dev->mode_config.mutex);
2268 
2269 		if (sink) {
2270 			if (aconnector->dc_sink) {
2271 				amdgpu_dm_update_freesync_caps(connector, NULL);
2272 				/*
2273 				 * Retain and release below bump the sink refcount:
2274 				 * the link no longer points to it after disconnect,
2275 				 * so the next crtc-to-connector reshuffle by UMD
2276 				 * would otherwise trigger an unwanted dc_sink release.
2277 				 */
2278 				dc_sink_release(aconnector->dc_sink);
2279 			}
2280 			aconnector->dc_sink = sink;
2281 			dc_sink_retain(aconnector->dc_sink);
2282 			amdgpu_dm_update_freesync_caps(connector,
2283 					aconnector->edid);
2284 		} else {
2285 			amdgpu_dm_update_freesync_caps(connector, NULL);
2286 			if (!aconnector->dc_sink) {
2287 				aconnector->dc_sink = aconnector->dc_em_sink;
2288 				dc_sink_retain(aconnector->dc_sink);
2289 			}
2290 		}
2291 
2292 		mutex_unlock(&dev->mode_config.mutex);
2293 
2294 		if (sink)
2295 			dc_sink_release(sink);
2296 		return;
2297 	}
2298 
2299 	/*
2300 	 * TODO: temporary guard pending a proper fix;
2301 	 * if this sink is an MST sink, we should not do anything
2302 	 */
2303 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2304 		dc_sink_release(sink);
2305 		return;
2306 	}
2307 
2308 	if (aconnector->dc_sink == sink) {
2309 		/*
2310 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2311 		 * Do nothing!!
2312 		 */
2313 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2314 				aconnector->connector_id);
2315 		if (sink)
2316 			dc_sink_release(sink);
2317 		return;
2318 	}
2319 
2320 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2321 		aconnector->connector_id, aconnector->dc_sink, sink);
2322 
2323 	mutex_lock(&dev->mode_config.mutex);
2324 
2325 	/*
2326 	 * 1. Update status of the drm connector
2327 	 * 2. Send an event and let userspace tell us what to do
2328 	 */
2329 	if (sink) {
2330 		/*
2331 		 * TODO: check if we still need the S3 mode update workaround.
2332 		 * If yes, put it here.
2333 		 */
2334 		if (aconnector->dc_sink) {
2335 			amdgpu_dm_update_freesync_caps(connector, NULL);
2336 			dc_sink_release(aconnector->dc_sink);
2337 		}
2338 
2339 		aconnector->dc_sink = sink;
2340 		dc_sink_retain(aconnector->dc_sink);
2341 		if (sink->dc_edid.length == 0) {
2342 			aconnector->edid = NULL;
2343 			if (aconnector->dc_link->aux_mode) {
2344 				drm_dp_cec_unset_edid(
2345 					&aconnector->dm_dp_aux.aux);
2346 			}
2347 		} else {
2348 			aconnector->edid =
2349 				(struct edid *)sink->dc_edid.raw_edid;
2350 
2351 			drm_connector_update_edid_property(connector,
2352 							   aconnector->edid);
2353 			if (aconnector->dc_link->aux_mode)
2354 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2355 						    aconnector->edid);
2356 		}
2357 
2358 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2359 		update_connector_ext_caps(aconnector);
2360 	} else {
2361 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2362 		amdgpu_dm_update_freesync_caps(connector, NULL);
2363 		drm_connector_update_edid_property(connector, NULL);
2364 		aconnector->num_modes = 0;
2365 		dc_sink_release(aconnector->dc_sink);
2366 		aconnector->dc_sink = NULL;
2367 		aconnector->edid = NULL;
2368 #ifdef CONFIG_DRM_AMD_DC_HDCP
2369 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2370 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2371 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2372 #endif
2373 	}
2374 
2375 	mutex_unlock(&dev->mode_config.mutex);
2376 
2377 	update_subconnector_property(aconnector);
2378 
2379 	if (sink)
2380 		dc_sink_release(sink);
2381 }
2382 
2383 static void handle_hpd_irq(void *param)
2384 {
2385 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2386 	struct drm_connector *connector = &aconnector->base;
2387 	struct drm_device *dev = connector->dev;
2388 	enum dc_connection_type new_connection_type = dc_connection_none;
2389 #ifdef CONFIG_DRM_AMD_DC_HDCP
2390 	struct amdgpu_device *adev = drm_to_adev(dev);
2391 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2392 #endif
2393 
2394 	/*
2395 	 * On failure, or for MST, there is no need to update the connector
2396 	 * status or notify the OS; in the MST case, MST does this itself.
2397 	 */
2398 	mutex_lock(&aconnector->hpd_lock);
2399 
2400 #ifdef CONFIG_DRM_AMD_DC_HDCP
2401 	if (adev->dm.hdcp_workqueue) {
2402 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2403 		dm_con_state->update_hdcp = true;
2404 	}
2405 #endif
2406 	if (aconnector->fake_enable)
2407 		aconnector->fake_enable = false;
2408 
2409 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2410 		DRM_ERROR("KMS: Failed to detect connector\n");
2411 
2412 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2413 		emulated_link_detect(aconnector->dc_link);
2414 
2415 
2416 		drm_modeset_lock_all(dev);
2417 		dm_restore_drm_connector_state(dev, connector);
2418 		drm_modeset_unlock_all(dev);
2419 
2420 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2421 			drm_kms_helper_hotplug_event(dev);
2422 
2423 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2424 		if (new_connection_type == dc_connection_none &&
2425 		    aconnector->dc_link->type == dc_connection_none)
2426 			dm_set_dpms_off(aconnector->dc_link);
2427 
2428 		amdgpu_dm_update_connector_after_detect(aconnector);
2429 
2430 		drm_modeset_lock_all(dev);
2431 		dm_restore_drm_connector_state(dev, connector);
2432 		drm_modeset_unlock_all(dev);
2433 
2434 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2435 			drm_kms_helper_hotplug_event(dev);
2436 	}
2437 	mutex_unlock(&aconnector->hpd_lock);
2438 
2439 }
2440 
2441 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2442 {
2443 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2444 	uint8_t dret;
2445 	bool new_irq_handled = false;
2446 	int dpcd_addr;
2447 	int dpcd_bytes_to_read;
2448 
2449 	const int max_process_count = 30;
2450 	int process_count = 0;
2451 
2452 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2453 
2454 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2455 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2456 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2457 		dpcd_addr = DP_SINK_COUNT;
2458 	} else {
2459 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2460 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2461 		dpcd_addr = DP_SINK_COUNT_ESI;
2462 	}
2463 
2464 	dret = drm_dp_dpcd_read(
2465 		&aconnector->dm_dp_aux.aux,
2466 		dpcd_addr,
2467 		esi,
2468 		dpcd_bytes_to_read);
2469 
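	/*
	 * Drain pending ESI notifications: let the MST manager handle each
	 * IRQ, ACK it at the sink, then re-read the ESI bytes until no new
	 * IRQ is raised (bounded by max_process_count).
	 */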
2470 	while (dret == dpcd_bytes_to_read &&
2471 		process_count < max_process_count) {
2472 		uint8_t retry;
2473 		dret = 0;
2474 
2475 		process_count++;
2476 
2477 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2478 		/* handle HPD short pulse irq */
2479 		if (aconnector->mst_mgr.mst_state)
2480 			drm_dp_mst_hpd_irq(
2481 				&aconnector->mst_mgr,
2482 				esi,
2483 				&new_irq_handled);
2484 
2485 		if (new_irq_handled) {
2486 			/* ACK at DPCD to notify downstream */
2487 			const int ack_dpcd_bytes_to_write =
2488 				dpcd_bytes_to_read - 1;
2489 
2490 			for (retry = 0; retry < 3; retry++) {
2491 				uint8_t wret;
2492 
2493 				wret = drm_dp_dpcd_write(
2494 					&aconnector->dm_dp_aux.aux,
2495 					dpcd_addr + 1,
2496 					&esi[1],
2497 					ack_dpcd_bytes_to_write);
2498 				if (wret == ack_dpcd_bytes_to_write)
2499 					break;
2500 			}
2501 
2502 			/* check if there is new irq to be handled */
2503 			dret = drm_dp_dpcd_read(
2504 				&aconnector->dm_dp_aux.aux,
2505 				dpcd_addr,
2506 				esi,
2507 				dpcd_bytes_to_read);
2508 
2509 			new_irq_handled = false;
2510 		} else {
2511 			break;
2512 		}
2513 	}
2514 
2515 	if (process_count == max_process_count)
2516 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2517 }
2518 
2519 static void handle_hpd_rx_irq(void *param)
2520 {
2521 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2522 	struct drm_connector *connector = &aconnector->base;
2523 	struct drm_device *dev = connector->dev;
2524 	struct dc_link *dc_link = aconnector->dc_link;
2525 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2526 	bool result = false;
2527 	enum dc_connection_type new_connection_type = dc_connection_none;
2528 	struct amdgpu_device *adev = drm_to_adev(dev);
2529 	union hpd_irq_data hpd_irq_data;
2530 
2531 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2532 
2533 	/*
2534 	 * TODO: Temporarily take this mutex so the HPD interrupt does not
2535 	 * race on the GPIO; once an i2c helper is implemented, this mutex
2536 	 * should be retired.
2537 	 */
2538 	if (dc_link->type != dc_connection_mst_branch)
2539 		mutex_lock(&aconnector->hpd_lock);
2540 
2541 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2542 
2543 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2544 		(dc_link->type == dc_connection_mst_branch)) {
2545 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2546 			result = true;
2547 			dm_handle_hpd_rx_irq(aconnector);
2548 			goto out;
2549 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2550 			result = false;
2551 			dm_handle_hpd_rx_irq(aconnector);
2552 			goto out;
2553 		}
2554 	}
2555 
2556 	mutex_lock(&adev->dm.dc_lock);
2557 #ifdef CONFIG_DRM_AMD_DC_HDCP
2558 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2559 #else
2560 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2561 #endif
2562 	mutex_unlock(&adev->dm.dc_lock);
2563 
2564 out:
2565 	if (result && !is_mst_root_connector) {
2566 		/* Downstream Port status changed. */
2567 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2568 			DRM_ERROR("KMS: Failed to detect connector\n");
2569 
2570 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2571 			emulated_link_detect(dc_link);
2572 
2573 			if (aconnector->fake_enable)
2574 				aconnector->fake_enable = false;
2575 
2576 			amdgpu_dm_update_connector_after_detect(aconnector);
2577 
2578 
2579 			drm_modeset_lock_all(dev);
2580 			dm_restore_drm_connector_state(dev, connector);
2581 			drm_modeset_unlock_all(dev);
2582 
2583 			drm_kms_helper_hotplug_event(dev);
2584 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2585 
2586 			if (aconnector->fake_enable)
2587 				aconnector->fake_enable = false;
2588 
2589 			amdgpu_dm_update_connector_after_detect(aconnector);
2590 
2591 
2592 			drm_modeset_lock_all(dev);
2593 			dm_restore_drm_connector_state(dev, connector);
2594 			drm_modeset_unlock_all(dev);
2595 
2596 			drm_kms_helper_hotplug_event(dev);
2597 		}
2598 	}
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2601 		if (adev->dm.hdcp_workqueue)
2602 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2603 	}
2604 #endif
2605 
2606 	if (dc_link->type != dc_connection_mst_branch) {
2607 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2608 		mutex_unlock(&aconnector->hpd_lock);
2609 	}
2610 }
2611 
2612 static void register_hpd_handlers(struct amdgpu_device *adev)
2613 {
2614 	struct drm_device *dev = adev_to_drm(adev);
2615 	struct drm_connector *connector;
2616 	struct amdgpu_dm_connector *aconnector;
2617 	const struct dc_link *dc_link;
2618 	struct dc_interrupt_params int_params = {0};
2619 
2620 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2621 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2622 
2623 	list_for_each_entry(connector,
2624 			&dev->mode_config.connector_list, head) {
2625 
2626 		aconnector = to_amdgpu_dm_connector(connector);
2627 		dc_link = aconnector->dc_link;
2628 
2629 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2630 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2631 			int_params.irq_source = dc_link->irq_source_hpd;
2632 
2633 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2634 					handle_hpd_irq,
2635 					(void *) aconnector);
2636 		}
2637 
2638 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2639 
2640 			/* Also register for DP short pulse (hpd_rx). */
2641 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2642 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2643 
2644 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2645 					handle_hpd_rx_irq,
2646 					(void *) aconnector);
2647 		}
2648 	}
2649 }
2650 
2651 #if defined(CONFIG_DRM_AMD_DC_SI)
2652 /* Register IRQ sources and initialize IRQ callbacks */
2653 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2654 {
2655 	struct dc *dc = adev->dm.dc;
2656 	struct common_irq_params *c_irq_params;
2657 	struct dc_interrupt_params int_params = {0};
2658 	int r;
2659 	int i;
2660 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2661 
2662 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2663 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2664 
2665 	/*
2666 	 * Actions of amdgpu_irq_add_id():
2667 	 * 1. Register a set() function with base driver.
2668 	 *    Base driver will call set() function to enable/disable an
2669 	 *    interrupt in DC hardware.
2670 	 * 2. Register amdgpu_dm_irq_handler().
2671 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2672 	 *    coming from DC hardware.
2673 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2674 	 *    for acknowledging and handling.
	 */
2675 
2676 	/* Use VBLANK interrupt */
2677 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2678 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2679 		if (r) {
2680 			DRM_ERROR("Failed to add crtc irq id!\n");
2681 			return r;
2682 		}
2683 
2684 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2685 		int_params.irq_source =
2686 			dc_interrupt_to_irq_source(dc, i + 1, 0);
2687 
2688 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2689 
2690 		c_irq_params->adev = adev;
2691 		c_irq_params->irq_src = int_params.irq_source;
2692 
2693 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2694 				dm_crtc_high_irq, c_irq_params);
2695 	}
2696 
2697 	/* Use GRPH_PFLIP interrupt */
2698 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2699 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2700 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2701 		if (r) {
2702 			DRM_ERROR("Failed to add page flip irq id!\n");
2703 			return r;
2704 		}
2705 
2706 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2707 		int_params.irq_source =
2708 			dc_interrupt_to_irq_source(dc, i, 0);
2709 
2710 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2711 
2712 		c_irq_params->adev = adev;
2713 		c_irq_params->irq_src = int_params.irq_source;
2714 
2715 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2716 				dm_pflip_high_irq, c_irq_params);
2717 
2718 	}
2719 
2720 	/* HPD */
2721 	r = amdgpu_irq_add_id(adev, client_id,
2722 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2723 	if (r) {
2724 		DRM_ERROR("Failed to add hpd irq id!\n");
2725 		return r;
2726 	}
2727 
2728 	register_hpd_handlers(adev);
2729 
2730 	return 0;
2731 }
2732 #endif
2733 
2734 /* Register IRQ sources and initialize IRQ callbacks */
2735 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2736 {
2737 	struct dc *dc = adev->dm.dc;
2738 	struct common_irq_params *c_irq_params;
2739 	struct dc_interrupt_params int_params = {0};
2740 	int r;
2741 	int i;
2742 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2743 
2744 	if (adev->asic_type >= CHIP_VEGA10)
2745 		client_id = SOC15_IH_CLIENTID_DCE;
2746 
2747 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2748 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2749 
2750 	/*
2751 	 * Actions of amdgpu_irq_add_id():
2752 	 * 1. Register a set() function with base driver.
2753 	 *    Base driver will call set() function to enable/disable an
2754 	 *    interrupt in DC hardware.
2755 	 * 2. Register amdgpu_dm_irq_handler().
2756 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2757 	 *    coming from DC hardware.
2758 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2759 	 *    for acknowledging and handling.
	 */
2760 
2761 	/* Use VBLANK interrupt */
2762 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2763 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2764 		if (r) {
2765 			DRM_ERROR("Failed to add crtc irq id!\n");
2766 			return r;
2767 		}
2768 
2769 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2770 		int_params.irq_source =
2771 			dc_interrupt_to_irq_source(dc, i, 0);
2772 
2773 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2774 
2775 		c_irq_params->adev = adev;
2776 		c_irq_params->irq_src = int_params.irq_source;
2777 
2778 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2779 				dm_crtc_high_irq, c_irq_params);
2780 	}
2781 
2782 	/* Use VUPDATE interrupt */
2783 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2784 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2785 		if (r) {
2786 			DRM_ERROR("Failed to add vupdate irq id!\n");
2787 			return r;
2788 		}
2789 
2790 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2791 		int_params.irq_source =
2792 			dc_interrupt_to_irq_source(dc, i, 0);
2793 
2794 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2795 
2796 		c_irq_params->adev = adev;
2797 		c_irq_params->irq_src = int_params.irq_source;
2798 
2799 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2800 				dm_vupdate_high_irq, c_irq_params);
2801 	}
2802 
2803 	/* Use GRPH_PFLIP interrupt */
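	/* GRPH_PFLIP source IDs are spaced two apart per display, hence i += 2. */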
2804 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2805 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2806 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2807 		if (r) {
2808 			DRM_ERROR("Failed to add page flip irq id!\n");
2809 			return r;
2810 		}
2811 
2812 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2813 		int_params.irq_source =
2814 			dc_interrupt_to_irq_source(dc, i, 0);
2815 
2816 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2817 
2818 		c_irq_params->adev = adev;
2819 		c_irq_params->irq_src = int_params.irq_source;
2820 
2821 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2822 				dm_pflip_high_irq, c_irq_params);
2823 
2824 	}
2825 
2826 	/* HPD */
2827 	r = amdgpu_irq_add_id(adev, client_id,
2828 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2829 	if (r) {
2830 		DRM_ERROR("Failed to add hpd irq id!\n");
2831 		return r;
2832 	}
2833 
2834 	register_hpd_handlers(adev);
2835 
2836 	return 0;
2837 }
2838 
2839 #if defined(CONFIG_DRM_AMD_DC_DCN)
2840 /* Register IRQ sources and initialize IRQ callbacks */
2841 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2842 {
2843 	struct dc *dc = adev->dm.dc;
2844 	struct common_irq_params *c_irq_params;
2845 	struct dc_interrupt_params int_params = {0};
2846 	int r;
2847 	int i;
2848 
2849 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2850 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2851 
2852 	/*
2853 	 * Actions of amdgpu_irq_add_id():
2854 	 * 1. Register a set() function with base driver.
2855 	 *    Base driver will call set() function to enable/disable an
2856 	 *    interrupt in DC hardware.
2857 	 * 2. Register amdgpu_dm_irq_handler().
2858 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2859 	 *    coming from DC hardware.
2860 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2861 	 *    for acknowledging and handling.
2862 	 */
2863 
2864 	/* Use VSTARTUP interrupt */
2865 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2866 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2867 			i++) {
2868 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2869 
2870 		if (r) {
2871 			DRM_ERROR("Failed to add crtc irq id!\n");
2872 			return r;
2873 		}
2874 
2875 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2876 		int_params.irq_source =
2877 			dc_interrupt_to_irq_source(dc, i, 0);
2878 
2879 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2880 
2881 		c_irq_params->adev = adev;
2882 		c_irq_params->irq_src = int_params.irq_source;
2883 
2884 		amdgpu_dm_irq_register_interrupt(
2885 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2886 	}
2887 
2888 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2889 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2890 	 * to trigger at end of each vblank, regardless of state of the lock,
2891 	 * matching DCE behaviour.
2892 	 */
2893 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2894 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2895 	     i++) {
2896 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2897 
2898 		if (r) {
2899 			DRM_ERROR("Failed to add vupdate irq id!\n");
2900 			return r;
2901 		}
2902 
2903 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2904 		int_params.irq_source =
2905 			dc_interrupt_to_irq_source(dc, i, 0);
2906 
2907 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2908 
2909 		c_irq_params->adev = adev;
2910 		c_irq_params->irq_src = int_params.irq_source;
2911 
2912 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2913 				dm_vupdate_high_irq, c_irq_params);
2914 	}
2915 
2916 	/* Use GRPH_PFLIP interrupt */
2917 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2918 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2919 			i++) {
2920 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2921 		if (r) {
2922 			DRM_ERROR("Failed to add page flip irq id!\n");
2923 			return r;
2924 		}
2925 
2926 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2927 		int_params.irq_source =
2928 			dc_interrupt_to_irq_source(dc, i, 0);
2929 
2930 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2931 
2932 		c_irq_params->adev = adev;
2933 		c_irq_params->irq_src = int_params.irq_source;
2934 
2935 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2936 				dm_pflip_high_irq, c_irq_params);
2937 
2938 	}
2939 
2940 	/* HPD */
2941 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2942 			&adev->hpd_irq);
2943 	if (r) {
2944 		DRM_ERROR("Failed to add hpd irq id!\n");
2945 		return r;
2946 	}
2947 
2948 	register_hpd_handlers(adev);
2949 
2950 	return 0;
2951 }
2952 #endif
2953 
2954 /*
2955  * Acquires the lock for the atomic state object and returns
2956  * the new atomic state.
2957  *
2958  * This should only be called during atomic check.
2959  */
2960 static int dm_atomic_get_state(struct drm_atomic_state *state,
2961 			       struct dm_atomic_state **dm_state)
2962 {
2963 	struct drm_device *dev = state->dev;
2964 	struct amdgpu_device *adev = drm_to_adev(dev);
2965 	struct amdgpu_display_manager *dm = &adev->dm;
2966 	struct drm_private_state *priv_state;
2967 
2968 	if (*dm_state)
2969 		return 0;
2970 
2971 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2972 	if (IS_ERR(priv_state))
2973 		return PTR_ERR(priv_state);
2974 
2975 	*dm_state = to_dm_atomic_state(priv_state);
2976 
2977 	return 0;
2978 }
2979 
2980 static struct dm_atomic_state *
2981 dm_atomic_get_new_state(struct drm_atomic_state *state)
2982 {
2983 	struct drm_device *dev = state->dev;
2984 	struct amdgpu_device *adev = drm_to_adev(dev);
2985 	struct amdgpu_display_manager *dm = &adev->dm;
2986 	struct drm_private_obj *obj;
2987 	struct drm_private_state *new_obj_state;
2988 	int i;
2989 
2990 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2991 		if (obj->funcs == dm->atomic_obj.funcs)
2992 			return to_dm_atomic_state(new_obj_state);
2993 	}
2994 
2995 	return NULL;
2996 }
2997 
2998 static struct drm_private_state *
2999 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3000 {
3001 	struct dm_atomic_state *old_state, *new_state;
3002 
3003 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3004 	if (!new_state)
3005 		return NULL;
3006 
3007 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3008 
3009 	old_state = to_dm_atomic_state(obj->state);
3010 
3011 	if (old_state && old_state->context)
3012 		new_state->context = dc_copy_state(old_state->context);
3013 
3014 	if (!new_state->context) {
3015 		kfree(new_state);
3016 		return NULL;
3017 	}
3018 
3019 	return &new_state->base;
3020 }
3021 
3022 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3023 				    struct drm_private_state *state)
3024 {
3025 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3026 
3027 	if (dm_state && dm_state->context)
3028 		dc_release_state(dm_state->context);
3029 
3030 	kfree(dm_state);
3031 }
3032 
3033 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3034 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3035 	.atomic_destroy_state = dm_atomic_destroy_state,
3036 };
3037 
3038 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3039 {
3040 	struct dm_atomic_state *state;
3041 	int r;
3042 
3043 	adev->mode_info.mode_config_initialized = true;
3044 
3045 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3046 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3047 
3048 	adev_to_drm(adev)->mode_config.max_width = 16384;
3049 	adev_to_drm(adev)->mode_config.max_height = 16384;
3050 
3051 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3052 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3053 	/* indicates support for immediate flip */
3054 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3055 
3056 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3057 
3058 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3059 	if (!state)
3060 		return -ENOMEM;
3061 
3062 	state->context = dc_create_state(adev->dm.dc);
3063 	if (!state->context) {
3064 		kfree(state);
3065 		return -ENOMEM;
3066 	}
3067 
3068 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3069 
3070 	drm_atomic_private_obj_init(adev_to_drm(adev),
3071 				    &adev->dm.atomic_obj,
3072 				    &state->base,
3073 				    &dm_atomic_state_funcs);
3074 
3075 	r = amdgpu_display_modeset_create_props(adev);
3076 	if (r) {
3077 		dc_release_state(state->context);
3078 		kfree(state);
3079 		return r;
3080 	}
3081 
3082 	r = amdgpu_dm_audio_init(adev);
3083 	if (r) {
3084 		dc_release_state(state->context);
3085 		kfree(state);
3086 		return r;
3087 	}
3088 
3089 	return 0;
3090 }
3091 
3092 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3093 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3094 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3095 
3096 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3097 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3098 
3099 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3100 {
3101 #if defined(CONFIG_ACPI)
3102 	struct amdgpu_dm_backlight_caps caps;
3103 
3104 	memset(&caps, 0, sizeof(caps));
3105 
3106 	if (dm->backlight_caps.caps_valid)
3107 		return;
3108 
3109 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3110 	if (caps.caps_valid) {
3111 		dm->backlight_caps.caps_valid = true;
3112 		if (caps.aux_support)
3113 			return;
3114 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3115 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3116 	} else {
3117 		dm->backlight_caps.min_input_signal =
3118 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3119 		dm->backlight_caps.max_input_signal =
3120 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3121 	}
3122 #else
3123 	if (dm->backlight_caps.aux_support)
3124 		return;
3125 
3126 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3127 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3128 #endif
3129 }
3130 
3131 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3132 {
3133 	bool rc;
3134 
3135 	if (!link)
3136 		return 1;
3137 
3138 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3139 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3140 
3141 	return rc ? 0 : 1;
3142 }
3143 
3144 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3145 				unsigned *min, unsigned *max)
3146 {
3147 	if (!caps)
3148 		return 0;
3149 
3150 	if (caps->aux_support) {
3151 		// Firmware limits are in nits, DC API wants millinits.
3152 		*max = 1000 * caps->aux_max_input_signal;
3153 		*min = 1000 * caps->aux_min_input_signal;
3154 	} else {
3155 		// Firmware limits are 8-bit, PWM control is 16-bit.
3156 		*max = 0x101 * caps->max_input_signal;
3157 		*min = 0x101 * caps->min_input_signal;
3158 	}
3159 	return 1;
3160 }
3161 
3162 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3163 					uint32_t brightness)
3164 {
3165 	unsigned min, max;
3166 
3167 	if (!get_brightness_range(caps, &min, &max))
3168 		return brightness;
3169 
3170 	// Rescale 0..255 to min..max
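	// e.g. (PWM path, default limits 12..255): min = 0x101 * 12 = 3084 and
	// max = 0x101 * 255 = 65535, so user brightness 128 maps to
	// 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432.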
3171 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3172 				       AMDGPU_MAX_BL_LEVEL);
3173 }
3174 
3175 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3176 				      uint32_t brightness)
3177 {
3178 	unsigned min, max;
3179 
3180 	if (!get_brightness_range(caps, &min, &max))
3181 		return brightness;
3182 
3183 	if (brightness < min)
3184 		return 0;
3185 	// Rescale min..max to 0..255
3186 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3187 				 max - min);
3188 }
3189 
3190 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3191 {
3192 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3193 	struct amdgpu_dm_backlight_caps caps;
3194 	struct dc_link *link = NULL;
3195 	u32 brightness;
3196 	bool rc;
3197 
3198 	amdgpu_dm_update_backlight_caps(dm);
3199 	caps = dm->backlight_caps;
3200 
3201 	link = (struct dc_link *)dm->backlight_link;
3202 
3203 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3204 	// Change brightness based on AUX property
3205 	if (caps.aux_support)
3206 		return set_backlight_via_aux(link, brightness);
3207 
3208 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3209 
3210 	return rc ? 0 : 1;
3211 }
3212 
3213 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3214 {
3215 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3216 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3217 
3218 	if (ret == DC_ERROR_UNEXPECTED)
3219 		return bd->props.brightness;
3220 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3221 }
3222 
3223 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3224 	.options = BL_CORE_SUSPENDRESUME,
3225 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3226 	.update_status	= amdgpu_dm_backlight_update_status,
3227 };
3228 
3229 static void
3230 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3231 {
3232 	char bl_name[16];
3233 	struct backlight_properties props = { 0 };
3234 
3235 	amdgpu_dm_update_backlight_caps(dm);
3236 
3237 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3238 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3239 	props.type = BACKLIGHT_RAW;
3240 
3241 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3242 		 adev_to_drm(dm->adev)->primary->index);
3243 
3244 	dm->backlight_dev = backlight_device_register(bl_name,
3245 						      adev_to_drm(dm->adev)->dev,
3246 						      dm,
3247 						      &amdgpu_dm_backlight_ops,
3248 						      &props);
3249 
3250 	if (IS_ERR(dm->backlight_dev))
3251 		DRM_ERROR("DM: Backlight registration failed!\n");
3252 	else
3253 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3254 }
3255 
3256 #endif
3257 
3258 static int initialize_plane(struct amdgpu_display_manager *dm,
3259 			    struct amdgpu_mode_info *mode_info, int plane_id,
3260 			    enum drm_plane_type plane_type,
3261 			    const struct dc_plane_cap *plane_cap)
3262 {
3263 	struct drm_plane *plane;
3264 	unsigned long possible_crtcs;
3265 	int ret = 0;
3266 
3267 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3268 	if (!plane) {
3269 		DRM_ERROR("KMS: Failed to allocate plane\n");
3270 		return -ENOMEM;
3271 	}
3272 	plane->type = plane_type;
3273 
3274 	/*
3275 	 * HACK: IGT tests expect that the primary plane for a CRTC
3276 	 * can only have one possible CRTC. Only expose support for
3277 	 * any CRTC if they're not going to be used as a primary plane
3278 	 * for a CRTC - like overlay or underlay planes.
3279 	 */
3280 	possible_crtcs = 1 << plane_id;
3281 	if (plane_id >= dm->dc->caps.max_streams)
3282 		possible_crtcs = 0xff;
3283 
3284 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3285 
3286 	if (ret) {
3287 		DRM_ERROR("KMS: Failed to initialize plane\n");
3288 		kfree(plane);
3289 		return ret;
3290 	}
3291 
3292 	if (mode_info)
3293 		mode_info->planes[plane_id] = plane;
3294 
3295 	return ret;
3296 }
3297 
3298 
3299 static void register_backlight_device(struct amdgpu_display_manager *dm,
3300 				      struct dc_link *link)
3301 {
3302 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3303 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3304 
3305 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3306 	    link->type != dc_connection_none) {
3307 		/*
3308 		 * Even if registration fails, we should continue with
3309 		 * DM initialization, because not having backlight control
3310 		 * is better than a black screen.
3311 		 */
3312 		amdgpu_dm_register_backlight_device(dm);
3313 
3314 		if (dm->backlight_dev)
3315 			dm->backlight_link = link;
3316 	}
3317 #endif
3318 }
3319 
3320 
3321 /*
3322  * In this architecture, the association
3323  * connector -> encoder -> crtc
3324  * is not really required. The crtc and connector will hold the
3325  * display_index as an abstraction to use with the DAL component.
3326  *
3327  * Returns 0 on success
3328  */
3329 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3330 {
3331 	struct amdgpu_display_manager *dm = &adev->dm;
3332 	int32_t i;
3333 	struct amdgpu_dm_connector *aconnector = NULL;
3334 	struct amdgpu_encoder *aencoder = NULL;
3335 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3336 	uint32_t link_cnt;
3337 	int32_t primary_planes;
3338 	enum dc_connection_type new_connection_type = dc_connection_none;
3339 	const struct dc_plane_cap *plane;
3340 
3341 	dm->display_indexes_num = dm->dc->caps.max_streams;
3342 	/* Update the actual number of CRTCs in use */
3343 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3344 
3345 	link_cnt = dm->dc->caps.max_links;
3346 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3347 		DRM_ERROR("DM: Failed to initialize mode config\n");
3348 		return -EINVAL;
3349 	}
3350 
3351 	/* There is one primary plane per CRTC */
3352 	primary_planes = dm->dc->caps.max_streams;
3353 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3354 
3355 	/*
3356 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
3357 	 * Order is reversed to match iteration order in atomic check.
3358 	 */
3359 	for (i = (primary_planes - 1); i >= 0; i--) {
3360 		plane = &dm->dc->caps.planes[i];
3361 
3362 		if (initialize_plane(dm, mode_info, i,
3363 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3364 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3365 			goto fail;
3366 		}
3367 	}
3368 
3369 	/*
3370 	 * Initialize overlay planes, index starting after primary planes.
3371 	 * These planes have a higher DRM index than the primary planes since
3372 	 * they should be considered as having a higher z-order.
3373 	 * Order is reversed to match iteration order in atomic check.
3374 	 *
3375 	 * Only support DCN for now, and only expose one so we don't encourage
3376 	 * userspace to use up all the pipes.
3377 	 */
3378 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3379 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3380 
3381 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3382 			continue;
3383 
3384 		if (!plane->blends_with_above || !plane->blends_with_below)
3385 			continue;
3386 
3387 		if (!plane->pixel_format_support.argb8888)
3388 			continue;
3389 
3390 		if (initialize_plane(dm, NULL, primary_planes + i,
3391 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3392 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3393 			goto fail;
3394 		}
3395 
3396 		/* Only create one overlay plane. */
3397 		break;
3398 	}
3399 
3400 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3401 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3402 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3403 			goto fail;
3404 		}
3405 
3406 	/* loops over all connectors on the board */
3407 	for (i = 0; i < link_cnt; i++) {
3408 		struct dc_link *link = NULL;
3409 
3410 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3411 			DRM_ERROR(
3412 				"KMS: Cannot support more than %d display indexes\n",
3413 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3414 			continue;
3415 		}
3416 
3417 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3418 		if (!aconnector)
3419 			goto fail;
3420 
3421 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3422 		if (!aencoder)
3423 			goto fail;
3424 
3425 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3426 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3427 			goto fail;
3428 		}
3429 
3430 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3431 			DRM_ERROR("KMS: Failed to initialize connector\n");
3432 			goto fail;
3433 		}
3434 
3435 		link = dc_get_link_at_index(dm->dc, i);
3436 
3437 		if (!dc_link_detect_sink(link, &new_connection_type))
3438 			DRM_ERROR("KMS: Failed to detect connector\n");
3439 
3440 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3441 			emulated_link_detect(link);
3442 			amdgpu_dm_update_connector_after_detect(aconnector);
3443 
3444 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3445 			amdgpu_dm_update_connector_after_detect(aconnector);
3446 			register_backlight_device(dm, link);
3447 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3448 				amdgpu_dm_set_psr_caps(link);
3449 		}
	}
3453 
3454 	/* Software is initialized. Now we can register interrupt handlers. */
3455 	switch (adev->asic_type) {
3456 #if defined(CONFIG_DRM_AMD_DC_SI)
3457 	case CHIP_TAHITI:
3458 	case CHIP_PITCAIRN:
3459 	case CHIP_VERDE:
3460 	case CHIP_OLAND:
3461 		if (dce60_register_irq_handlers(dm->adev)) {
3462 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3463 			goto fail;
3464 		}
3465 		break;
3466 #endif
3467 	case CHIP_BONAIRE:
3468 	case CHIP_HAWAII:
3469 	case CHIP_KAVERI:
3470 	case CHIP_KABINI:
3471 	case CHIP_MULLINS:
3472 	case CHIP_TONGA:
3473 	case CHIP_FIJI:
3474 	case CHIP_CARRIZO:
3475 	case CHIP_STONEY:
3476 	case CHIP_POLARIS11:
3477 	case CHIP_POLARIS10:
3478 	case CHIP_POLARIS12:
3479 	case CHIP_VEGAM:
3480 	case CHIP_VEGA10:
3481 	case CHIP_VEGA12:
3482 	case CHIP_VEGA20:
3483 		if (dce110_register_irq_handlers(dm->adev)) {
3484 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3485 			goto fail;
3486 		}
3487 		break;
3488 #if defined(CONFIG_DRM_AMD_DC_DCN)
3489 	case CHIP_RAVEN:
3490 	case CHIP_NAVI12:
3491 	case CHIP_NAVI10:
3492 	case CHIP_NAVI14:
3493 	case CHIP_RENOIR:
3494 	case CHIP_SIENNA_CICHLID:
3495 	case CHIP_NAVY_FLOUNDER:
3496 	case CHIP_DIMGREY_CAVEFISH:
3497 	case CHIP_VANGOGH:
3498 		if (dcn10_register_irq_handlers(dm->adev)) {
3499 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3500 			goto fail;
3501 		}
3502 		break;
3503 #endif
3504 	default:
3505 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3506 		goto fail;
3507 	}
3508 
3509 	return 0;
3510 fail:
3511 	kfree(aencoder);
3512 	kfree(aconnector);
3513 
3514 	return -EINVAL;
3515 }
3516 
3517 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3518 {
3519 	drm_mode_config_cleanup(dm->ddev);
3520 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3522 }
3523 
3524 /******************************************************************************
3525  * amdgpu_display_funcs functions
3526  *****************************************************************************/
3527 
3528 /*
3529  * dm_bandwidth_update - program display watermarks
3530  *
3531  * @adev: amdgpu_device pointer
3532  *
3533  * Calculate and program the display watermarks and line buffer allocation.
3534  */
3535 static void dm_bandwidth_update(struct amdgpu_device *adev)
3536 {
3537 	/* TODO: implement later */
3538 }
3539 
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
3553 
3554 #if defined(CONFIG_DEBUG_KERNEL_DC)
3555 
3556 static ssize_t s3_debug_store(struct device *device,
3557 			      struct device_attribute *attr,
3558 			      const char *buf,
3559 			      size_t count)
3560 {
3561 	int ret;
3562 	int s3_state;
3563 	struct drm_device *drm_dev = dev_get_drvdata(device);
3564 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3565 
	ret = kstrtoint(buf, 0, &s3_state);
	if (ret)
		return ret;

	if (s3_state) {
		dm_resume(adev);
		drm_kms_helper_hotplug_event(adev_to_drm(adev));
	} else {
		dm_suspend(adev);
	}

	return count;
3577 }
3578 
3579 DEVICE_ATTR_WO(s3_debug);
3580 
3581 #endif
3582 
3583 static int dm_early_init(void *handle)
3584 {
3585 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3586 
3587 	switch (adev->asic_type) {
3588 #if defined(CONFIG_DRM_AMD_DC_SI)
3589 	case CHIP_TAHITI:
3590 	case CHIP_PITCAIRN:
3591 	case CHIP_VERDE:
3592 		adev->mode_info.num_crtc = 6;
3593 		adev->mode_info.num_hpd = 6;
3594 		adev->mode_info.num_dig = 6;
3595 		break;
3596 	case CHIP_OLAND:
3597 		adev->mode_info.num_crtc = 2;
3598 		adev->mode_info.num_hpd = 2;
3599 		adev->mode_info.num_dig = 2;
3600 		break;
3601 #endif
3602 	case CHIP_BONAIRE:
3603 	case CHIP_HAWAII:
3604 		adev->mode_info.num_crtc = 6;
3605 		adev->mode_info.num_hpd = 6;
3606 		adev->mode_info.num_dig = 6;
3607 		break;
3608 	case CHIP_KAVERI:
3609 		adev->mode_info.num_crtc = 4;
3610 		adev->mode_info.num_hpd = 6;
3611 		adev->mode_info.num_dig = 7;
3612 		break;
3613 	case CHIP_KABINI:
3614 	case CHIP_MULLINS:
3615 		adev->mode_info.num_crtc = 2;
3616 		adev->mode_info.num_hpd = 6;
3617 		adev->mode_info.num_dig = 6;
3618 		break;
3619 	case CHIP_FIJI:
3620 	case CHIP_TONGA:
3621 		adev->mode_info.num_crtc = 6;
3622 		adev->mode_info.num_hpd = 6;
3623 		adev->mode_info.num_dig = 7;
3624 		break;
3625 	case CHIP_CARRIZO:
3626 		adev->mode_info.num_crtc = 3;
3627 		adev->mode_info.num_hpd = 6;
3628 		adev->mode_info.num_dig = 9;
3629 		break;
3630 	case CHIP_STONEY:
3631 		adev->mode_info.num_crtc = 2;
3632 		adev->mode_info.num_hpd = 6;
3633 		adev->mode_info.num_dig = 9;
3634 		break;
3635 	case CHIP_POLARIS11:
3636 	case CHIP_POLARIS12:
3637 		adev->mode_info.num_crtc = 5;
3638 		adev->mode_info.num_hpd = 5;
3639 		adev->mode_info.num_dig = 5;
3640 		break;
3641 	case CHIP_POLARIS10:
3642 	case CHIP_VEGAM:
3643 		adev->mode_info.num_crtc = 6;
3644 		adev->mode_info.num_hpd = 6;
3645 		adev->mode_info.num_dig = 6;
3646 		break;
3647 	case CHIP_VEGA10:
3648 	case CHIP_VEGA12:
3649 	case CHIP_VEGA20:
3650 		adev->mode_info.num_crtc = 6;
3651 		adev->mode_info.num_hpd = 6;
3652 		adev->mode_info.num_dig = 6;
3653 		break;
3654 #if defined(CONFIG_DRM_AMD_DC_DCN)
3655 	case CHIP_RAVEN:
3656 	case CHIP_RENOIR:
3657 	case CHIP_VANGOGH:
3658 		adev->mode_info.num_crtc = 4;
3659 		adev->mode_info.num_hpd = 4;
3660 		adev->mode_info.num_dig = 4;
3661 		break;
3662 	case CHIP_NAVI10:
3663 	case CHIP_NAVI12:
3664 	case CHIP_SIENNA_CICHLID:
3665 	case CHIP_NAVY_FLOUNDER:
3666 		adev->mode_info.num_crtc = 6;
3667 		adev->mode_info.num_hpd = 6;
3668 		adev->mode_info.num_dig = 6;
3669 		break;
3670 	case CHIP_NAVI14:
3671 	case CHIP_DIMGREY_CAVEFISH:
3672 		adev->mode_info.num_crtc = 5;
3673 		adev->mode_info.num_hpd = 5;
3674 		adev->mode_info.num_dig = 5;
3675 		break;
3676 #endif
3677 	default:
3678 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3679 		return -EINVAL;
3680 	}
3681 
3682 	amdgpu_dm_set_irq_funcs(adev);
3683 
	if (!adev->mode_info.funcs)
		adev->mode_info.funcs = &dm_display_funcs;
3686 
3687 	/*
3688 	 * Note: Do NOT change adev->audio_endpt_rreg and
3689 	 * adev->audio_endpt_wreg because they are initialised in
3690 	 * amdgpu_device_init()
3691 	 */
3692 #if defined(CONFIG_DEBUG_KERNEL_DC)
3693 	device_create_file(
3694 		adev_to_drm(adev)->dev,
3695 		&dev_attr_s3_debug);
3696 #endif
3697 
3698 	return 0;
3699 }
3700 
3701 static bool modeset_required(struct drm_crtc_state *crtc_state,
3702 			     struct dc_stream_state *new_stream,
3703 			     struct dc_stream_state *old_stream)
3704 {
3705 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3706 }
3707 
3708 static bool modereset_required(struct drm_crtc_state *crtc_state)
3709 {
3710 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3711 }
3712 
3713 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3714 {
3715 	drm_encoder_cleanup(encoder);
3716 	kfree(encoder);
3717 }
3718 
3719 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3720 	.destroy = amdgpu_dm_encoder_destroy,
3721 };
3722 
3723 
3724 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3725 					 struct drm_framebuffer *fb,
3726 					 int *min_downscale, int *max_upscale)
3727 {
3728 	struct amdgpu_device *adev = drm_to_adev(dev);
3729 	struct dc *dc = adev->dm.dc;
3730 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3731 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3732 
3733 	switch (fb->format->format) {
3734 	case DRM_FORMAT_P010:
3735 	case DRM_FORMAT_NV12:
3736 	case DRM_FORMAT_NV21:
3737 		*max_upscale = plane_cap->max_upscale_factor.nv12;
3738 		*min_downscale = plane_cap->max_downscale_factor.nv12;
3739 		break;
3740 
3741 	case DRM_FORMAT_XRGB16161616F:
3742 	case DRM_FORMAT_ARGB16161616F:
3743 	case DRM_FORMAT_XBGR16161616F:
3744 	case DRM_FORMAT_ABGR16161616F:
3745 		*max_upscale = plane_cap->max_upscale_factor.fp16;
3746 		*min_downscale = plane_cap->max_downscale_factor.fp16;
3747 		break;
3748 
3749 	default:
3750 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
3751 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
3752 		break;
3753 	}
3754 
	/*
	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e.
	 * only a scaling factor of exactly 1.0, which is 1000 in these units.
	 */
3759 	if (*max_upscale == 1)
3760 		*max_upscale = 1000;
3761 
3762 	if (*min_downscale == 1)
3763 		*min_downscale = 1000;
3764 }
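
/*
 * Worked example (hypothetical caps, not taken from any specific ASIC): with
 * max_upscale_factor.argb8888 = 16000 and max_downscale_factor.argb8888 =
 * 250, an ARGB8888 plane may be scaled anywhere between 0.25x (250/1000) and
 * 16x (16000/1000) of its source size.
 */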
3767 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3768 				struct dc_scaling_info *scaling_info)
3769 {
3770 	int scale_w, scale_h, min_downscale, max_upscale;
3771 
3772 	memset(scaling_info, 0, sizeof(*scaling_info));
3773 
	/* Source coordinates are 16.16 fixed point; drop the fractional part for now. */
3775 	scaling_info->src_rect.x = state->src_x >> 16;
3776 	scaling_info->src_rect.y = state->src_y >> 16;
3777 
3778 	scaling_info->src_rect.width = state->src_w >> 16;
3779 	if (scaling_info->src_rect.width == 0)
3780 		return -EINVAL;
3781 
3782 	scaling_info->src_rect.height = state->src_h >> 16;
3783 	if (scaling_info->src_rect.height == 0)
3784 		return -EINVAL;
3785 
3786 	scaling_info->dst_rect.x = state->crtc_x;
3787 	scaling_info->dst_rect.y = state->crtc_y;
3788 
3789 	if (state->crtc_w == 0)
3790 		return -EINVAL;
3791 
3792 	scaling_info->dst_rect.width = state->crtc_w;
3793 
3794 	if (state->crtc_h == 0)
3795 		return -EINVAL;
3796 
3797 	scaling_info->dst_rect.height = state->crtc_h;
3798 
3799 	/* DRM doesn't specify clipping on destination output. */
3800 	scaling_info->clip_rect = scaling_info->dst_rect;
3801 
3802 	/* Validate scaling per-format with DC plane caps */
3803 	if (state->plane && state->plane->dev && state->fb) {
3804 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3805 					     &min_downscale, &max_upscale);
3806 	} else {
3807 		min_downscale = 250;
3808 		max_upscale = 16000;
3809 	}
3810 
3811 	scale_w = scaling_info->dst_rect.width * 1000 /
3812 		  scaling_info->src_rect.width;
3813 
3814 	if (scale_w < min_downscale || scale_w > max_upscale)
3815 		return -EINVAL;
3816 
3817 	scale_h = scaling_info->dst_rect.height * 1000 /
3818 		  scaling_info->src_rect.height;
3819 
3820 	if (scale_h < min_downscale || scale_h > max_upscale)
3821 		return -EINVAL;
3822 
	/*
	 * "scaling_quality" can be ignored for now; quality = 0 makes DC
	 * assume reasonable defaults based on the format.
	 */
3827 
3828 	return 0;
3829 }
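
/*
 * Worked example for the checks above (hypothetical plane state): a
 * 1920x1080 source rect scanned out into a 960x540 CRTC rect gives
 * scale_w = 960 * 1000 / 1920 = 500, a 0.5x downscale, which is accepted
 * only if min_downscale <= 500.
 */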
3830 
3831 static void
3832 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3833 				 uint64_t tiling_flags)
3834 {
3835 	/* Fill GFX8 params */
3836 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3837 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3838 
3839 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3840 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3841 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3842 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3843 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3844 
3845 		/* XXX fix me for VI */
3846 		tiling_info->gfx8.num_banks = num_banks;
3847 		tiling_info->gfx8.array_mode =
3848 				DC_ARRAY_2D_TILED_THIN1;
3849 		tiling_info->gfx8.tile_split = tile_split;
3850 		tiling_info->gfx8.bank_width = bankw;
3851 		tiling_info->gfx8.bank_height = bankh;
3852 		tiling_info->gfx8.tile_aspect = mtaspect;
3853 		tiling_info->gfx8.tile_mode =
3854 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3855 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3856 			== DC_ARRAY_1D_TILED_THIN1) {
3857 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3858 	}
3859 
3860 	tiling_info->gfx8.pipe_config =
3861 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3862 }
3863 
3864 static void
3865 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3866 				  union dc_tiling_info *tiling_info)
3867 {
3868 	tiling_info->gfx9.num_pipes =
3869 		adev->gfx.config.gb_addr_config_fields.num_pipes;
3870 	tiling_info->gfx9.num_banks =
3871 		adev->gfx.config.gb_addr_config_fields.num_banks;
3872 	tiling_info->gfx9.pipe_interleave =
3873 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3874 	tiling_info->gfx9.num_shader_engines =
3875 		adev->gfx.config.gb_addr_config_fields.num_se;
3876 	tiling_info->gfx9.max_compressed_frags =
3877 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3878 	tiling_info->gfx9.num_rb_per_se =
3879 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3880 	tiling_info->gfx9.shaderEnable = 1;
3881 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3882 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
3883 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3884 	    adev->asic_type == CHIP_VANGOGH)
3885 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3886 }
3887 
3888 static int
3889 validate_dcc(struct amdgpu_device *adev,
3890 	     const enum surface_pixel_format format,
3891 	     const enum dc_rotation_angle rotation,
3892 	     const union dc_tiling_info *tiling_info,
3893 	     const struct dc_plane_dcc_param *dcc,
3894 	     const struct dc_plane_address *address,
3895 	     const struct plane_size *plane_size)
3896 {
3897 	struct dc *dc = adev->dm.dc;
3898 	struct dc_dcc_surface_param input;
3899 	struct dc_surface_dcc_cap output;
3900 
3901 	memset(&input, 0, sizeof(input));
3902 	memset(&output, 0, sizeof(output));
3903 
3904 	if (!dcc->enable)
3905 		return 0;
3906 
3907 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3908 	    !dc->cap_funcs.get_dcc_compression_cap)
3909 		return -EINVAL;
3910 
3911 	input.format = format;
3912 	input.surface_size.width = plane_size->surface_size.width;
3913 	input.surface_size.height = plane_size->surface_size.height;
3914 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3915 
3916 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3917 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3918 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3919 		input.scan = SCAN_DIRECTION_VERTICAL;
3920 
3921 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3922 		return -EINVAL;
3923 
3924 	if (!output.capable)
3925 		return -EINVAL;
3926 
3927 	if (dcc->independent_64b_blks == 0 &&
3928 	    output.grph.rgb.independent_64b_blks != 0)
3929 		return -EINVAL;
3930 
3931 	return 0;
3932 }
3933 
3934 static bool
3935 modifier_has_dcc(uint64_t modifier)
3936 {
3937 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3938 }
3939 
3940 static unsigned
3941 modifier_gfx9_swizzle_mode(uint64_t modifier)
3942 {
3943 	if (modifier == DRM_FORMAT_MOD_LINEAR)
3944 		return 0;
3945 
3946 	return AMD_FMT_MOD_GET(TILE, modifier);
3947 }
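
/*
 * E.g. DRM_FORMAT_MOD_LINEAR maps to swizzle mode 0, while an AMD format
 * modifier with TILE = AMD_FMT_MOD_TILE_GFX9_64K_S_X yields that swizzle
 * mode directly; its low two bits select the micro-tile class (see
 * enum dm_micro_swizzle below).
 */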
3948 
3949 static const struct drm_format_info *
3950 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3951 {
3952 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3953 }
3954 
3955 static void
3956 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3957 				    union dc_tiling_info *tiling_info,
3958 				    uint64_t modifier)
3959 {
3960 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3961 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3962 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3963 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3964 
3965 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
3966 
3967 	if (!IS_AMD_FMT_MOD(modifier))
3968 		return;
3969 
3970 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3971 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3972 
3973 	if (adev->family >= AMDGPU_FAMILY_NV) {
3974 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3975 	} else {
3976 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3977 
3978 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3979 	}
3980 }
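
/*
 * Decode example (hypothetical modifier): PIPE_XOR_BITS = 5 gives
 * pipes_log2 = min(4, 5) = 4, so num_pipes = 16 and num_shader_engines =
 * 1 << (5 - 4) = 2; on Navi-class parts PACKERS then sets num_pkrs instead
 * of the bank count.
 */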
3981 
3982 enum dm_micro_swizzle {
3983 	MICRO_SWIZZLE_Z = 0,
3984 	MICRO_SWIZZLE_S = 1,
3985 	MICRO_SWIZZLE_D = 2,
3986 	MICRO_SWIZZLE_R = 3
3987 };
3988 
3989 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3990 					  uint32_t format,
3991 					  uint64_t modifier)
3992 {
3993 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
3994 	const struct drm_format_info *info = drm_format_info(format);
3995 
3996 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3997 
3998 	if (!info)
3999 		return false;
4000 
	/*
	 * We always have to allow this modifier because core DRM still
	 * checks LINEAR support if userspace does not provide modifiers.
	 */
4005 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4006 		return true;
4007 
	/*
	 * Arbitrary tiling support for multi-plane formats has not been
	 * hooked up yet.
	 */
4012 	if (info->num_planes > 1)
4013 		return false;
4014 
4015 	/*
4016 	 * For D swizzle the canonical modifier depends on the bpp, so check
4017 	 * it here.
4018 	 */
4019 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4020 	    adev->family >= AMDGPU_FAMILY_NV) {
4021 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4022 			return false;
4023 	}
4024 
4025 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4026 	    info->cpp[0] < 8)
4027 		return false;
4028 
4029 	if (modifier_has_dcc(modifier)) {
		/* Per the radeonsi comments, 16/64 bpp are more complicated. */
4031 		if (info->cpp[0] != 4)
4032 			return false;
4033 	}
4034 
4035 	return true;
4036 }
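
/*
 * E.g. XRGB8888 (cpp = 4) with a DCC modifier passes the checks above,
 * while RGB565 (cpp = 2) with the same modifier is rejected by the DCC
 * cpp filter.
 */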
4037 
4038 static void
4039 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4040 {
4041 	if (!*mods)
4042 		return;
4043 
4044 	if (*cap - *size < 1) {
4045 		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
4047 
4048 		if (!new_mods) {
4049 			kfree(*mods);
4050 			*mods = NULL;
4051 			return;
4052 		}
4053 
4054 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4055 		kfree(*mods);
4056 		*mods = new_mods;
4057 		*cap = new_cap;
4058 	}
4059 
4060 	(*mods)[*size] = mod;
4061 	*size += 1;
4062 }
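
/*
 * Growth sketch (assuming the initial capacity of 128 set up in
 * get_plane_modifiers()): once size reaches cap the array is doubled; on
 * allocation failure *mods becomes NULL, later calls turn into no-ops, and
 * get_plane_modifiers() reports -ENOMEM.
 */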
4063 
4064 static void
4065 add_gfx9_modifiers(const struct amdgpu_device *adev,
4066 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4067 {
4068 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4069 	int pipe_xor_bits = min(8, pipes +
4070 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4071 	int bank_xor_bits = min(8 - pipe_xor_bits,
4072 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4073 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4074 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4077 	if (adev->family == AMDGPU_FAMILY_RV) {
4078 		/* Raven2 and later */
4079 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4080 
4081 		/*
4082 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4083 		 * doesn't support _D on DCN
4084 		 */
4085 
4086 		if (has_constant_encode) {
4087 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4088 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4089 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4090 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4091 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4092 				    AMD_FMT_MOD_SET(DCC, 1) |
4093 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4094 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4095 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4096 		}
4097 
4098 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4099 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4100 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4101 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4102 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4103 			    AMD_FMT_MOD_SET(DCC, 1) |
4104 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4105 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4106 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4107 
4108 		if (has_constant_encode) {
4109 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4110 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4111 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4112 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4113 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4114 				    AMD_FMT_MOD_SET(DCC, 1) |
4115 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4116 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4120 				    AMD_FMT_MOD_SET(RB, rb) |
4121 				    AMD_FMT_MOD_SET(PIPE, pipes));
4122 		}
4123 
4124 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4125 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4126 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4127 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4128 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4129 			    AMD_FMT_MOD_SET(DCC, 1) |
4130 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4131 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4132 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4133 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4134 			    AMD_FMT_MOD_SET(RB, rb) |
4135 			    AMD_FMT_MOD_SET(PIPE, pipes));
4136 	}
4137 
4138 	/*
4139 	 * Only supported for 64bpp on Raven, will be filtered on format in
4140 	 * dm_plane_format_mod_supported.
4141 	 */
4142 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4143 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4144 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4145 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4146 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4147 
4148 	if (adev->family == AMDGPU_FAMILY_RV) {
4149 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4150 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4151 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4152 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4153 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4154 	}
4155 
4156 	/*
4157 	 * Only supported for 64bpp on Raven, will be filtered on format in
4158 	 * dm_plane_format_mod_supported.
4159 	 */
4160 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4161 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4162 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4163 
4164 	if (adev->family == AMDGPU_FAMILY_RV) {
4165 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4166 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4167 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4168 	}
4169 }
4170 
4171 static void
4172 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4173 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4174 {
4175 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4176 
4177 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4178 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4179 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4180 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4181 		    AMD_FMT_MOD_SET(DCC, 1) |
4182 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4183 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4184 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4185 
4186 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4187 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4188 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4189 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4190 		    AMD_FMT_MOD_SET(DCC, 1) |
4191 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4192 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4193 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4194 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4195 
4196 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4197 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4198 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4199 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4200 
4201 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4202 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4203 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4204 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4207 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4208 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4209 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4210 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4211 
4212 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4213 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4214 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4215 }
4216 
4217 static void
4218 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4219 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4220 {
4221 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4222 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4223 
4224 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4225 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4226 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4227 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4228 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4229 		    AMD_FMT_MOD_SET(DCC, 1) |
4230 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4231 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4232 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4233 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4234 
4235 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4236 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4237 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4238 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4239 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4240 		    AMD_FMT_MOD_SET(DCC, 1) |
4241 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4242 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4243 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4244 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4245 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4246 
4247 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4248 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4249 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4250 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4251 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4252 
4253 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4254 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4255 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4256 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4257 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4258 
4259 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4260 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4261 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4262 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4263 
4264 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4265 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4266 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4267 }
4268 
4269 static int
4270 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4271 {
4272 	uint64_t size = 0, capacity = 128;
4273 	*mods = NULL;
4274 
4275 	/* We have not hooked up any pre-GFX9 modifiers. */
4276 	if (adev->family < AMDGPU_FAMILY_AI)
4277 		return 0;
4278 
	*mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
4280 
4281 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4282 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4283 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4284 		return *mods ? 0 : -ENOMEM;
4285 	}
4286 
4287 	switch (adev->family) {
4288 	case AMDGPU_FAMILY_AI:
4289 	case AMDGPU_FAMILY_RV:
4290 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4291 		break;
4292 	case AMDGPU_FAMILY_NV:
4293 	case AMDGPU_FAMILY_VGH:
4294 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4295 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4296 		else
4297 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4298 		break;
4299 	}
4300 
4301 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4302 
4303 	/* INVALID marks the end of the list. */
4304 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4305 
4306 	if (!*mods)
4307 		return -ENOMEM;
4308 
4309 	return 0;
4310 }
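
/*
 * The resulting list is ordered from most to least preferred and, per DRM
 * convention, terminated with DRM_FORMAT_MOD_INVALID; e.g. on a hypothetical
 * GFX10.3 part it starts with the RBPLUS DCC modifiers and ends with
 * LINEAR, INVALID.
 */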
4311 
4312 static int
4313 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4314 					  const struct amdgpu_framebuffer *afb,
4315 					  const enum surface_pixel_format format,
4316 					  const enum dc_rotation_angle rotation,
4317 					  const struct plane_size *plane_size,
4318 					  union dc_tiling_info *tiling_info,
4319 					  struct dc_plane_dcc_param *dcc,
4320 					  struct dc_plane_address *address,
4321 					  const bool force_disable_dcc)
4322 {
4323 	const uint64_t modifier = afb->base.modifier;
4324 	int ret;
4325 
4326 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4327 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4328 
4329 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4330 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4331 
4332 		dcc->enable = 1;
4333 		dcc->meta_pitch = afb->base.pitches[1];
4334 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4335 
4336 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4337 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4338 	}
4339 
4340 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4341 	if (ret)
4342 		return ret;
4343 
4344 	return 0;
4345 }
4346 
4347 static int
4348 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4349 			     const struct amdgpu_framebuffer *afb,
4350 			     const enum surface_pixel_format format,
4351 			     const enum dc_rotation_angle rotation,
4352 			     const uint64_t tiling_flags,
4353 			     union dc_tiling_info *tiling_info,
4354 			     struct plane_size *plane_size,
4355 			     struct dc_plane_dcc_param *dcc,
4356 			     struct dc_plane_address *address,
4357 			     bool tmz_surface,
4358 			     bool force_disable_dcc)
4359 {
4360 	const struct drm_framebuffer *fb = &afb->base;
4361 	int ret;
4362 
4363 	memset(tiling_info, 0, sizeof(*tiling_info));
4364 	memset(plane_size, 0, sizeof(*plane_size));
4365 	memset(dcc, 0, sizeof(*dcc));
4366 	memset(address, 0, sizeof(*address));
4367 
4368 	address->tmz_surface = tmz_surface;
4369 
4370 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4371 		uint64_t addr = afb->address + fb->offsets[0];
4372 
4373 		plane_size->surface_size.x = 0;
4374 		plane_size->surface_size.y = 0;
4375 		plane_size->surface_size.width = fb->width;
4376 		plane_size->surface_size.height = fb->height;
4377 		plane_size->surface_pitch =
4378 			fb->pitches[0] / fb->format->cpp[0];
4379 
4380 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4381 		address->grph.addr.low_part = lower_32_bits(addr);
4382 		address->grph.addr.high_part = upper_32_bits(addr);
4383 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4384 		uint64_t luma_addr = afb->address + fb->offsets[0];
4385 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4386 
4387 		plane_size->surface_size.x = 0;
4388 		plane_size->surface_size.y = 0;
4389 		plane_size->surface_size.width = fb->width;
4390 		plane_size->surface_size.height = fb->height;
4391 		plane_size->surface_pitch =
4392 			fb->pitches[0] / fb->format->cpp[0];
4393 
4394 		plane_size->chroma_size.x = 0;
4395 		plane_size->chroma_size.y = 0;
4396 		/* TODO: set these based on surface format */
4397 		plane_size->chroma_size.width = fb->width / 2;
4398 		plane_size->chroma_size.height = fb->height / 2;
4399 
4400 		plane_size->chroma_pitch =
4401 			fb->pitches[1] / fb->format->cpp[1];
4402 
4403 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4404 		address->video_progressive.luma_addr.low_part =
4405 			lower_32_bits(luma_addr);
4406 		address->video_progressive.luma_addr.high_part =
4407 			upper_32_bits(luma_addr);
4408 		address->video_progressive.chroma_addr.low_part =
4409 			lower_32_bits(chroma_addr);
4410 		address->video_progressive.chroma_addr.high_part =
4411 			upper_32_bits(chroma_addr);
4412 	}
4413 
4414 	if (adev->family >= AMDGPU_FAMILY_AI) {
4415 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4416 								rotation, plane_size,
4417 								tiling_info, dcc,
4418 								address,
4419 								force_disable_dcc);
4420 		if (ret)
4421 			return ret;
4422 	} else {
4423 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4424 	}
4425 
4426 	return 0;
4427 }
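
/*
 * Worked example (hypothetical NV12 framebuffer): for 1920x1080 NV12,
 * plane 0 (luma) has surface_pitch = pitches[0] / cpp[0] and plane 1
 * (chroma) is assumed 960x540 with chroma_pitch = pitches[1] / cpp[1];
 * the two plane addresses come from offsets[0] and offsets[1].
 */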
4428 
4429 static void
4430 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4431 			       bool *per_pixel_alpha, bool *global_alpha,
4432 			       int *global_alpha_value)
4433 {
4434 	*per_pixel_alpha = false;
4435 	*global_alpha = false;
4436 	*global_alpha_value = 0xff;
4437 
4438 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4439 		return;
4440 
4441 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4442 		static const uint32_t alpha_formats[] = {
4443 			DRM_FORMAT_ARGB8888,
4444 			DRM_FORMAT_RGBA8888,
4445 			DRM_FORMAT_ABGR8888,
4446 		};
4447 		uint32_t format = plane_state->fb->format->format;
4448 		unsigned int i;
4449 
4450 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4451 			if (format == alpha_formats[i]) {
4452 				*per_pixel_alpha = true;
4453 				break;
4454 			}
4455 		}
4456 	}
4457 
4458 	if (plane_state->alpha < 0xffff) {
4459 		*global_alpha = true;
4460 		*global_alpha_value = plane_state->alpha >> 8;
4461 	}
4462 }
4463 
4464 static int
4465 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4466 			    const enum surface_pixel_format format,
4467 			    enum dc_color_space *color_space)
4468 {
4469 	bool full_range;
4470 
4471 	*color_space = COLOR_SPACE_SRGB;
4472 
4473 	/* DRM color properties only affect non-RGB formats. */
4474 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4475 		return 0;
4476 
4477 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4478 
4479 	switch (plane_state->color_encoding) {
4480 	case DRM_COLOR_YCBCR_BT601:
4481 		if (full_range)
4482 			*color_space = COLOR_SPACE_YCBCR601;
4483 		else
4484 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4485 		break;
4486 
4487 	case DRM_COLOR_YCBCR_BT709:
4488 		if (full_range)
4489 			*color_space = COLOR_SPACE_YCBCR709;
4490 		else
4491 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4492 		break;
4493 
4494 	case DRM_COLOR_YCBCR_BT2020:
4495 		if (full_range)
4496 			*color_space = COLOR_SPACE_2020_YCBCR;
4497 		else
4498 			return -EINVAL;
4499 		break;
4500 
4501 	default:
4502 		return -EINVAL;
4503 	}
4504 
4505 	return 0;
4506 }
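
/*
 * E.g. an NV12 plane with DRM_COLOR_YCBCR_BT601 encoding and limited range
 * resolves to COLOR_SPACE_YCBCR601_LIMITED; RGB formats always resolve to
 * COLOR_SPACE_SRGB regardless of the DRM color properties.
 */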
4507 
4508 static int
4509 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4510 			    const struct drm_plane_state *plane_state,
4511 			    const uint64_t tiling_flags,
4512 			    struct dc_plane_info *plane_info,
4513 			    struct dc_plane_address *address,
4514 			    bool tmz_surface,
4515 			    bool force_disable_dcc)
4516 {
4517 	const struct drm_framebuffer *fb = plane_state->fb;
4518 	const struct amdgpu_framebuffer *afb =
4519 		to_amdgpu_framebuffer(plane_state->fb);
4520 	struct drm_format_name_buf format_name;
4521 	int ret;
4522 
4523 	memset(plane_info, 0, sizeof(*plane_info));
4524 
4525 	switch (fb->format->format) {
4526 	case DRM_FORMAT_C8:
4527 		plane_info->format =
4528 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4529 		break;
4530 	case DRM_FORMAT_RGB565:
4531 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4532 		break;
4533 	case DRM_FORMAT_XRGB8888:
4534 	case DRM_FORMAT_ARGB8888:
4535 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4536 		break;
4537 	case DRM_FORMAT_XRGB2101010:
4538 	case DRM_FORMAT_ARGB2101010:
4539 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4540 		break;
4541 	case DRM_FORMAT_XBGR2101010:
4542 	case DRM_FORMAT_ABGR2101010:
4543 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4544 		break;
4545 	case DRM_FORMAT_XBGR8888:
4546 	case DRM_FORMAT_ABGR8888:
4547 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4548 		break;
4549 	case DRM_FORMAT_NV21:
4550 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4551 		break;
4552 	case DRM_FORMAT_NV12:
4553 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4554 		break;
4555 	case DRM_FORMAT_P010:
4556 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4557 		break;
4558 	case DRM_FORMAT_XRGB16161616F:
4559 	case DRM_FORMAT_ARGB16161616F:
4560 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4561 		break;
4562 	case DRM_FORMAT_XBGR16161616F:
4563 	case DRM_FORMAT_ABGR16161616F:
4564 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4565 		break;
4566 	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(fb->format->format, &format_name));
4570 		return -EINVAL;
4571 	}
4572 
4573 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4574 	case DRM_MODE_ROTATE_0:
4575 		plane_info->rotation = ROTATION_ANGLE_0;
4576 		break;
4577 	case DRM_MODE_ROTATE_90:
4578 		plane_info->rotation = ROTATION_ANGLE_90;
4579 		break;
4580 	case DRM_MODE_ROTATE_180:
4581 		plane_info->rotation = ROTATION_ANGLE_180;
4582 		break;
4583 	case DRM_MODE_ROTATE_270:
4584 		plane_info->rotation = ROTATION_ANGLE_270;
4585 		break;
4586 	default:
4587 		plane_info->rotation = ROTATION_ANGLE_0;
4588 		break;
4589 	}
4590 
4591 	plane_info->visible = true;
4592 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4593 
4594 	plane_info->layer_index = 0;
4595 
4596 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4597 					  &plane_info->color_space);
4598 	if (ret)
4599 		return ret;
4600 
4601 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4602 					   plane_info->rotation, tiling_flags,
4603 					   &plane_info->tiling_info,
4604 					   &plane_info->plane_size,
4605 					   &plane_info->dcc, address, tmz_surface,
4606 					   force_disable_dcc);
4607 	if (ret)
4608 		return ret;
4609 
4610 	fill_blending_from_plane_state(
4611 		plane_state, &plane_info->per_pixel_alpha,
4612 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4613 
4614 	return 0;
4615 }
4616 
4617 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4618 				    struct dc_plane_state *dc_plane_state,
4619 				    struct drm_plane_state *plane_state,
4620 				    struct drm_crtc_state *crtc_state)
4621 {
4622 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4623 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4624 	struct dc_scaling_info scaling_info;
4625 	struct dc_plane_info plane_info;
4626 	int ret;
4627 	bool force_disable_dcc = false;
4628 
4629 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4630 	if (ret)
4631 		return ret;
4632 
4633 	dc_plane_state->src_rect = scaling_info.src_rect;
4634 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4635 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4636 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4637 
4638 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4639 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4640 					  afb->tiling_flags,
4641 					  &plane_info,
4642 					  &dc_plane_state->address,
4643 					  afb->tmz_surface,
4644 					  force_disable_dcc);
4645 	if (ret)
4646 		return ret;
4647 
	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
4651 	dc_plane_state->plane_size = plane_info.plane_size;
4652 	dc_plane_state->rotation = plane_info.rotation;
4653 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4654 	dc_plane_state->stereo_format = plane_info.stereo_format;
4655 	dc_plane_state->tiling_info = plane_info.tiling_info;
4656 	dc_plane_state->visible = plane_info.visible;
4657 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4658 	dc_plane_state->global_alpha = plane_info.global_alpha;
4659 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4660 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4662 
4663 	/*
4664 	 * Always set input transfer function, since plane state is refreshed
4665 	 * every time.
4666 	 */
4667 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4668 	if (ret)
4669 		return ret;
4670 
4671 	return 0;
4672 }
4673 
4674 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4675 					   const struct dm_connector_state *dm_state,
4676 					   struct dc_stream_state *stream)
4677 {
4678 	enum amdgpu_rmx_type rmx_type;
4679 
	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */
4682 
	/* No mode, nothing to be done. */
4684 	if (!mode)
4685 		return;
4686 
4687 	/* Full screen scaling by default */
4688 	src.width = mode->hdisplay;
4689 	src.height = mode->vdisplay;
4690 	dst.width = stream->timing.h_addressable;
4691 	dst.height = stream->timing.v_addressable;
4692 
4693 	if (dm_state) {
4694 		rmx_type = dm_state->scaling;
4695 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4696 			if (src.width * dst.height <
4697 					src.height * dst.width) {
4698 				/* height needs less upscaling/more downscaling */
4699 				dst.width = src.width *
4700 						dst.height / src.height;
4701 			} else {
4702 				/* width needs less upscaling/more downscaling */
4703 				dst.height = src.height *
4704 						dst.width / src.width;
4705 			}
4706 		} else if (rmx_type == RMX_CENTER) {
4707 			dst = src;
4708 		}
4709 
4710 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4711 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4712 
4713 		if (dm_state->underscan_enable) {
4714 			dst.x += dm_state->underscan_hborder / 2;
4715 			dst.y += dm_state->underscan_vborder / 2;
4716 			dst.width -= dm_state->underscan_hborder;
4717 			dst.height -= dm_state->underscan_vborder;
4718 		}
4719 	}
4720 
4721 	stream->src = src;
4722 	stream->dst = dst;
4723 
	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
			 dst.x, dst.y, dst.width, dst.height);
4727 }
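
/*
 * Worked example (hypothetical panel): a 1280x720 source on a 1920x1200
 * native timing with RMX_ASPECT scales to 1920x1080 (720 * 1920 / 1280) to
 * preserve the aspect ratio and is centered with dst.y = (1200 - 1080) / 2
 * = 60.
 */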
4728 
4729 static enum dc_color_depth
4730 convert_color_depth_from_display_info(const struct drm_connector *connector,
4731 				      bool is_y420, int requested_bpc)
4732 {
4733 	uint8_t bpc;
4734 
4735 	if (is_y420) {
4736 		bpc = 8;
4737 
4738 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4739 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4740 			bpc = 16;
4741 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4742 			bpc = 12;
4743 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4744 			bpc = 10;
4745 	} else {
4746 		bpc = (uint8_t)connector->display_info.bpc;
4747 		/* Assume 8 bpc by default if no bpc is specified. */
4748 		bpc = bpc ? bpc : 8;
4749 	}
4750 
4751 	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state or
		 * if this was called outside of atomic check, so it can't be
		 * used directly.
		 */
4760 		bpc = min_t(u8, bpc, requested_bpc);
4761 
4762 		/* Round down to the nearest even number. */
4763 		bpc = bpc - (bpc & 1);
4764 	}
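
	/*
	 * E.g. a sink reporting 12 bpc combined with requested_bpc = 11
	 * yields min(12, 11) = 11, rounded down to the even value 10.
	 */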
4765 
4766 	switch (bpc) {
4767 	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing.
		 */
4773 		return COLOR_DEPTH_888;
4774 	case 6:
4775 		return COLOR_DEPTH_666;
4776 	case 8:
4777 		return COLOR_DEPTH_888;
4778 	case 10:
4779 		return COLOR_DEPTH_101010;
4780 	case 12:
4781 		return COLOR_DEPTH_121212;
4782 	case 14:
4783 		return COLOR_DEPTH_141414;
4784 	case 16:
4785 		return COLOR_DEPTH_161616;
4786 	default:
4787 		return COLOR_DEPTH_UNDEFINED;
4788 	}
4789 }
4790 
4791 static enum dc_aspect_ratio
4792 get_aspect_ratio(const struct drm_display_mode *mode_in)
4793 {
4794 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4795 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4796 }
4797 
4798 static enum dc_color_space
4799 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4800 {
4801 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4802 
4803 	switch (dc_crtc_timing->pixel_encoding)	{
4804 	case PIXEL_ENCODING_YCBCR422:
4805 	case PIXEL_ENCODING_YCBCR444:
4806 	case PIXEL_ENCODING_YCBCR420:
4807 	{
		/*
		 * According to the HDMI spec, a pixel clock of 27.03 MHz
		 * separates HDTV from SDTV; use YCbCr709 above it and
		 * YCbCr601 below it.
		 */
4813 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4814 			if (dc_crtc_timing->flags.Y_ONLY)
4815 				color_space =
4816 					COLOR_SPACE_YCBCR709_LIMITED;
4817 			else
4818 				color_space = COLOR_SPACE_YCBCR709;
4819 		} else {
4820 			if (dc_crtc_timing->flags.Y_ONLY)
4821 				color_space =
4822 					COLOR_SPACE_YCBCR601_LIMITED;
4823 			else
4824 				color_space = COLOR_SPACE_YCBCR601;
4825 		}
4826 
4827 	}
4828 	break;
4829 	case PIXEL_ENCODING_RGB:
4830 		color_space = COLOR_SPACE_SRGB;
4831 		break;
4832 
4833 	default:
4834 		WARN_ON(1);
4835 		break;
4836 	}
4837 
4838 	return color_space;
4839 }
4840 
4841 static bool adjust_colour_depth_from_display_info(
4842 	struct dc_crtc_timing *timing_out,
4843 	const struct drm_display_info *info)
4844 {
4845 	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
4848 		normalized_clk = timing_out->pix_clk_100hz / 10;
4849 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4850 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4851 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec for the given colour depth. */
4853 		switch (depth) {
4854 		case COLOR_DEPTH_888:
4855 			break;
4856 		case COLOR_DEPTH_101010:
4857 			normalized_clk = (normalized_clk * 30) / 24;
4858 			break;
4859 		case COLOR_DEPTH_121212:
4860 			normalized_clk = (normalized_clk * 36) / 24;
4861 			break;
4862 		case COLOR_DEPTH_161616:
4863 			normalized_clk = (normalized_clk * 48) / 24;
4864 			break;
4865 		default:
4866 			/* The above depths are the only ones valid for HDMI. */
4867 			return false;
4868 		}
4869 		if (normalized_clk <= info->max_tmds_clock) {
4870 			timing_out->display_color_depth = depth;
4871 			return true;
4872 		}
4873 	} while (--depth > COLOR_DEPTH_666);
4874 	return false;
4875 }
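
/*
 * Worked example (hypothetical 4K60 HDMI sink): pix_clk_100hz = 5940000
 * gives normalized_clk = 594000 kHz; YCbCr 4:2:0 halves that to 297000, and
 * 12 bpc scales it by 36/24 to 445500 kHz, which fits a 600000 kHz
 * max_tmds_clock, so 12 bpc is kept.
 */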
4876 
4877 static void fill_stream_properties_from_drm_display_mode(
4878 	struct dc_stream_state *stream,
4879 	const struct drm_display_mode *mode_in,
4880 	const struct drm_connector *connector,
4881 	const struct drm_connector_state *connector_state,
4882 	const struct dc_stream_state *old_stream,
4883 	int requested_bpc)
4884 {
4885 	struct dc_crtc_timing *timing_out = &stream->timing;
4886 	const struct drm_display_info *info = &connector->display_info;
4887 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4888 	struct hdmi_vendor_infoframe hv_frame;
4889 	struct hdmi_avi_infoframe avi_frame;
4890 
4891 	memset(&hv_frame, 0, sizeof(hv_frame));
4892 	memset(&avi_frame, 0, sizeof(avi_frame));
4893 
4894 	timing_out->h_border_left = 0;
4895 	timing_out->h_border_right = 0;
4896 	timing_out->v_border_top = 0;
4897 	timing_out->v_border_bottom = 0;
4898 	/* TODO: un-hardcode */
4899 	if (drm_mode_is_420_only(info, mode_in)
4900 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4901 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4902 	else if (drm_mode_is_420_also(info, mode_in)
4903 			&& aconnector->force_yuv420_output)
4904 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4905 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4906 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4907 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4908 	else
4909 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4910 
4911 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4912 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4913 		connector,
4914 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4915 		requested_bpc);
4916 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4917 	timing_out->hdmi_vic = 0;
4918 
	if (old_stream) {
4920 		timing_out->vic = old_stream->timing.vic;
4921 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4922 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4923 	} else {
4924 		timing_out->vic = drm_match_cea_mode(mode_in);
4925 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4926 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4927 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4928 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4929 	}
4930 
4931 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4932 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4933 		timing_out->vic = avi_frame.video_code;
4934 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4935 		timing_out->hdmi_vic = hv_frame.vic;
4936 	}
4937 
4938 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4939 	timing_out->h_total = mode_in->crtc_htotal;
4940 	timing_out->h_sync_width =
4941 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4942 	timing_out->h_front_porch =
4943 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4944 	timing_out->v_total = mode_in->crtc_vtotal;
4945 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4946 	timing_out->v_front_porch =
4947 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4948 	timing_out->v_sync_width =
4949 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4950 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4951 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4952 
4953 	stream->output_color_space = get_output_color_space(timing_out);
4954 
4955 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4956 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4957 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4958 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4959 		    drm_mode_is_420_also(info, mode_in) &&
4960 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4961 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4962 			adjust_colour_depth_from_display_info(timing_out, info);
4963 		}
4964 	}
4965 }
4966 
4967 static void fill_audio_info(struct audio_info *audio_info,
4968 			    const struct drm_connector *drm_connector,
4969 			    const struct dc_sink *dc_sink)
4970 {
4971 	int i = 0;
4972 	int cea_revision = 0;
4973 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4974 
4975 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4976 	audio_info->product_id = edid_caps->product_id;
4977 
4978 	cea_revision = drm_connector->display_info.cea_rev;
4979 
4980 	strscpy(audio_info->display_name,
4981 		edid_caps->display_name,
4982 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4983 
4984 	if (cea_revision >= 3) {
4985 		audio_info->mode_count = edid_caps->audio_mode_count;
4986 
4987 		for (i = 0; i < audio_info->mode_count; ++i) {
4988 			audio_info->modes[i].format_code =
4989 					(enum audio_format_code)
4990 					(edid_caps->audio_modes[i].format_code);
4991 			audio_info->modes[i].channel_count =
4992 					edid_caps->audio_modes[i].channel_count;
4993 			audio_info->modes[i].sample_rates.all =
4994 					edid_caps->audio_modes[i].sample_rate;
4995 			audio_info->modes[i].sample_size =
4996 					edid_caps->audio_modes[i].sample_size;
4997 		}
4998 	}
4999 
5000 	audio_info->flags.all = edid_caps->speaker_flags;
5001 
	/* TODO: We only check progressive mode; check interlaced mode too. */
5003 	if (drm_connector->latency_present[0]) {
5004 		audio_info->video_latency = drm_connector->video_latency[0];
5005 		audio_info->audio_latency = drm_connector->audio_latency[0];
5006 	}
5007 
5008 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5009 
5010 }
5011 
5012 static void
5013 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5014 				      struct drm_display_mode *dst_mode)
5015 {
5016 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5017 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5018 	dst_mode->crtc_clock = src_mode->crtc_clock;
5019 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5020 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5021 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5022 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5023 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5024 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5025 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5026 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5027 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5028 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5029 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5030 }
5031 
5032 static void
5033 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5034 					const struct drm_display_mode *native_mode,
5035 					bool scale_enabled)
5036 {
5037 	if (scale_enabled) {
5038 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5039 	} else if (native_mode->clock == drm_mode->clock &&
5040 			native_mode->htotal == drm_mode->htotal &&
5041 			native_mode->vtotal == drm_mode->vtotal) {
5042 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5043 	} else {
		/* No scaling and no amdgpu-inserted mode: nothing to patch. */
5045 	}
5046 }
5047 
5048 static struct dc_sink *
5049 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5050 {
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
5054 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5055 
5056 	sink = dc_sink_create(&sink_init_data);
5057 	if (!sink) {
5058 		DRM_ERROR("Failed to create sink!\n");
5059 		return NULL;
5060 	}
5061 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5062 
5063 	return sink;
5064 }
5065 
5066 static void set_multisync_trigger_params(
5067 		struct dc_stream_state *stream)
5068 {
5069 	if (stream->triggered_crtc_reset.enabled) {
5070 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5071 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5072 	}
5073 }
5074 
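/*
 * Pick the stream with the highest refresh rate as the multisync master and
 * point every other stream's CRTC-reset trigger at it. The refresh rate is
 * derived from the timing as pix_clk / (h_total * v_total); e.g. a
 * hypothetical 148.5 MHz pixel clock with 2200x1125 totals yields 60 Hz.
 */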
5075 static void set_master_stream(struct dc_stream_state *stream_set[],
5076 			      int stream_count)
5077 {
5078 	int j, highest_rfr = 0, master_stream = 0;
5079 
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5086 			if (refresh_rate > highest_rfr) {
5087 				highest_rfr = refresh_rate;
5088 				master_stream = j;
5089 			}
5090 		}
5091 	}
	for (j = 0; j < stream_count; j++) {
5093 		if (stream_set[j])
5094 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5095 	}
5096 }
5097 
5098 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5099 {
5100 	int i = 0;
5101 
5102 	if (context->stream_count < 2)
5103 		return;
	for (i = 0; i < context->stream_count; i++) {
5105 		if (!context->streams[i])
5106 			continue;
5107 		/*
5108 		 * TODO: add a function to read AMD VSDB bits and set
5109 		 * crtc_sync_master.multi_sync_enabled flag
5110 		 * For now it's set to false
5111 		 */
5112 		set_multisync_trigger_params(context->streams[i]);
5113 	}
5114 	set_master_stream(context->streams, context->stream_count);
5115 }
5116 
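/*
 * Build a dc_stream_state for the given connector and mode: pick a real or
 * fake sink, patch the timing against the preferred (native) mode, fill in
 * the stream properties, configure DSC where supported, and attach audio
 * and infopacket data. Returns NULL on failure; the caller owns the
 * returned reference.
 */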
5117 static struct dc_stream_state *
5118 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5119 		       const struct drm_display_mode *drm_mode,
5120 		       const struct dm_connector_state *dm_state,
5121 		       const struct dc_stream_state *old_stream,
5122 		       int requested_bpc)
5123 {
5124 	struct drm_display_mode *preferred_mode = NULL;
5125 	struct drm_connector *drm_connector;
5126 	const struct drm_connector_state *con_state =
5127 		dm_state ? &dm_state->base : NULL;
5128 	struct dc_stream_state *stream = NULL;
5129 	struct drm_display_mode mode = *drm_mode;
5130 	bool native_mode_found = false;
5131 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5132 	int mode_refresh;
5133 	int preferred_refresh = 0;
5134 #if defined(CONFIG_DRM_AMD_DC_DCN)
5135 	struct dsc_dec_dpcd_caps dsc_caps;
5136 	uint32_t link_bandwidth_kbps;
5137 #endif
	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
5140 		DRM_ERROR("aconnector is NULL!\n");
5141 		return stream;
5142 	}
5143 
5144 	drm_connector = &aconnector->base;
5145 
5146 	if (!aconnector->dc_sink) {
5147 		sink = create_fake_sink(aconnector);
5148 		if (!sink)
5149 			return stream;
5150 	} else {
5151 		sink = aconnector->dc_sink;
5152 		dc_sink_retain(sink);
5153 	}
5154 
5155 	stream = dc_create_stream_for_sink(sink);
5156 
5157 	if (stream == NULL) {
5158 		DRM_ERROR("Failed to create stream for sink!\n");
5159 		goto finish;
5160 	}
5161 
5162 	stream->dm_stream_context = aconnector;
5163 
5164 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5165 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5166 
5167 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5168 		/* Search for preferred mode */
5169 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5170 			native_mode_found = true;
5171 			break;
5172 		}
5173 	}
5174 	if (!native_mode_found)
5175 		preferred_mode = list_first_entry_or_null(
5176 				&aconnector->base.modes,
5177 				struct drm_display_mode,
5178 				head);
5179 
5180 	mode_refresh = drm_mode_vrefresh(&mode);
5181 
5182 	if (preferred_mode == NULL) {
5183 		/*
		 * This may not be an error: the use case is a hotplug with no
		 * user-mode call to reset and set the mode. In that case we
		 * set the mode ourselves to restore the previous one, and the
		 * mode list may not be filled in yet.
5188 		 */
5189 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5190 	} else {
5191 		decide_crtc_timing_for_drm_display_mode(
5192 				&mode, preferred_mode,
5193 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5194 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5195 	}
5196 
5197 	if (!dm_state)
5198 		drm_mode_set_crtcinfo(&mode, 0);
5199 
5200 	/*
5201 	* If scaling is enabled and refresh rate didn't change
5202 	* we copy the vic and polarities of the old timings
5203 	*/
5204 	if (!scale || mode_refresh != preferred_refresh)
5205 		fill_stream_properties_from_drm_display_mode(stream,
5206 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
5207 	else
5208 		fill_stream_properties_from_drm_display_mode(stream,
5209 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
5210 
5211 	stream->timing.flags.DSC = 0;
5212 
5213 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5214 #if defined(CONFIG_DRM_AMD_DC_DCN)
5215 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5216 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5217 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5218 				      &dsc_caps);
5219 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5220 							     dc_link_get_link_cap(aconnector->dc_link));
5221 
5222 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5223 			/* Set DSC policy according to dsc_clock_en */
5224 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5225 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5226 
5227 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5228 						  &dsc_caps,
5229 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5230 						  0,
5231 						  link_bandwidth_kbps,
5232 						  &stream->timing,
5233 						  &stream->timing.dsc_cfg))
5234 				stream->timing.flags.DSC = 1;
5235 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5236 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5237 				stream->timing.flags.DSC = 1;
5238 
5239 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5240 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5241 
5242 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5243 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5244 
5245 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5246 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5247 		}
5248 #endif
5249 	}
5250 
5251 	update_stream_scaling_settings(&mode, dm_state, stream);
5252 
5253 	fill_audio_info(
5254 		&stream->audio_info,
5255 		drm_connector,
5256 		sink);
5257 
5258 	update_stream_signal(stream, sink);
5259 
5260 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5261 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5262 
5263 	if (stream->link->psr_settings.psr_feature_enabled) {
5264 		//
5265 		// should decide stream support vsc sdp colorimetry capability
5266 		// before building vsc info packet
5267 		//
5268 		stream->use_vsc_sdp_for_colorimetry = false;
5269 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5270 			stream->use_vsc_sdp_for_colorimetry =
5271 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5272 		} else {
5273 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5274 				stream->use_vsc_sdp_for_colorimetry = true;
5275 		}
5276 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5277 	}
5278 finish:
5279 	dc_sink_release(sink);
5280 
5281 	return stream;
5282 }
5283 
5284 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5285 {
5286 	drm_crtc_cleanup(crtc);
5287 	kfree(crtc);
5288 }
5289 
5290 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5291 				  struct drm_crtc_state *state)
5292 {
5293 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5294 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
5304 }
5305 
5306 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5307 {
5308 	struct dm_crtc_state *state;
5309 
5310 	if (crtc->state)
5311 		dm_crtc_destroy_state(crtc, crtc->state);
5312 
5313 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5314 	if (WARN_ON(!state))
5315 		return;
5316 
5317 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5318 }
5319 
5320 static struct drm_crtc_state *
5321 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5322 {
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
5329 
5330 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5331 	if (!state)
5332 		return NULL;
5333 
5334 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5335 
5336 	if (cur->stream) {
5337 		state->stream = cur->stream;
5338 		dc_stream_retain(state->stream);
5339 	}
5340 
5341 	state->active_planes = cur->active_planes;
5342 	state->vrr_infopacket = cur->vrr_infopacket;
5343 	state->abm_level = cur->abm_level;
5344 	state->vrr_supported = cur->vrr_supported;
5345 	state->freesync_config = cur->freesync_config;
5346 	state->crc_src = cur->crc_src;
5347 	state->cm_has_degamma = cur->cm_has_degamma;
5348 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5349 
	/* TODO: Duplicate the dc_stream once the stream object is flattened */
5351 
5352 	return &state->base;
5353 }
5354 
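/*
 * Enable or disable the VUPDATE interrupt on the CRTC's output timing
 * generator instance. Returns 0 on success or -EBUSY if DC rejects the
 * change.
 */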
5355 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5356 {
5357 	enum dc_irq_source irq_source;
5358 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5359 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5360 	int rc;
5361 
5362 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5363 
5364 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5365 
5366 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5367 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
5368 	return rc;
5369 }
5370 
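/*
 * Enable or disable the VBLANK interrupt. The VUPDATE interrupt is only
 * kept enabled while VRR is active, and the global active-vblank count
 * gates DC's idle (MALL) optimizations: they are only allowed once no CRTC
 * has its vblank interrupt enabled.
 */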
5371 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5372 {
5373 	enum dc_irq_source irq_source;
5374 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5375 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5376 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5377 	struct amdgpu_display_manager *dm = &adev->dm;
5378 	int rc = 0;
5379 
5380 	if (enable) {
5381 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5382 		if (amdgpu_dm_vrr_active(acrtc_state))
5383 			rc = dm_set_vupdate_irq(crtc, true);
5384 	} else {
5385 		/* vblank irq off -> vupdate irq off */
5386 		rc = dm_set_vupdate_irq(crtc, false);
5387 	}
5388 
5389 	if (rc)
5390 		return rc;
5391 
5392 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5393 
5394 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5395 		return -EBUSY;
5396 
5397 	if (amdgpu_in_reset(adev))
5398 		return 0;
5399 
5400 	mutex_lock(&dm->dc_lock);
5401 
5402 	if (enable)
5403 		dm->active_vblank_irq_count++;
5404 	else
5405 		dm->active_vblank_irq_count--;
5406 
5407 #if defined(CONFIG_DRM_AMD_DC_DCN)
	dc_allow_idle_optimizations(
		adev->dm.dc, dm->active_vblank_irq_count == 0);
5410 
5411 	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
5412 #endif
5413 
5414 	mutex_unlock(&dm->dc_lock);
5415 
5416 	return 0;
5417 }
5418 
5419 static int dm_enable_vblank(struct drm_crtc *crtc)
5420 {
5421 	return dm_set_vblank(crtc, true);
5422 }
5423 
5424 static void dm_disable_vblank(struct drm_crtc *crtc)
5425 {
5426 	dm_set_vblank(crtc, false);
5427 }
5428 
/* Only the options currently available to the driver are implemented */
5430 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5431 	.reset = dm_crtc_reset_state,
5432 	.destroy = amdgpu_dm_crtc_destroy,
5433 	.set_config = drm_atomic_helper_set_config,
5434 	.page_flip = drm_atomic_helper_page_flip,
5435 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5436 	.atomic_destroy_state = dm_crtc_destroy_state,
5437 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5438 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5439 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5440 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5441 	.enable_vblank = dm_enable_vblank,
5442 	.disable_vblank = dm_disable_vblank,
5443 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5444 };
5445 
5446 static enum drm_connector_status
5447 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5448 {
5449 	bool connected;
5450 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5451 
5452 	/*
5453 	 * Notes:
5454 	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in the context of a user-mode ioctl,
	 * which makes it a bad place for *any* MST-related activity.
5457 	 */
5458 
5459 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5460 	    !aconnector->fake_enable)
5461 		connected = (aconnector->dc_sink != NULL);
5462 	else
5463 		connected = (aconnector->base.force == DRM_FORCE_ON);
5464 
5465 	update_subconnector_property(aconnector);
5466 
5467 	return (connected ? connector_status_connected :
5468 			connector_status_disconnected);
5469 }
5470 
5471 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5472 					    struct drm_connector_state *connector_state,
5473 					    struct drm_property *property,
5474 					    uint64_t val)
5475 {
5476 	struct drm_device *dev = connector->dev;
5477 	struct amdgpu_device *adev = drm_to_adev(dev);
5478 	struct dm_connector_state *dm_old_state =
5479 		to_dm_connector_state(connector->state);
5480 	struct dm_connector_state *dm_new_state =
5481 		to_dm_connector_state(connector_state);
5482 
5483 	int ret = -EINVAL;
5484 
5485 	if (property == dev->mode_config.scaling_mode_property) {
5486 		enum amdgpu_rmx_type rmx_type;
5487 
5488 		switch (val) {
5489 		case DRM_MODE_SCALE_CENTER:
5490 			rmx_type = RMX_CENTER;
5491 			break;
5492 		case DRM_MODE_SCALE_ASPECT:
5493 			rmx_type = RMX_ASPECT;
5494 			break;
5495 		case DRM_MODE_SCALE_FULLSCREEN:
5496 			rmx_type = RMX_FULL;
5497 			break;
5498 		case DRM_MODE_SCALE_NONE:
5499 		default:
5500 			rmx_type = RMX_OFF;
5501 			break;
5502 		}
5503 
5504 		if (dm_old_state->scaling == rmx_type)
5505 			return 0;
5506 
5507 		dm_new_state->scaling = rmx_type;
5508 		ret = 0;
5509 	} else if (property == adev->mode_info.underscan_hborder_property) {
5510 		dm_new_state->underscan_hborder = val;
5511 		ret = 0;
5512 	} else if (property == adev->mode_info.underscan_vborder_property) {
5513 		dm_new_state->underscan_vborder = val;
5514 		ret = 0;
5515 	} else if (property == adev->mode_info.underscan_property) {
5516 		dm_new_state->underscan_enable = val;
5517 		ret = 0;
5518 	} else if (property == adev->mode_info.abm_level_property) {
5519 		dm_new_state->abm_level = val;
5520 		ret = 0;
5521 	}
5522 
5523 	return ret;
5524 }
5525 
5526 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5527 					    const struct drm_connector_state *state,
5528 					    struct drm_property *property,
5529 					    uint64_t *val)
5530 {
5531 	struct drm_device *dev = connector->dev;
5532 	struct amdgpu_device *adev = drm_to_adev(dev);
5533 	struct dm_connector_state *dm_state =
5534 		to_dm_connector_state(state);
5535 	int ret = -EINVAL;
5536 
5537 	if (property == dev->mode_config.scaling_mode_property) {
5538 		switch (dm_state->scaling) {
5539 		case RMX_CENTER:
5540 			*val = DRM_MODE_SCALE_CENTER;
5541 			break;
5542 		case RMX_ASPECT:
5543 			*val = DRM_MODE_SCALE_ASPECT;
5544 			break;
5545 		case RMX_FULL:
5546 			*val = DRM_MODE_SCALE_FULLSCREEN;
5547 			break;
5548 		case RMX_OFF:
5549 		default:
5550 			*val = DRM_MODE_SCALE_NONE;
5551 			break;
5552 		}
5553 		ret = 0;
5554 	} else if (property == adev->mode_info.underscan_hborder_property) {
5555 		*val = dm_state->underscan_hborder;
5556 		ret = 0;
5557 	} else if (property == adev->mode_info.underscan_vborder_property) {
5558 		*val = dm_state->underscan_vborder;
5559 		ret = 0;
5560 	} else if (property == adev->mode_info.underscan_property) {
5561 		*val = dm_state->underscan_enable;
5562 		ret = 0;
5563 	} else if (property == adev->mode_info.abm_level_property) {
5564 		*val = dm_state->abm_level;
5565 		ret = 0;
5566 	}
5567 
5568 	return ret;
5569 }
5570 
5571 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5572 {
5573 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5574 
5575 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5576 }
5577 
5578 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5579 {
5580 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5581 	const struct dc_link *link = aconnector->dc_link;
5582 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5583 	struct amdgpu_display_manager *dm = &adev->dm;
5584 
5585 	/*
	 * Only call this if mst_mgr was initialized before, since that's not
	 * done for all connector types.
5588 	 */
5589 	if (aconnector->mst_mgr.dev)
5590 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5591 
5592 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5593 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5594 
5595 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5596 	    link->type != dc_connection_none &&
5597 	    dm->backlight_dev) {
5598 		backlight_device_unregister(dm->backlight_dev);
5599 		dm->backlight_dev = NULL;
5600 	}
5601 #endif
5602 
5603 	if (aconnector->dc_em_sink)
5604 		dc_sink_release(aconnector->dc_em_sink);
5605 	aconnector->dc_em_sink = NULL;
5606 	if (aconnector->dc_sink)
5607 		dc_sink_release(aconnector->dc_sink);
5608 	aconnector->dc_sink = NULL;
5609 
5610 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5611 	drm_connector_unregister(connector);
5612 	drm_connector_cleanup(connector);
5613 	if (aconnector->i2c) {
5614 		i2c_del_adapter(&aconnector->i2c->base);
5615 		kfree(aconnector->i2c);
5616 	}
5617 	kfree(aconnector->dm_dp_aux.aux.name);
5618 
5619 	kfree(connector);
5620 }
5621 
5622 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5623 {
5624 	struct dm_connector_state *state =
5625 		to_dm_connector_state(connector->state);
5626 
5627 	if (connector->state)
5628 		__drm_atomic_helper_connector_destroy_state(connector->state);
5629 
5630 	kfree(state);
5631 
5632 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5633 
5634 	if (state) {
5635 		state->scaling = RMX_OFF;
5636 		state->underscan_enable = false;
5637 		state->underscan_hborder = 0;
5638 		state->underscan_vborder = 0;
5639 		state->base.max_requested_bpc = 8;
5640 		state->vcpi_slots = 0;
5641 		state->pbn = 0;
5642 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5643 			state->abm_level = amdgpu_dm_abm_level;
5644 
5645 		__drm_atomic_helper_connector_reset(connector, &state->base);
5646 	}
5647 }
5648 
5649 struct drm_connector_state *
5650 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5651 {
5652 	struct dm_connector_state *state =
5653 		to_dm_connector_state(connector->state);
5654 
5655 	struct dm_connector_state *new_state =
5656 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5657 
5658 	if (!new_state)
5659 		return NULL;
5660 
5661 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5662 
5663 	new_state->freesync_capable = state->freesync_capable;
5664 	new_state->abm_level = state->abm_level;
5665 	new_state->scaling = state->scaling;
5666 	new_state->underscan_enable = state->underscan_enable;
5667 	new_state->underscan_hborder = state->underscan_hborder;
5668 	new_state->underscan_vborder = state->underscan_vborder;
5669 	new_state->vcpi_slots = state->vcpi_slots;
5670 	new_state->pbn = state->pbn;
5671 	return &new_state->base;
5672 }
5673 
5674 static int
5675 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5676 {
5677 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5678 		to_amdgpu_dm_connector(connector);
5679 	int r;
5680 
5681 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5682 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5683 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5684 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5685 		if (r)
5686 			return r;
5687 	}
5688 
5689 #if defined(CONFIG_DEBUG_FS)
5690 	connector_debugfs_init(amdgpu_dm_connector);
5691 #endif
5692 
5693 	return 0;
5694 }
5695 
5696 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5697 	.reset = amdgpu_dm_connector_funcs_reset,
5698 	.detect = amdgpu_dm_connector_detect,
5699 	.fill_modes = drm_helper_probe_single_connector_modes,
5700 	.destroy = amdgpu_dm_connector_destroy,
5701 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5702 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5703 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5704 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5705 	.late_register = amdgpu_dm_connector_late_register,
5706 	.early_unregister = amdgpu_dm_connector_unregister
5707 };
5708 
5709 static int get_modes(struct drm_connector *connector)
5710 {
5711 	return amdgpu_dm_connector_get_modes(connector);
5712 }
5713 
5714 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5715 {
5716 	struct dc_sink_init_data init_params = {
5717 			.link = aconnector->dc_link,
5718 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5719 	};
5720 	struct edid *edid;
5721 
5722 	if (!aconnector->base.edid_blob_ptr) {
5723 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5724 				aconnector->base.name);
5725 
5726 		aconnector->base.force = DRM_FORCE_OFF;
5727 		aconnector->base.override_edid = false;
5728 		return;
5729 	}
5730 
5731 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5732 
5733 	aconnector->edid = edid;
5734 
5735 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5736 		aconnector->dc_link,
5737 		(uint8_t *)edid,
5738 		(edid->extensions + 1) * EDID_LENGTH,
5739 		&init_params);
5740 
5741 	if (aconnector->base.force == DRM_FORCE_ON) {
5742 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5743 		aconnector->dc_link->local_sink :
5744 		aconnector->dc_em_sink;
5745 		dc_sink_retain(aconnector->dc_sink);
5746 	}
5747 }
5748 
5749 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5750 {
5751 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5752 
5753 	/*
	 * In case of a headless boot with force-on for a DP managed connector,
	 * these settings have to be != 0 to get an initial modeset.
5756 	 */
5757 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5758 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5759 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5760 	}
5761 
5762 
5763 	aconnector->base.override_edid = true;
5764 	create_eml_sink(aconnector);
5765 }
5766 
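/*
 * Create a stream and validate it against DC, lowering the requested bpc
 * (e.g. 10 -> 8 -> 6) on each validation failure until a configuration
 * passes or the 6 bpc minimum is exhausted.
 */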
5767 static struct dc_stream_state *
5768 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5769 				const struct drm_display_mode *drm_mode,
5770 				const struct dm_connector_state *dm_state,
5771 				const struct dc_stream_state *old_stream)
5772 {
5773 	struct drm_connector *connector = &aconnector->base;
5774 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5775 	struct dc_stream_state *stream;
5776 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5777 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5778 	enum dc_status dc_result = DC_OK;
5779 
5780 	do {
5781 		stream = create_stream_for_sink(aconnector, drm_mode,
5782 						dm_state, old_stream,
5783 						requested_bpc);
5784 		if (stream == NULL) {
5785 			DRM_ERROR("Failed to create stream for sink!\n");
5786 			break;
5787 		}
5788 
5789 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5790 
5791 		if (dc_result != DC_OK) {
5792 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5793 				      drm_mode->hdisplay,
5794 				      drm_mode->vdisplay,
5795 				      drm_mode->clock,
5796 				      dc_result,
5797 				      dc_status_to_str(dc_result));
5798 
5799 			dc_stream_release(stream);
5800 			stream = NULL;
5801 			requested_bpc -= 2; /* lower bpc to retry validation */
5802 		}
5803 
5804 	} while (stream == NULL && requested_bpc >= 6);
5805 
5806 	return stream;
5807 }
5808 
5809 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5810 				   struct drm_display_mode *mode)
5811 {
5812 	int result = MODE_ERROR;
5813 	struct dc_sink *dc_sink;
5814 	/* TODO: Unhardcode stream count */
5815 	struct dc_stream_state *stream;
5816 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5817 
5818 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5819 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5820 		return result;
5821 
5822 	/*
	 * Only run this the first time mode_valid is called to initialize
5824 	 * EDID mgmt
5825 	 */
5826 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5827 		!aconnector->dc_em_sink)
5828 		handle_edid_mgmt(aconnector);
5829 
5830 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5831 
5832 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5833 				aconnector->base.force != DRM_FORCE_ON) {
5834 		DRM_ERROR("dc_sink is NULL!\n");
5835 		goto fail;
5836 	}
5837 
5838 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5839 	if (stream) {
5840 		dc_stream_release(stream);
5841 		result = MODE_OK;
5842 	}
5843 
5844 fail:
	/* TODO: error handling */
5846 	return result;
5847 }
5848 
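/*
 * Pack the connector's HDR static metadata into a DC infopacket. The HDMI
 * DRM (Dynamic Range and Mastering) infoframe packs to exactly 30 bytes
 * (4-byte header + 26-byte payload); the payload is then re-wrapped with
 * an HDMI infoframe header or a DP SDP header depending on the connector
 * type.
 */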
5849 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5850 				struct dc_info_packet *out)
5851 {
5852 	struct hdmi_drm_infoframe frame;
5853 	unsigned char buf[30]; /* 26 + 4 */
5854 	ssize_t len;
5855 	int ret, i;
5856 
5857 	memset(out, 0, sizeof(*out));
5858 
5859 	if (!state->hdr_output_metadata)
5860 		return 0;
5861 
5862 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5863 	if (ret)
5864 		return ret;
5865 
5866 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5867 	if (len < 0)
5868 		return (int)len;
5869 
5870 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5871 	if (len != 30)
5872 		return -EINVAL;
5873 
5874 	/* Prepare the infopacket for DC. */
5875 	switch (state->connector->connector_type) {
5876 	case DRM_MODE_CONNECTOR_HDMIA:
5877 		out->hb0 = 0x87; /* type */
5878 		out->hb1 = 0x01; /* version */
5879 		out->hb2 = 0x1A; /* length */
5880 		out->sb[0] = buf[3]; /* checksum */
5881 		i = 1;
5882 		break;
5883 
5884 	case DRM_MODE_CONNECTOR_DisplayPort:
5885 	case DRM_MODE_CONNECTOR_eDP:
5886 		out->hb0 = 0x00; /* sdp id, zero */
5887 		out->hb1 = 0x87; /* type */
5888 		out->hb2 = 0x1D; /* payload len - 1 */
5889 		out->hb3 = (0x13 << 2); /* sdp version */
5890 		out->sb[0] = 0x01; /* version */
5891 		out->sb[1] = 0x1A; /* length */
5892 		i = 2;
5893 		break;
5894 
5895 	default:
5896 		return -EINVAL;
5897 	}
5898 
5899 	memcpy(&out->sb[i], &buf[4], 26);
5900 	out->valid = true;
5901 
5902 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5903 		       sizeof(out->sb), false);
5904 
5905 	return 0;
5906 }
5907 
5908 static bool
5909 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5910 			  const struct drm_connector_state *new_state)
5911 {
5912 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5913 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5914 
5915 	if (old_blob != new_blob) {
5916 		if (old_blob && new_blob &&
5917 		    old_blob->length == new_blob->length)
5918 			return memcmp(old_blob->data, new_blob->data,
5919 				      old_blob->length);
5920 
5921 		return true;
5922 	}
5923 
5924 	return false;
5925 }
5926 
5927 static int
5928 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5929 				 struct drm_atomic_state *state)
5930 {
5931 	struct drm_connector_state *new_con_state =
5932 		drm_atomic_get_new_connector_state(state, conn);
5933 	struct drm_connector_state *old_con_state =
5934 		drm_atomic_get_old_connector_state(state, conn);
5935 	struct drm_crtc *crtc = new_con_state->crtc;
5936 	struct drm_crtc_state *new_crtc_state;
5937 	int ret;
5938 
5939 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
5940 
5941 	if (!crtc)
5942 		return 0;
5943 
5944 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5945 		struct dc_info_packet hdr_infopacket;
5946 
5947 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5948 		if (ret)
5949 			return ret;
5950 
5951 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5952 		if (IS_ERR(new_crtc_state))
5953 			return PTR_ERR(new_crtc_state);
5954 
5955 		/*
5956 		 * DC considers the stream backends changed if the
5957 		 * static metadata changes. Forcing the modeset also
5958 		 * gives a simple way for userspace to switch from
5959 		 * 8bpc to 10bpc when setting the metadata to enter
5960 		 * or exit HDR.
5961 		 *
5962 		 * Changing the static metadata after it's been
5963 		 * set is permissible, however. So only force a
5964 		 * modeset if we're entering or exiting HDR.
5965 		 */
5966 		new_crtc_state->mode_changed =
5967 			!old_con_state->hdr_output_metadata ||
5968 			!new_con_state->hdr_output_metadata;
5969 	}
5970 
5971 	return 0;
5972 }
5973 
5974 static const struct drm_connector_helper_funcs
5975 amdgpu_dm_connector_helper_funcs = {
5976 	/*
5977 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5978 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5979 	 * are missing after user start lightdm. So we need to renew modes list.
5980 	 * in get_modes call back, not just return the modes count
5981 	 */
5982 	.get_modes = get_modes,
5983 	.mode_valid = amdgpu_dm_connector_mode_valid,
5984 	.atomic_check = amdgpu_dm_connector_atomic_check,
5985 };
5986 
5987 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5988 {
5989 }
5990 
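/*
 * Count the non-cursor planes that will be enabled on the CRTC once this
 * atomic state is applied; planes absent from the state are assumed to
 * keep their previously validated (enabled) configuration.
 */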
5991 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5992 {
5993 	struct drm_atomic_state *state = new_crtc_state->state;
5994 	struct drm_plane *plane;
5995 	int num_active = 0;
5996 
5997 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5998 		struct drm_plane_state *new_plane_state;
5999 
6000 		/* Cursor planes are "fake". */
6001 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6002 			continue;
6003 
6004 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6005 
6006 		if (!new_plane_state) {
6007 			/*
			 * The plane is enabled on the CRTC and hasn't changed
6009 			 * state. This means that it previously passed
6010 			 * validation and is therefore enabled.
6011 			 */
6012 			num_active += 1;
6013 			continue;
6014 		}
6015 
6016 		/* We need a framebuffer to be considered enabled. */
6017 		num_active += (new_plane_state->fb != NULL);
6018 	}
6019 
6020 	return num_active;
6021 }
6022 
6023 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6024 					 struct drm_crtc_state *new_crtc_state)
6025 {
6026 	struct dm_crtc_state *dm_new_crtc_state =
6027 		to_dm_crtc_state(new_crtc_state);
6028 
6029 	dm_new_crtc_state->active_planes = 0;
6030 
6031 	if (!dm_new_crtc_state->stream)
6032 		return;
6033 
6034 	dm_new_crtc_state->active_planes =
6035 		count_crtc_active_planes(new_crtc_state);
6036 }
6037 
6038 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6039 				       struct drm_atomic_state *state)
6040 {
6041 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6042 									  crtc);
6043 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6044 	struct dc *dc = adev->dm.dc;
6045 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6046 	int ret = -EINVAL;
6047 
6048 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6049 
6050 	dm_update_crtc_active_planes(crtc, crtc_state);
6051 
6052 	if (unlikely(!dm_crtc_state->stream &&
6053 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6054 		WARN_ON(1);
6055 		return ret;
6056 	}
6057 
6058 	/*
6059 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6060 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6061 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6062 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6063 	 */
6064 	if (crtc_state->enable &&
6065 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6066 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6067 		return -EINVAL;
6068 	}
6069 
6070 	/* In some use cases, like reset, no stream is attached */
6071 	if (!dm_crtc_state->stream)
6072 		return 0;
6073 
6074 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6075 		return 0;
6076 
6077 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6078 	return ret;
6079 }
6080 
6081 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6082 				      const struct drm_display_mode *mode,
6083 				      struct drm_display_mode *adjusted_mode)
6084 {
6085 	return true;
6086 }
6087 
6088 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6089 	.disable = dm_crtc_helper_disable,
6090 	.atomic_check = dm_crtc_helper_atomic_check,
6091 	.mode_fixup = dm_crtc_helper_mode_fixup,
6092 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6093 };
6094 
6095 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6096 {
6097 
6098 }
6099 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}

	return 0;
6119 }
6120 
6121 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6122 					  struct drm_crtc_state *crtc_state,
6123 					  struct drm_connector_state *conn_state)
6124 {
6125 	struct drm_atomic_state *state = crtc_state->state;
6126 	struct drm_connector *connector = conn_state->connector;
6127 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6128 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6129 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6130 	struct drm_dp_mst_topology_mgr *mst_mgr;
6131 	struct drm_dp_mst_port *mst_port;
6132 	enum dc_color_depth color_depth;
6133 	int clock, bpp = 0;
6134 	bool is_y420 = false;
6135 
6136 	if (!aconnector->port || !aconnector->dc_sink)
6137 		return 0;
6138 
6139 	mst_port = aconnector->port;
6140 	mst_mgr = &aconnector->mst_port->mst_mgr;
6141 
6142 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6143 		return 0;
6144 
6145 	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6148 				aconnector->force_yuv420_output;
6149 		color_depth = convert_color_depth_from_display_info(connector,
6150 								    is_y420,
6151 								    max_bpc);
6152 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6153 		clock = adjusted_mode->clock;
6154 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6155 	}
6156 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6157 									   mst_mgr,
6158 									   mst_port,
6159 									   dm_new_connector_state->pbn,
6160 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6161 	if (dm_new_connector_state->vcpi_slots < 0) {
6162 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6163 		return dm_new_connector_state->vcpi_slots;
6164 	}
6165 	return 0;
6166 }
6167 
6168 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6169 	.disable = dm_encoder_helper_disable,
6170 	.atomic_check = dm_encoder_helper_atomic_check
6171 };
6172 
6173 #if defined(CONFIG_DRM_AMD_DC_DCN)
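/*
 * For each MST connector in the atomic state, recompute the PBN/VCPI
 * allocation when DSC is enabled on its stream: the PBN is derived from
 * the compressed bits-per-pixel and pixel clock rather than from the
 * uncompressed rate, and DSC is enabled (or disabled) on the MST port
 * accordingly.
 */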
6174 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6175 					    struct dc_state *dc_state)
6176 {
6177 	struct dc_stream_state *stream = NULL;
6178 	struct drm_connector *connector;
6179 	struct drm_connector_state *new_con_state, *old_con_state;
6180 	struct amdgpu_dm_connector *aconnector;
6181 	struct dm_connector_state *dm_conn_state;
6182 	int i, j, clock, bpp;
6183 	int vcpi, pbn_div, pbn = 0;
6184 
6185 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6186 
6187 		aconnector = to_amdgpu_dm_connector(connector);
6188 
6189 		if (!aconnector->port)
6190 			continue;
6191 
6192 		if (!new_con_state || !new_con_state->crtc)
6193 			continue;
6194 
6195 		dm_conn_state = to_dm_connector_state(new_con_state);
6196 
6197 		for (j = 0; j < dc_state->stream_count; j++) {
6198 			stream = dc_state->streams[j];
6199 			if (!stream)
6200 				continue;
6201 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6203 				break;
6204 
6205 			stream = NULL;
6206 		}
6207 
6208 		if (!stream)
6209 			continue;
6210 
6211 		if (stream->timing.flags.DSC != 1) {
6212 			drm_dp_mst_atomic_enable_dsc(state,
6213 						     aconnector->port,
6214 						     dm_conn_state->pbn,
6215 						     0,
6216 						     false);
6217 			continue;
6218 		}
6219 
6220 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6221 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6222 		clock = stream->timing.pix_clk_100hz / 10;
6223 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6224 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6225 						    aconnector->port,
6226 						    pbn, pbn_div,
6227 						    true);
6228 		if (vcpi < 0)
6229 			return vcpi;
6230 
6231 		dm_conn_state->pbn = pbn;
6232 		dm_conn_state->vcpi_slots = vcpi;
6233 	}
6234 	return 0;
6235 }
6236 #endif
6237 
6238 static void dm_drm_plane_reset(struct drm_plane *plane)
6239 {
6240 	struct dm_plane_state *amdgpu_state = NULL;
6241 
6242 	if (plane->state)
6243 		plane->funcs->atomic_destroy_state(plane, plane->state);
6244 
6245 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6246 	WARN_ON(amdgpu_state == NULL);
6247 
6248 	if (amdgpu_state)
6249 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6250 }
6251 
6252 static struct drm_plane_state *
6253 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6254 {
6255 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6256 
6257 	old_dm_plane_state = to_dm_plane_state(plane->state);
6258 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6259 	if (!dm_plane_state)
6260 		return NULL;
6261 
6262 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6263 
6264 	if (old_dm_plane_state->dc_state) {
6265 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6266 		dc_plane_state_retain(dm_plane_state->dc_state);
6267 	}
6268 
6269 	return &dm_plane_state->base;
6270 }
6271 
6272 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6273 				struct drm_plane_state *state)
6274 {
6275 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6276 
6277 	if (dm_plane_state->dc_state)
6278 		dc_plane_state_release(dm_plane_state->dc_state);
6279 
6280 	drm_atomic_helper_plane_destroy_state(plane, state);
6281 }
6282 
6283 static const struct drm_plane_funcs dm_plane_funcs = {
6284 	.update_plane	= drm_atomic_helper_update_plane,
6285 	.disable_plane	= drm_atomic_helper_disable_plane,
6286 	.destroy	= drm_primary_helper_destroy,
6287 	.reset = dm_drm_plane_reset,
6288 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6289 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6290 	.format_mod_supported = dm_plane_format_mod_supported,
6291 };
6292 
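/*
 * Prepare a framebuffer for scanout: reserve and pin the backing BO in a
 * supported domain (always VRAM for cursors), map it into GART, and record
 * the resulting GPU address in the amdgpu_framebuffer.
 */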
6293 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6294 				      struct drm_plane_state *new_state)
6295 {
6296 	struct amdgpu_framebuffer *afb;
6297 	struct drm_gem_object *obj;
6298 	struct amdgpu_device *adev;
6299 	struct amdgpu_bo *rbo;
6300 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6301 	struct list_head list;
6302 	struct ttm_validate_buffer tv;
6303 	struct ww_acquire_ctx ticket;
6304 	uint32_t domain;
6305 	int r;
6306 
6307 	if (!new_state->fb) {
6308 		DRM_DEBUG_DRIVER("No FB bound\n");
6309 		return 0;
6310 	}
6311 
6312 	afb = to_amdgpu_framebuffer(new_state->fb);
6313 	obj = new_state->fb->obj[0];
6314 	rbo = gem_to_amdgpu_bo(obj);
6315 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6316 	INIT_LIST_HEAD(&list);
6317 
6318 	tv.bo = &rbo->tbo;
6319 	tv.num_shared = 1;
6320 	list_add(&tv.head, &list);
6321 
6322 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6323 	if (r) {
6324 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6325 		return r;
6326 	}
6327 
6328 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6329 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6330 	else
6331 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6332 
6333 	r = amdgpu_bo_pin(rbo, domain);
6334 	if (unlikely(r != 0)) {
6335 		if (r != -ERESTARTSYS)
6336 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6337 		ttm_eu_backoff_reservation(&ticket, &list);
6338 		return r;
6339 	}
6340 
6341 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6342 	if (unlikely(r != 0)) {
6343 		amdgpu_bo_unpin(rbo);
6344 		ttm_eu_backoff_reservation(&ticket, &list);
6345 		DRM_ERROR("%p bind failed\n", rbo);
6346 		return r;
6347 	}
6348 
6349 	ttm_eu_backoff_reservation(&ticket, &list);
6350 
6351 	afb->address = amdgpu_bo_gpu_offset(rbo);
6352 
6353 	amdgpu_bo_ref(rbo);
6354 
6355 	/**
6356 	 * We don't do surface updates on planes that have been newly created,
6357 	 * but we also don't have the afb->address during atomic check.
6358 	 *
6359 	 * Fill in buffer attributes depending on the address here, but only on
6360 	 * newly created planes since they're not being used by DC yet and this
6361 	 * won't modify global state.
6362 	 */
6363 	dm_plane_state_old = to_dm_plane_state(plane->state);
6364 	dm_plane_state_new = to_dm_plane_state(new_state);
6365 
6366 	if (dm_plane_state_new->dc_state &&
6367 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6368 		struct dc_plane_state *plane_state =
6369 			dm_plane_state_new->dc_state;
6370 		bool force_disable_dcc = !plane_state->dcc.enable;
6371 
6372 		fill_plane_buffer_attributes(
6373 			adev, afb, plane_state->format, plane_state->rotation,
6374 			afb->tiling_flags,
6375 			&plane_state->tiling_info, &plane_state->plane_size,
6376 			&plane_state->dcc, &plane_state->address,
6377 			afb->tmz_surface, force_disable_dcc);
6378 	}
6379 
6380 	return 0;
6381 }
6382 
6383 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6384 				       struct drm_plane_state *old_state)
6385 {
6386 	struct amdgpu_bo *rbo;
6387 	int r;
6388 
6389 	if (!old_state->fb)
6390 		return;
6391 
6392 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6393 	r = amdgpu_bo_reserve(rbo, false);
6394 	if (unlikely(r)) {
6395 		DRM_ERROR("failed to reserve rbo before unpin\n");
6396 		return;
6397 	}
6398 
6399 	amdgpu_bo_unpin(rbo);
6400 	amdgpu_bo_unreserve(rbo);
6401 	amdgpu_bo_unref(&rbo);
6402 }
6403 
6404 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6405 				       struct drm_crtc_state *new_crtc_state)
6406 {
6407 	struct drm_framebuffer *fb = state->fb;
6408 	int min_downscale, max_upscale;
6409 	int min_scale = 0;
6410 	int max_scale = INT_MAX;
6411 
6412 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6413 	if (fb && state->crtc) {
6414 		/* Validate viewport to cover the case when only the position changes */
6415 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6416 			int viewport_width = state->crtc_w;
6417 			int viewport_height = state->crtc_h;
6418 
6419 			if (state->crtc_x < 0)
6420 				viewport_width += state->crtc_x;
6421 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6422 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6423 
6424 			if (state->crtc_y < 0)
6425 				viewport_height += state->crtc_y;
6426 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6427 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6428 
			/*
			 * If the plane is completely outside the screen,
			 * viewport_width and/or viewport_height will be
			 * negative, which still satisfies the condition below
			 * and thereby covers that case too. The x2 on width
			 * is because of pipe-split.
			 */
			if (viewport_width < MIN_VIEWPORT_SIZE * 2 || viewport_height < MIN_VIEWPORT_SIZE)
6435 				return -EINVAL;
6436 		}
6437 
6438 		/* Get min/max allowed scaling factors from plane caps. */
6439 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6440 					     &min_downscale, &max_upscale);
6441 		/*
6442 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6443 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6444 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6445 		 */
6446 		min_scale = (1000 << 16) / max_upscale;
6447 		max_scale = (1000 << 16) / min_downscale;
6448 	}
6449 
6450 	return drm_atomic_helper_check_plane_state(
6451 		state, new_crtc_state, min_scale, max_scale, true, true);
6452 }
6453 
6454 static int dm_plane_atomic_check(struct drm_plane *plane,
6455 				 struct drm_plane_state *state)
6456 {
6457 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6458 	struct dc *dc = adev->dm.dc;
6459 	struct dm_plane_state *dm_plane_state;
6460 	struct dc_scaling_info scaling_info;
6461 	struct drm_crtc_state *new_crtc_state;
6462 	int ret;
6463 
6464 	trace_amdgpu_dm_plane_atomic_check(state);
6465 
6466 	dm_plane_state = to_dm_plane_state(state);
6467 
6468 	if (!dm_plane_state->dc_state)
6469 		return 0;
6470 
6471 	new_crtc_state =
6472 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6473 	if (!new_crtc_state)
6474 		return -EINVAL;
6475 
6476 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6477 	if (ret)
6478 		return ret;
6479 
6480 	ret = fill_dc_scaling_info(state, &scaling_info);
6481 	if (ret)
6482 		return ret;
6483 
6484 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6485 		return 0;
6486 
6487 	return -EINVAL;
6488 }
6489 
6490 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6491 				       struct drm_plane_state *new_plane_state)
6492 {
6493 	/* Only support async updates on cursor planes. */
6494 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6495 		return -EINVAL;
6496 
6497 	return 0;
6498 }
6499 
6500 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6501 					 struct drm_plane_state *new_state)
6502 {
6503 	struct drm_plane_state *old_state =
6504 		drm_atomic_get_old_plane_state(new_state->state, plane);
6505 
6506 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6507 
6508 	swap(plane->state->fb, new_state->fb);
6509 
6510 	plane->state->src_x = new_state->src_x;
6511 	plane->state->src_y = new_state->src_y;
6512 	plane->state->src_w = new_state->src_w;
6513 	plane->state->src_h = new_state->src_h;
6514 	plane->state->crtc_x = new_state->crtc_x;
6515 	plane->state->crtc_y = new_state->crtc_y;
6516 	plane->state->crtc_w = new_state->crtc_w;
6517 	plane->state->crtc_h = new_state->crtc_h;
6518 
6519 	handle_cursor_update(plane, old_state);
6520 }
6521 
6522 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6523 	.prepare_fb = dm_plane_helper_prepare_fb,
6524 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6525 	.atomic_check = dm_plane_atomic_check,
6526 	.atomic_async_check = dm_plane_atomic_async_check,
6527 	.atomic_async_update = dm_plane_atomic_async_update
6528 };
6529 
6530 /*
6531  * TODO: these are currently initialized to rgb formats only.
6532  * For future use cases we should either initialize them dynamically based on
6533  * plane capabilities, or initialize this array to all formats, so internal drm
6534  * check will succeed, and let DC implement proper check
6535  */
6536 static const uint32_t rgb_formats[] = {
6537 	DRM_FORMAT_XRGB8888,
6538 	DRM_FORMAT_ARGB8888,
6539 	DRM_FORMAT_RGBA8888,
6540 	DRM_FORMAT_XRGB2101010,
6541 	DRM_FORMAT_XBGR2101010,
6542 	DRM_FORMAT_ARGB2101010,
6543 	DRM_FORMAT_ABGR2101010,
6544 	DRM_FORMAT_XBGR8888,
6545 	DRM_FORMAT_ABGR8888,
6546 	DRM_FORMAT_RGB565,
6547 };
6548 
6549 static const uint32_t overlay_formats[] = {
6550 	DRM_FORMAT_XRGB8888,
6551 	DRM_FORMAT_ARGB8888,
6552 	DRM_FORMAT_RGBA8888,
6553 	DRM_FORMAT_XBGR8888,
6554 	DRM_FORMAT_ABGR8888,
6555 	DRM_FORMAT_RGB565
6556 };
6557 
6558 static const u32 cursor_formats[] = {
6559 	DRM_FORMAT_ARGB8888
6560 };
6561 
6562 static int get_plane_formats(const struct drm_plane *plane,
6563 			     const struct dc_plane_cap *plane_cap,
6564 			     uint32_t *formats, int max_formats)
6565 {
6566 	int i, num_formats = 0;
6567 
6568 	/*
6569 	 * TODO: Query support for each group of formats directly from
6570 	 * DC plane caps. This will require adding more formats to the
6571 	 * caps list.
6572 	 */
6573 
6574 	switch (plane->type) {
6575 	case DRM_PLANE_TYPE_PRIMARY:
6576 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6577 			if (num_formats >= max_formats)
6578 				break;
6579 
6580 			formats[num_formats++] = rgb_formats[i];
6581 		}
6582 
6583 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6584 			formats[num_formats++] = DRM_FORMAT_NV12;
6585 		if (plane_cap && plane_cap->pixel_format_support.p010)
6586 			formats[num_formats++] = DRM_FORMAT_P010;
6587 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6588 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6589 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6590 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6591 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6592 		}
6593 		break;
6594 
6595 	case DRM_PLANE_TYPE_OVERLAY:
6596 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6597 			if (num_formats >= max_formats)
6598 				break;
6599 
6600 			formats[num_formats++] = overlay_formats[i];
6601 		}
6602 		break;
6603 
6604 	case DRM_PLANE_TYPE_CURSOR:
6605 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6606 			if (num_formats >= max_formats)
6607 				break;
6608 
6609 			formats[num_formats++] = cursor_formats[i];
6610 		}
6611 		break;
6612 	}
6613 
6614 	return num_formats;
6615 }
6616 
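/*
 * Initialize a DRM plane for DC: query the supported formats and modifiers,
 * register the plane with DRM, and expose the alpha/blend, color-encoding
 * and rotation properties where the plane capabilities allow them.
 */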
6617 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6618 				struct drm_plane *plane,
6619 				unsigned long possible_crtcs,
6620 				const struct dc_plane_cap *plane_cap)
6621 {
6622 	uint32_t formats[32];
6623 	int num_formats;
6624 	int res = -EPERM;
6625 	unsigned int supported_rotations;
6626 	uint64_t *modifiers = NULL;
6627 
6628 	num_formats = get_plane_formats(plane, plane_cap, formats,
6629 					ARRAY_SIZE(formats));
6630 
6631 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6632 	if (res)
6633 		return res;
6634 
6635 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6636 				       &dm_plane_funcs, formats, num_formats,
6637 				       modifiers, plane->type, NULL);
6638 	kfree(modifiers);
6639 	if (res)
6640 		return res;
6641 
6642 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6643 	    plane_cap && plane_cap->per_pixel_alpha) {
6644 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6645 					  BIT(DRM_MODE_BLEND_PREMULTI);
6646 
6647 		drm_plane_create_alpha_property(plane);
6648 		drm_plane_create_blend_mode_property(plane, blend_caps);
6649 	}
6650 
6651 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6652 	    plane_cap &&
6653 	    (plane_cap->pixel_format_support.nv12 ||
6654 	     plane_cap->pixel_format_support.p010)) {
6655 		/* This only affects YUV formats. */
6656 		drm_plane_create_color_properties(
6657 			plane,
6658 			BIT(DRM_COLOR_YCBCR_BT601) |
6659 			BIT(DRM_COLOR_YCBCR_BT709) |
6660 			BIT(DRM_COLOR_YCBCR_BT2020),
6661 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6662 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6663 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6664 	}
6665 
6666 	supported_rotations =
6667 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6668 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6669 
6670 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
6671 	    plane->type != DRM_PLANE_TYPE_CURSOR)
6672 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6673 						   supported_rotations);
6674 
6675 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6676 
6677 	/* Create (reset) the plane state */
6678 	if (plane->funcs->reset)
6679 		plane->funcs->reset(plane);
6680 
6681 	return 0;
6682 }
6683 
6684 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6685 			       struct drm_plane *plane,
6686 			       uint32_t crtc_index)
6687 {
6688 	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;
	int res = -ENOMEM;
6692 
6693 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6694 	if (!cursor_plane)
6695 		goto fail;
6696 
6697 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
6699 
6700 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6701 	if (!acrtc)
6702 		goto fail;
6703 
6704 	res = drm_crtc_init_with_planes(
6705 			dm->ddev,
6706 			&acrtc->base,
6707 			plane,
6708 			cursor_plane,
6709 			&amdgpu_dm_crtc_funcs, NULL);
6710 
6711 	if (res)
6712 		goto fail;
6713 
6714 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6715 
	/* Create (reset) the CRTC state */
6717 	if (acrtc->base.funcs->reset)
6718 		acrtc->base.funcs->reset(&acrtc->base);
6719 
6720 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6721 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6722 
6723 	acrtc->crtc_id = crtc_index;
6724 	acrtc->base.enabled = false;
6725 	acrtc->otg_inst = -1;
6726 
6727 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6728 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6729 				   true, MAX_COLOR_LUT_ENTRIES);
6730 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6731 
6732 	return 0;
6733 
6734 fail:
6735 	kfree(acrtc);
6736 	kfree(cursor_plane);
6737 	return res;
6738 }
6739 
6740 
6741 static int to_drm_connector_type(enum signal_type st)
6742 {
6743 	switch (st) {
6744 	case SIGNAL_TYPE_HDMI_TYPE_A:
6745 		return DRM_MODE_CONNECTOR_HDMIA;
6746 	case SIGNAL_TYPE_EDP:
6747 		return DRM_MODE_CONNECTOR_eDP;
6748 	case SIGNAL_TYPE_LVDS:
6749 		return DRM_MODE_CONNECTOR_LVDS;
6750 	case SIGNAL_TYPE_RGB:
6751 		return DRM_MODE_CONNECTOR_VGA;
6752 	case SIGNAL_TYPE_DISPLAY_PORT:
6753 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6754 		return DRM_MODE_CONNECTOR_DisplayPort;
6755 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6756 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6757 		return DRM_MODE_CONNECTOR_DVID;
6758 	case SIGNAL_TYPE_VIRTUAL:
6759 		return DRM_MODE_CONNECTOR_VIRTUAL;
6760 
6761 	default:
6762 		return DRM_MODE_CONNECTOR_Unknown;
6763 	}
6764 }
6765 
6766 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6767 {
6768 	struct drm_encoder *encoder;
6769 
6770 	/* There is only one encoder per connector */
6771 	drm_connector_for_each_possible_encoder(connector, encoder)
6772 		return encoder;
6773 
6774 	return NULL;
6775 }
6776 
6777 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6778 {
6779 	struct drm_encoder *encoder;
6780 	struct amdgpu_encoder *amdgpu_encoder;
6781 
6782 	encoder = amdgpu_dm_connector_to_encoder(connector);
6783 
6784 	if (encoder == NULL)
6785 		return;
6786 
6787 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6788 
6789 	amdgpu_encoder->native_mode.clock = 0;
6790 
6791 	if (!list_empty(&connector->probed_modes)) {
6792 		struct drm_display_mode *preferred_mode = NULL;
6793 
		/*
		 * probed_modes was sorted by the caller, so the first entry
		 * is the highest-ranked mode; only that one is examined.
		 */
		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
6802 
6803 	}
6804 }
6805 
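/*
 * Duplicate the encoder's native mode and rewrite its active size and name,
 * producing one of the "common" modes advertised alongside the EDID modes.
 */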
6806 static struct drm_display_mode *
6807 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6808 			     char *name,
6809 			     int hdisplay, int vdisplay)
6810 {
6811 	struct drm_device *dev = encoder->dev;
6812 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6813 	struct drm_display_mode *mode = NULL;
6814 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6815 
6816 	mode = drm_mode_duplicate(dev, native_mode);
6817 
6818 	if (mode == NULL)
6819 		return NULL;
6820 
6821 	mode->hdisplay = hdisplay;
6822 	mode->vdisplay = vdisplay;
6823 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6824 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6825 
	return mode;
}
6829 
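/*
 * Advertise a set of common modes on top of the EDID modes. Anything equal
 * to or larger than the native mode is skipped, as is anything already in
 * the probed list.
 */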
6830 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6831 						 struct drm_connector *connector)
6832 {
6833 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6834 	struct drm_display_mode *mode = NULL;
6835 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6836 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6837 				to_amdgpu_dm_connector(connector);
6838 	int i;
6839 	int n;
6840 	struct mode_size {
6841 		char name[DRM_DISPLAY_MODE_LEN];
6842 		int w;
6843 		int h;
6844 	} common_modes[] = {
6845 		{  "640x480",  640,  480},
6846 		{  "800x600",  800,  600},
6847 		{ "1024x768", 1024,  768},
6848 		{ "1280x720", 1280,  720},
6849 		{ "1280x800", 1280,  800},
6850 		{"1280x1024", 1280, 1024},
6851 		{ "1440x900", 1440,  900},
6852 		{"1680x1050", 1680, 1050},
6853 		{"1600x1200", 1600, 1200},
6854 		{"1920x1080", 1920, 1080},
6855 		{"1920x1200", 1920, 1200}
6856 	};
6857 
6858 	n = ARRAY_SIZE(common_modes);
6859 
6860 	for (i = 0; i < n; i++) {
6861 		struct drm_display_mode *curmode = NULL;
6862 		bool mode_existed = false;
6863 
6864 		if (common_modes[i].w > native_mode->hdisplay ||
6865 		    common_modes[i].h > native_mode->vdisplay ||
6866 		   (common_modes[i].w == native_mode->hdisplay &&
6867 		    common_modes[i].h == native_mode->vdisplay))
6868 			continue;
6869 
6870 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6871 			if (common_modes[i].w == curmode->hdisplay &&
6872 			    common_modes[i].h == curmode->vdisplay) {
6873 				mode_existed = true;
6874 				break;
6875 			}
6876 		}
6877 
6878 		if (mode_existed)
6879 			continue;
6880 
		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
6886 	}
6887 }
6888 
6889 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6890 					      struct edid *edid)
6891 {
6892 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6893 			to_amdgpu_dm_connector(connector);
6894 
6895 	if (edid) {
6896 		/* empty probed_modes */
6897 		INIT_LIST_HEAD(&connector->probed_modes);
6898 		amdgpu_dm_connector->num_modes =
6899 				drm_add_edid_modes(connector, edid);
6900 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode, and modes later in the
		 * probed list could be of higher, preferred resolution.
		 * For example: 3840x2160 in the base EDID preferred
		 * timing, and 4096x2160 preferred in a DID extension
		 * block that follows.
		 */
6909 		drm_mode_sort(&connector->probed_modes);
6910 		amdgpu_dm_get_native_mode(connector);
6911 	} else {
6912 		amdgpu_dm_connector->num_modes = 0;
6913 	}
6914 }
6915 
6916 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6917 {
6918 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6919 			to_amdgpu_dm_connector(connector);
6920 	struct drm_encoder *encoder;
6921 	struct edid *edid = amdgpu_dm_connector->edid;
6922 
6923 	encoder = amdgpu_dm_connector_to_encoder(connector);
6924 
6925 	if (!drm_edid_is_valid(edid)) {
6926 		amdgpu_dm_connector->num_modes =
6927 				drm_add_modes_noedid(connector, 640, 480);
6928 	} else {
6929 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6930 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6931 	}
6932 	amdgpu_dm_fbc_init(connector);
6933 
6934 	return amdgpu_dm_connector->num_modes;
6935 }
6936 
6937 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6938 				     struct amdgpu_dm_connector *aconnector,
6939 				     int connector_type,
6940 				     struct dc_link *link,
6941 				     int link_index)
6942 {
6943 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6944 
6945 	/*
6946 	 * Some of the properties below require access to state, like bpc.
6947 	 * Allocate some default initial connector state with our reset helper.
6948 	 */
6949 	if (aconnector->base.funcs->reset)
6950 		aconnector->base.funcs->reset(&aconnector->base);
6951 
6952 	aconnector->connector_id = link_index;
6953 	aconnector->dc_link = link;
6954 	aconnector->base.interlace_allowed = false;
6955 	aconnector->base.doublescan_allowed = false;
6956 	aconnector->base.stereo_allowed = false;
6957 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6958 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6959 	aconnector->audio_inst = -1;
6960 	mutex_init(&aconnector->hpd_lock);
6961 
6962 	/*
6963 	 * configure support HPD hot plug connector_>polled default value is 0
6964 	 * which means HPD hot plug not supported
6965 	 */
6966 	switch (connector_type) {
6967 	case DRM_MODE_CONNECTOR_HDMIA:
6968 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
6971 		break;
6972 	case DRM_MODE_CONNECTOR_DisplayPort:
6973 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
6976 		break;
6977 	case DRM_MODE_CONNECTOR_DVID:
6978 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6979 		break;
6980 	default:
6981 		break;
6982 	}
6983 
6984 	drm_object_attach_property(&aconnector->base.base,
6985 				dm->ddev->mode_config.scaling_mode_property,
6986 				DRM_MODE_SCALE_NONE);
6987 
6988 	drm_object_attach_property(&aconnector->base.base,
6989 				adev->mode_info.underscan_property,
6990 				UNDERSCAN_OFF);
6991 	drm_object_attach_property(&aconnector->base.base,
6992 				adev->mode_info.underscan_hborder_property,
6993 				0);
6994 	drm_object_attach_property(&aconnector->base.base,
6995 				adev->mode_info.underscan_vborder_property,
6996 				0);
6997 
6998 	if (!aconnector->mst_port)
6999 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7000 
	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
7002 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7003 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7004 
7005 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7006 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7007 		drm_object_attach_property(&aconnector->base.base,
7008 				adev->mode_info.abm_level_property, 0);
7009 	}
7010 
7011 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7012 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7013 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7014 		drm_object_attach_property(
7015 			&aconnector->base.base,
7016 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7017 
7018 		if (!aconnector->mst_port)
7019 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7020 
7021 #ifdef CONFIG_DRM_AMD_DC_HDCP
7022 		if (adev->dm.hdcp_workqueue)
7023 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7024 #endif
7025 	}
7026 }
7027 
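/*
 * i2c algorithm callback: translate the Linux i2c_msg array into a DC
 * i2c_command and submit it over the link's DDC channel.
 */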
7028 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7029 			      struct i2c_msg *msgs, int num)
7030 {
7031 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7032 	struct ddc_service *ddc_service = i2c->ddc_service;
7033 	struct i2c_command cmd;
7034 	int i;
7035 	int result = -EIO;
7036 
7037 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7038 
7039 	if (!cmd.payloads)
7040 		return result;
7041 
7042 	cmd.number_of_payloads = num;
7043 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7044 	cmd.speed = 100;
7045 
7046 	for (i = 0; i < num; i++) {
7047 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7048 		cmd.payloads[i].address = msgs[i].addr;
7049 		cmd.payloads[i].length = msgs[i].len;
7050 		cmd.payloads[i].data = msgs[i].buf;
7051 	}
7052 
7053 	if (dc_submit_i2c(
7054 			ddc_service->ctx->dc,
7055 			ddc_service->ddc_pin->hw_info.ddc_channel,
7056 			&cmd))
7057 		result = num;
7058 
7059 	kfree(cmd.payloads);
7060 	return result;
7061 }
7062 
7063 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7064 {
7065 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7066 }
7067 
7068 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7069 	.master_xfer = amdgpu_dm_i2c_xfer,
7070 	.functionality = amdgpu_dm_i2c_func,
7071 };
7072 
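/*
 * Allocate and set up an i2c adapter wrapping the link's DDC service. The
 * caller still needs to register it with i2c_add_adapter(); the res pointer
 * is currently unused.
 */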
7073 static struct amdgpu_i2c_adapter *
7074 create_i2c(struct ddc_service *ddc_service,
7075 	   int link_index,
7076 	   int *res)
7077 {
7078 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7079 	struct amdgpu_i2c_adapter *i2c;
7080 
7081 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7082 	if (!i2c)
7083 		return NULL;
7084 	i2c->base.owner = THIS_MODULE;
7085 	i2c->base.class = I2C_CLASS_DDC;
7086 	i2c->base.dev.parent = &adev->pdev->dev;
7087 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7088 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7089 	i2c_set_adapdata(&i2c->base, i2c);
7090 	i2c->ddc_service = ddc_service;
7091 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7092 
7093 	return i2c;
7094 }
7095 
7096 
7097 /*
7098  * Note: this function assumes that dc_link_detect() was called for the
7099  * dc_link which will be represented by this aconnector.
7100  */
7101 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7102 				    struct amdgpu_dm_connector *aconnector,
7103 				    uint32_t link_index,
7104 				    struct amdgpu_encoder *aencoder)
7105 {
7106 	int res = 0;
7107 	int connector_type;
7108 	struct dc *dc = dm->dc;
7109 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7110 	struct amdgpu_i2c_adapter *i2c;
7111 
7112 	link->priv = aconnector;
7113 
7114 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7115 
7116 	i2c = create_i2c(link->ddc, link->link_index, &res);
7117 	if (!i2c) {
7118 		DRM_ERROR("Failed to create i2c adapter data\n");
7119 		return -ENOMEM;
7120 	}
7121 
7122 	aconnector->i2c = i2c;
7123 	res = i2c_add_adapter(&i2c->base);
7124 
7125 	if (res) {
7126 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7127 		goto out_free;
7128 	}
7129 
7130 	connector_type = to_drm_connector_type(link->connector_signal);
7131 
7132 	res = drm_connector_init_with_ddc(
7133 			dm->ddev,
7134 			&aconnector->base,
7135 			&amdgpu_dm_connector_funcs,
7136 			connector_type,
7137 			&i2c->base);
7138 
7139 	if (res) {
7140 		DRM_ERROR("connector_init failed\n");
7141 		aconnector->connector_id = -1;
7142 		goto out_free;
7143 	}
7144 
7145 	drm_connector_helper_add(
7146 			&aconnector->base,
7147 			&amdgpu_dm_connector_helper_funcs);
7148 
7149 	amdgpu_dm_connector_init_helper(
7150 		dm,
7151 		aconnector,
7152 		connector_type,
7153 		link,
7154 		link_index);
7155 
7156 	drm_connector_attach_encoder(
7157 		&aconnector->base, &aencoder->base);
7158 
7159 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7160 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7161 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7162 
7163 out_free:
7164 	if (res) {
7165 		kfree(i2c);
7166 		aconnector->i2c = NULL;
7167 	}
7168 	return res;
7169 }
7170 
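/* Any encoder can drive any CRTC, so return a mask of all CRTCs (up to 6). */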
7171 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7172 {
7173 	switch (adev->mode_info.num_crtc) {
7174 	case 1:
7175 		return 0x1;
7176 	case 2:
7177 		return 0x3;
7178 	case 3:
7179 		return 0x7;
7180 	case 4:
7181 		return 0xf;
7182 	case 5:
7183 		return 0x1f;
7184 	case 6:
7185 	default:
7186 		return 0x3f;
7187 	}
7188 }
7189 
7190 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7191 				  struct amdgpu_encoder *aencoder,
7192 				  uint32_t link_index)
7193 {
7194 	struct amdgpu_device *adev = drm_to_adev(dev);
7195 
7196 	int res = drm_encoder_init(dev,
7197 				   &aencoder->base,
7198 				   &amdgpu_dm_encoder_funcs,
7199 				   DRM_MODE_ENCODER_TMDS,
7200 				   NULL);
7201 
7202 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7203 
7204 	if (!res)
7205 		aencoder->encoder_id = link_index;
7206 	else
7207 		aencoder->encoder_id = -1;
7208 
7209 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7210 
7211 	return res;
7212 }
7213 
7214 static void manage_dm_interrupts(struct amdgpu_device *adev,
7215 				 struct amdgpu_crtc *acrtc,
7216 				 bool enable)
7217 {
7218 	/*
7219 	 * We have no guarantee that the frontend index maps to the same
7220 	 * backend index - some even map to more than one.
7221 	 *
7222 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7223 	 */
7224 	int irq_type =
7225 		amdgpu_display_crtc_idx_to_irq_type(
7226 			adev,
7227 			acrtc->crtc_id);
7228 
7229 	if (enable) {
7230 		drm_crtc_vblank_on(&acrtc->base);
7231 		amdgpu_irq_get(
7232 			adev,
7233 			&adev->pageflip_irq,
7234 			irq_type);
	} else {
		amdgpu_irq_put(
7238 			adev,
7239 			&adev->pageflip_irq,
7240 			irq_type);
7241 		drm_crtc_vblank_off(&acrtc->base);
7242 	}
7243 }
7244 
7245 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7246 				      struct amdgpu_crtc *acrtc)
7247 {
7248 	int irq_type =
7249 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7250 
7251 	/**
7252 	 * This reads the current state for the IRQ and force reapplies
7253 	 * the setting to hardware.
7254 	 */
7255 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7256 }
7257 
7258 static bool
7259 is_scaling_state_different(const struct dm_connector_state *dm_state,
7260 			   const struct dm_connector_state *old_dm_state)
7261 {
7262 	if (dm_state->scaling != old_dm_state->scaling)
7263 		return true;
7264 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7265 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7266 			return true;
7267 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7268 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7269 			return true;
7270 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7271 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7272 		return true;
7273 	return false;
7274 }
7275 
7276 #ifdef CONFIG_DRM_AMD_DC_HDCP
7277 static bool is_content_protection_different(struct drm_connector_state *state,
7278 					    const struct drm_connector_state *old_state,
7279 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7280 {
7281 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7282 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7283 
7284 	/* Handle: Type0/1 change */
7285 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7286 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7287 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7288 		return true;
7289 	}
7290 
	/* CP is being re-enabled, ignore this.
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
7295 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7296 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7297 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7298 		return false;
7299 	}
7300 
7301 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7302 	 *
7303 	 * Handles:	UNDESIRED -> ENABLED
7304 	 */
7305 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7306 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7307 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7308 
	/* Check that something is connected and enabled; otherwise we would
	 * start HDCP with nothing connected (hot-plug, headless S3, DPMS).
7311 	 *
7312 	 * Handles:	DESIRED -> DESIRED (Special case)
7313 	 */
7314 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7315 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7316 		dm_con_state->update_hdcp = false;
7317 		return true;
7318 	}
7319 
7320 	/*
7321 	 * Handles:	UNDESIRED -> UNDESIRED
7322 	 *		DESIRED -> DESIRED
7323 	 *		ENABLED -> ENABLED
7324 	 */
7325 	if (old_state->content_protection == state->content_protection)
7326 		return false;
7327 
7328 	/*
7329 	 * Handles:	UNDESIRED -> DESIRED
7330 	 *		DESIRED -> UNDESIRED
7331 	 *		ENABLED -> UNDESIRED
7332 	 */
7333 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7334 		return true;
7335 
7336 	/*
7337 	 * Handles:	DESIRED -> ENABLED
7338 	 */
7339 	return false;
7340 }
7341 
7342 #endif
7343 static void remove_stream(struct amdgpu_device *adev,
7344 			  struct amdgpu_crtc *acrtc,
7345 			  struct dc_stream_state *stream)
7346 {
	/* This is the update-mode case; just mark the CRTC as disabled. */
7348 
7349 	acrtc->otg_inst = -1;
7350 	acrtc->enabled = false;
7351 }
7352 
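/*
 * Translate the cursor plane state into a DC cursor position. Negative
 * on-screen coordinates are folded into the hotspot so the cursor can slide
 * partially off the top/left edge.
 */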
7353 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7354 			       struct dc_cursor_position *position)
7355 {
7356 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7357 	int x, y;
7358 	int xorigin = 0, yorigin = 0;
7359 
7360 	position->enable = false;
7361 	position->x = 0;
7362 	position->y = 0;
7363 
7364 	if (!crtc || !plane->state->fb)
7365 		return 0;
7366 
7367 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7368 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7369 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7370 			  __func__,
7371 			  plane->state->crtc_w,
7372 			  plane->state->crtc_h);
7373 		return -EINVAL;
7374 	}
7375 
7376 	x = plane->state->crtc_x;
7377 	y = plane->state->crtc_y;
7378 
7379 	if (x <= -amdgpu_crtc->max_cursor_width ||
7380 	    y <= -amdgpu_crtc->max_cursor_height)
7381 		return 0;
7382 
7383 	if (x < 0) {
7384 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7385 		x = 0;
7386 	}
7387 	if (y < 0) {
7388 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7389 		y = 0;
7390 	}
7391 	position->enable = true;
7392 	position->translate_by_source = true;
7393 	position->x = x;
7394 	position->y = y;
7395 	position->x_hotspot = xorigin;
7396 	position->y_hotspot = yorigin;
7397 
7398 	return 0;
7399 }
7400 
7401 static void handle_cursor_update(struct drm_plane *plane,
7402 				 struct drm_plane_state *old_plane_state)
7403 {
7404 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7405 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7406 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7407 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7408 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7409 	uint64_t address = afb ? afb->address : 0;
7410 	struct dc_cursor_position position;
7411 	struct dc_cursor_attributes attributes;
7412 	int ret;
7413 
7414 	if (!plane->state->fb && !old_plane_state->fb)
7415 		return;
7416 
7417 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7418 			 __func__,
7419 			 amdgpu_crtc->crtc_id,
7420 			 plane->state->crtc_w,
7421 			 plane->state->crtc_h);
7422 
7423 	ret = get_cursor_position(plane, crtc, &position);
7424 	if (ret)
7425 		return;
7426 
7427 	if (!position.enable) {
7428 		/* turn off cursor */
7429 		if (crtc_state && crtc_state->stream) {
7430 			mutex_lock(&adev->dm.dc_lock);
7431 			dc_stream_set_cursor_position(crtc_state->stream,
7432 						      &position);
7433 			mutex_unlock(&adev->dm.dc_lock);
7434 		}
7435 		return;
7436 	}
7437 
7438 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7439 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7440 
7441 	memset(&attributes, 0, sizeof(attributes));
7442 	attributes.address.high_part = upper_32_bits(address);
7443 	attributes.address.low_part  = lower_32_bits(address);
7444 	attributes.width             = plane->state->crtc_w;
7445 	attributes.height            = plane->state->crtc_h;
7446 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7447 	attributes.rotation_angle    = 0;
7448 	attributes.attribute_flags.value = 0;
7449 
7450 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7451 
7452 	if (crtc_state->stream) {
7453 		mutex_lock(&adev->dm.dc_lock);
7454 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7455 							 &attributes))
7456 			DRM_ERROR("DC failed to set cursor attributes\n");
7457 
7458 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7459 						   &position))
7460 			DRM_ERROR("DC failed to set cursor position\n");
7461 		mutex_unlock(&adev->dm.dc_lock);
7462 	}
7463 }
7464 
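/*
 * Hand the CRTC's pending pageflip event over to the pageflip interrupt
 * handler. Caller must hold event_lock.
 */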
7465 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7466 {
7467 
7468 	assert_spin_locked(&acrtc->base.dev->event_lock);
7469 	WARN_ON(acrtc->event);
7470 
7471 	acrtc->event = acrtc->base.state->event;
7472 
7473 	/* Set the flip status */
7474 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7475 
7476 	/* Mark this event as consumed */
7477 	acrtc->base.state->event = NULL;
7478 
7479 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7480 						 acrtc->crtc_id);
7481 }
7482 
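/*
 * Rebuild the VRR parameters and infopacket for a stream around a flip,
 * record whether the freesync timing or infopacket actually changed, and
 * publish the result to dm_irq_params under event_lock.
 */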
7483 static void update_freesync_state_on_stream(
7484 	struct amdgpu_display_manager *dm,
7485 	struct dm_crtc_state *new_crtc_state,
7486 	struct dc_stream_state *new_stream,
7487 	struct dc_plane_state *surface,
7488 	u32 flip_timestamp_in_us)
7489 {
7490 	struct mod_vrr_params vrr_params;
7491 	struct dc_info_packet vrr_infopacket = {0};
7492 	struct amdgpu_device *adev = dm->adev;
7493 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7494 	unsigned long flags;
7495 
7496 	if (!new_stream)
7497 		return;
7498 
7499 	/*
7500 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7501 	 * For now it's sufficient to just guard against these conditions.
7502 	 */
7503 
7504 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7505 		return;
7506 
7507 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
7509 
7510 	if (surface) {
7511 		mod_freesync_handle_preflip(
7512 			dm->freesync_module,
7513 			surface,
7514 			new_stream,
7515 			flip_timestamp_in_us,
7516 			&vrr_params);
7517 
7518 		if (adev->family < AMDGPU_FAMILY_AI &&
7519 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7520 			mod_freesync_handle_v_update(dm->freesync_module,
7521 						     new_stream, &vrr_params);
7522 
7523 			/* Need to call this before the frame ends. */
7524 			dc_stream_adjust_vmin_vmax(dm->dc,
7525 						   new_crtc_state->stream,
7526 						   &vrr_params.adjust);
7527 		}
7528 	}
7529 
7530 	mod_freesync_build_vrr_infopacket(
7531 		dm->freesync_module,
7532 		new_stream,
7533 		&vrr_params,
7534 		PACKET_TYPE_VRR,
7535 		TRANSFER_FUNC_UNKNOWN,
7536 		&vrr_infopacket);
7537 
7538 	new_crtc_state->freesync_timing_changed |=
7539 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7540 			&vrr_params.adjust,
7541 			sizeof(vrr_params.adjust)) != 0);
7542 
7543 	new_crtc_state->freesync_vrr_info_changed |=
7544 		(memcmp(&new_crtc_state->vrr_infopacket,
7545 			&vrr_infopacket,
7546 			sizeof(vrr_infopacket)) != 0);
7547 
7548 	acrtc->dm_irq_params.vrr_params = vrr_params;
7549 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7550 
7551 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7552 	new_stream->vrr_infopacket = vrr_infopacket;
7553 
7554 	if (new_crtc_state->freesync_vrr_info_changed)
7555 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7556 			      new_crtc_state->base.crtc->base.id,
7557 			      (int)new_crtc_state->base.vrr_enabled,
7558 			      (int)vrr_params.state);
7559 
7560 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7561 }
7562 
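/*
 * Copy the per-CRTC freesync/VRR configuration into dm_irq_params under
 * event_lock so the interrupt handlers see a consistent snapshot.
 */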
7563 static void update_stream_irq_parameters(
7564 	struct amdgpu_display_manager *dm,
7565 	struct dm_crtc_state *new_crtc_state)
7566 {
7567 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7568 	struct mod_vrr_params vrr_params;
7569 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7570 	struct amdgpu_device *adev = dm->adev;
7571 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7572 	unsigned long flags;
7573 
7574 	if (!new_stream)
7575 		return;
7576 
7577 	/*
7578 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7579 	 * For now it's sufficient to just guard against these conditions.
7580 	 */
7581 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7582 		return;
7583 
7584 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7585 	vrr_params = acrtc->dm_irq_params.vrr_params;
7586 
7587 	if (new_crtc_state->vrr_supported &&
7588 	    config.min_refresh_in_uhz &&
7589 	    config.max_refresh_in_uhz) {
7590 		config.state = new_crtc_state->base.vrr_enabled ?
7591 			VRR_STATE_ACTIVE_VARIABLE :
7592 			VRR_STATE_INACTIVE;
7593 	} else {
7594 		config.state = VRR_STATE_UNSUPPORTED;
7595 	}
7596 
7597 	mod_freesync_build_vrr_params(dm->freesync_module,
7598 				      new_stream,
7599 				      &config, &vrr_params);
7600 
7601 	new_crtc_state->freesync_timing_changed |=
7602 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7603 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7604 
7605 	new_crtc_state->freesync_config = config;
7606 	/* Copy state for access from DM IRQ handler */
7607 	acrtc->dm_irq_params.freesync_config = config;
7608 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7609 	acrtc->dm_irq_params.vrr_params = vrr_params;
7610 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7611 }
7612 
7613 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7614 					    struct dm_crtc_state *new_state)
7615 {
7616 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7617 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7618 
7619 	if (!old_vrr_active && new_vrr_active) {
7620 		/* Transition VRR inactive -> active:
7621 		 * While VRR is active, we must not disable vblank irq, as a
7622 		 * reenable after disable would compute bogus vblank/pflip
7623 		 * timestamps if it likely happened inside display front-porch.
7624 		 *
7625 		 * We also need vupdate irq for the actual core vblank handling
7626 		 * at end of vblank.
7627 		 */
7628 		dm_set_vupdate_irq(new_state->base.crtc, true);
7629 		drm_crtc_vblank_get(new_state->base.crtc);
7630 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7631 				 __func__, new_state->base.crtc->base.id);
7632 	} else if (old_vrr_active && !new_vrr_active) {
7633 		/* Transition VRR active -> inactive:
7634 		 * Allow vblank irq disable again for fixed refresh rate.
7635 		 */
7636 		dm_set_vupdate_irq(new_state->base.crtc, false);
7637 		drm_crtc_vblank_put(new_state->base.crtc);
7638 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7639 				 __func__, new_state->base.crtc->base.id);
7640 	}
7641 }
7642 
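/* Push cursor updates for every cursor plane in the atomic state. */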
7643 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7644 {
7645 	struct drm_plane *plane;
7646 	struct drm_plane_state *old_plane_state, *new_plane_state;
7647 	int i;
7648 
7649 	/*
7650 	 * TODO: Make this per-stream so we don't issue redundant updates for
7651 	 * commits with multiple streams.
7652 	 */
7653 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7654 				       new_plane_state, i)
7655 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7656 			handle_cursor_update(plane, old_plane_state);
7657 }
7658 
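/*
 * Program all plane updates for one CRTC: build a dc_surface_update bundle,
 * throttle flips against the target vblank, then hand the bundle to DC.
 */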
7659 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7660 				    struct dc_state *dc_state,
7661 				    struct drm_device *dev,
7662 				    struct amdgpu_display_manager *dm,
7663 				    struct drm_crtc *pcrtc,
7664 				    bool wait_for_vblank)
7665 {
7666 	int i;
7667 	uint64_t timestamp_ns;
7668 	struct drm_plane *plane;
7669 	struct drm_plane_state *old_plane_state, *new_plane_state;
7670 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7671 	struct drm_crtc_state *new_pcrtc_state =
7672 			drm_atomic_get_new_crtc_state(state, pcrtc);
7673 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7674 	struct dm_crtc_state *dm_old_crtc_state =
7675 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7676 	int planes_count = 0, vpos, hpos;
7677 	long r;
7678 	unsigned long flags;
7679 	struct amdgpu_bo *abo;
7680 	uint32_t target_vblank, last_flip_vblank;
7681 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7682 	bool pflip_present = false;
7683 	struct {
7684 		struct dc_surface_update surface_updates[MAX_SURFACES];
7685 		struct dc_plane_info plane_infos[MAX_SURFACES];
7686 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7687 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7688 		struct dc_stream_update stream_update;
7689 	} *bundle;
7690 
7691 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7692 
7693 	if (!bundle) {
7694 		dm_error("Failed to allocate update bundle\n");
7695 		goto cleanup;
7696 	}
7697 
7698 	/*
7699 	 * Disable the cursor first if we're disabling all the planes.
7700 	 * It'll remain on the screen after the planes are re-enabled
7701 	 * if we don't.
7702 	 */
7703 	if (acrtc_state->active_planes == 0)
7704 		amdgpu_dm_commit_cursors(state);
7705 
7706 	/* update planes when needed */
7707 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
7708 		struct drm_crtc *crtc = new_plane_state->crtc;
7709 		struct drm_crtc_state *new_crtc_state;
7710 		struct drm_framebuffer *fb = new_plane_state->fb;
7711 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7712 		bool plane_needs_flip;
7713 		struct dc_plane_state *dc_plane;
7714 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7715 
7716 		/* Cursor plane is handled after stream updates */
7717 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7718 			continue;
7719 
7720 		if (!fb || !crtc || pcrtc != crtc)
7721 			continue;
7722 
7723 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7724 		if (!new_crtc_state->active)
7725 			continue;
7726 
7727 		dc_plane = dm_new_plane_state->dc_state;
7728 
7729 		bundle->surface_updates[planes_count].surface = dc_plane;
7730 		if (new_pcrtc_state->color_mgmt_changed) {
7731 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7732 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7733 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7734 		}
7735 
7736 		fill_dc_scaling_info(new_plane_state,
7737 				     &bundle->scaling_infos[planes_count]);
7738 
7739 		bundle->surface_updates[planes_count].scaling_info =
7740 			&bundle->scaling_infos[planes_count];
7741 
7742 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7743 
7744 		pflip_present = pflip_present || plane_needs_flip;
7745 
7746 		if (!plane_needs_flip) {
7747 			planes_count += 1;
7748 			continue;
7749 		}
7750 
7751 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7752 
7753 		/*
7754 		 * Wait for all fences on this FB. Do limited wait to avoid
7755 		 * deadlock during GPU reset when this fence will not signal
7756 		 * but we hold reservation lock for the BO.
7757 		 */
7758 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7759 							false,
7760 							msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!\n");
7763 
7764 		fill_dc_plane_info_and_addr(
7765 			dm->adev, new_plane_state,
7766 			afb->tiling_flags,
7767 			&bundle->plane_infos[planes_count],
7768 			&bundle->flip_addrs[planes_count].address,
7769 			afb->tmz_surface, false);
7770 
7771 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7772 				 new_plane_state->plane->index,
7773 				 bundle->plane_infos[planes_count].dcc.enable);
7774 
7775 		bundle->surface_updates[planes_count].plane_info =
7776 			&bundle->plane_infos[planes_count];
7777 
7778 		/*
7779 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
7781 		 */
7782 		bundle->flip_addrs[planes_count].flip_immediate =
7783 			crtc->state->async_flip &&
7784 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7785 
7786 		timestamp_ns = ktime_get_ns();
7787 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7788 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7789 		bundle->surface_updates[planes_count].surface = dc_plane;
7790 
7791 		if (!bundle->surface_updates[planes_count].surface) {
7792 			DRM_ERROR("No surface for CRTC: id=%d\n",
7793 					acrtc_attach->crtc_id);
7794 			continue;
7795 		}
7796 
7797 		if (plane == pcrtc->primary)
7798 			update_freesync_state_on_stream(
7799 				dm,
7800 				acrtc_state,
7801 				acrtc_state->stream,
7802 				dc_plane,
7803 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7804 
7805 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7806 				 __func__,
7807 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7808 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7809 
7810 		planes_count += 1;
7811 
7812 	}
7813 
7814 	if (pflip_present) {
7815 		if (!vrr_active) {
7816 			/* Use old throttling in non-vrr fixed refresh rate mode
7817 			 * to keep flip scheduling based on target vblank counts
7818 			 * working in a backwards compatible way, e.g., for
7819 			 * clients using the GLX_OML_sync_control extension or
7820 			 * DRI3/Present extension with defined target_msc.
7821 			 */
7822 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7823 		}
7824 		else {
7825 			/* For variable refresh rate mode only:
7826 			 * Get vblank of last completed flip to avoid > 1 vrr
7827 			 * flips per video frame by use of throttling, but allow
7828 			 * flip programming anywhere in the possibly large
7829 			 * variable vrr vblank interval for fine-grained flip
7830 			 * timing control and more opportunity to avoid stutter
7831 			 * on late submission of flips.
7832 			 */
7833 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7834 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7835 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7836 		}
7837 
7838 		target_vblank = last_flip_vblank + wait_for_vblank;
7839 
7840 		/*
7841 		 * Wait until we're out of the vertical blank period before the one
7842 		 * targeted by the flip
7843 		 */
7844 		while ((acrtc_attach->enabled &&
7845 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7846 							    0, &vpos, &hpos, NULL,
7847 							    NULL, &pcrtc->hwmode)
7848 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7849 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7850 			(int)(target_vblank -
7851 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7852 			usleep_range(1000, 1100);
7853 		}
7854 
7855 		/**
7856 		 * Prepare the flip event for the pageflip interrupt to handle.
7857 		 *
7858 		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
7860 		 * from 0 -> n planes we have to skip a hardware generated event
7861 		 * and rely on sending it from software.
7862 		 */
7863 		if (acrtc_attach->base.state->event &&
7864 		    acrtc_state->active_planes > 0) {
7865 			drm_crtc_vblank_get(pcrtc);
7866 
7867 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7868 
7869 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7870 			prepare_flip_isr(acrtc_attach);
7871 
7872 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7873 		}
7874 
7875 		if (acrtc_state->stream) {
7876 			if (acrtc_state->freesync_vrr_info_changed)
7877 				bundle->stream_update.vrr_infopacket =
7878 					&acrtc_state->stream->vrr_infopacket;
7879 		}
7880 	}
7881 
7882 	/* Update the planes if changed or disable if we don't have any. */
7883 	if ((planes_count || acrtc_state->active_planes == 0) &&
7884 		acrtc_state->stream) {
7885 		bundle->stream_update.stream = acrtc_state->stream;
7886 		if (new_pcrtc_state->mode_changed) {
7887 			bundle->stream_update.src = acrtc_state->stream->src;
7888 			bundle->stream_update.dst = acrtc_state->stream->dst;
7889 		}
7890 
7891 		if (new_pcrtc_state->color_mgmt_changed) {
7892 			/*
7893 			 * TODO: This isn't fully correct since we've actually
7894 			 * already modified the stream in place.
7895 			 */
7896 			bundle->stream_update.gamut_remap =
7897 				&acrtc_state->stream->gamut_remap_matrix;
7898 			bundle->stream_update.output_csc_transform =
7899 				&acrtc_state->stream->csc_color_matrix;
7900 			bundle->stream_update.out_transfer_func =
7901 				acrtc_state->stream->out_transfer_func;
7902 		}
7903 
7904 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7905 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7906 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7907 
7908 		/*
7909 		 * If FreeSync state on the stream has changed then we need to
7910 		 * re-adjust the min/max bounds now that DC doesn't handle this
7911 		 * as part of commit.
7912 		 */
7913 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7914 		    amdgpu_dm_vrr_active(acrtc_state)) {
7915 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7916 			dc_stream_adjust_vmin_vmax(
7917 				dm->dc, acrtc_state->stream,
7918 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7919 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7920 		}
7921 		mutex_lock(&dm->dc_lock);
7922 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7923 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7924 			amdgpu_dm_psr_disable(acrtc_state->stream);
7925 
		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update);
7931 
7932 		/**
7933 		 * Enable or disable the interrupts on the backend.
7934 		 *
7935 		 * Most pipes are put into power gating when unused.
7936 		 *
		 * When power gating is enabled on a pipe, its interrupt
		 * enablement state is lost by the time power gating is
		 * disabled again.
7939 		 *
7940 		 * So we need to update the IRQ control state in hardware
7941 		 * whenever the pipe turns on (since it could be previously
7942 		 * power gated) or off (since some pipes can't be power gated
7943 		 * on some ASICs).
7944 		 */
7945 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7946 			dm_update_pflip_irq_state(drm_to_adev(dev),
7947 						  acrtc_attach);
7948 
7949 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7950 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7951 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7952 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7953 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7954 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7955 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7956 			amdgpu_dm_psr_enable(acrtc_state->stream);
7957 		}
7958 
7959 		mutex_unlock(&dm->dc_lock);
7960 	}
7961 
7962 	/*
7963 	 * Update cursor state *after* programming all the planes.
7964 	 * This avoids redundant programming in the case where we're going
7965 	 * to be disabling a single plane - those pipes are being disabled.
7966 	 */
7967 	if (acrtc_state->active_planes)
7968 		amdgpu_dm_commit_cursors(state);
7969 
7970 cleanup:
7971 	kfree(bundle);
7972 }
7973 
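/*
 * Notify the audio component about ELD changes: first for connectors that
 * lost their CRTC (removals), then for newly routed ones (additions).
 */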
7974 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7975 				   struct drm_atomic_state *state)
7976 {
7977 	struct amdgpu_device *adev = drm_to_adev(dev);
7978 	struct amdgpu_dm_connector *aconnector;
7979 	struct drm_connector *connector;
7980 	struct drm_connector_state *old_con_state, *new_con_state;
7981 	struct drm_crtc_state *new_crtc_state;
7982 	struct dm_crtc_state *new_dm_crtc_state;
7983 	const struct dc_stream_status *status;
7984 	int i, inst;
7985 
7986 	/* Notify device removals. */
7987 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7988 		if (old_con_state->crtc != new_con_state->crtc) {
7989 			/* CRTC changes require notification. */
7990 			goto notify;
7991 		}
7992 
7993 		if (!new_con_state->crtc)
7994 			continue;
7995 
7996 		new_crtc_state = drm_atomic_get_new_crtc_state(
7997 			state, new_con_state->crtc);
7998 
7999 		if (!new_crtc_state)
8000 			continue;
8001 
8002 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8003 			continue;
8004 
8005 	notify:
8006 		aconnector = to_amdgpu_dm_connector(connector);
8007 
8008 		mutex_lock(&adev->dm.audio_lock);
8009 		inst = aconnector->audio_inst;
8010 		aconnector->audio_inst = -1;
8011 		mutex_unlock(&adev->dm.audio_lock);
8012 
8013 		amdgpu_dm_audio_eld_notify(adev, inst);
8014 	}
8015 
8016 	/* Notify audio device additions. */
8017 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8018 		if (!new_con_state->crtc)
8019 			continue;
8020 
8021 		new_crtc_state = drm_atomic_get_new_crtc_state(
8022 			state, new_con_state->crtc);
8023 
8024 		if (!new_crtc_state)
8025 			continue;
8026 
8027 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8028 			continue;
8029 
8030 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8031 		if (!new_dm_crtc_state->stream)
8032 			continue;
8033 
8034 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8035 		if (!status)
8036 			continue;
8037 
8038 		aconnector = to_amdgpu_dm_connector(connector);
8039 
8040 		mutex_lock(&adev->dm.audio_lock);
8041 		inst = status->audio_inst;
8042 		aconnector->audio_inst = inst;
8043 		mutex_unlock(&adev->dm.audio_lock);
8044 
8045 		amdgpu_dm_audio_eld_notify(adev, inst);
8046 	}
8047 }
8048 
8049 /*
8050  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8051  * @crtc_state: the DRM CRTC state
8052  * @stream_state: the DC stream state.
8053  *
8054  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8055  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8056  */
8057 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8058 						struct dc_stream_state *stream_state)
8059 {
8060 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8061 }
8062 
8063 /**
 * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
8065  * @state: The atomic state to commit
8066  *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
8070  */
8071 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8072 {
8073 	struct drm_device *dev = state->dev;
8074 	struct amdgpu_device *adev = drm_to_adev(dev);
8075 	struct amdgpu_display_manager *dm = &adev->dm;
8076 	struct dm_atomic_state *dm_state;
8077 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8078 	uint32_t i, j;
8079 	struct drm_crtc *crtc;
8080 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8081 	unsigned long flags;
8082 	bool wait_for_vblank = true;
8083 	struct drm_connector *connector;
8084 	struct drm_connector_state *old_con_state, *new_con_state;
8085 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8086 	int crtc_disable_count = 0;
8087 	bool mode_set_reset_required = false;
8088 
8089 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8090 
8091 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8092 
8093 	dm_state = dm_atomic_get_new_state(state);
8094 	if (dm_state && dm_state->context) {
8095 		dc_state = dm_state->context;
8096 	} else {
8097 		/* No state changes, retain current state. */
8098 		dc_state_temp = dc_create_state(dm->dc);
8099 		ASSERT(dc_state_temp);
8100 		dc_state = dc_state_temp;
8101 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8102 	}
8103 
8104 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8105 				       new_crtc_state, i) {
8106 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8107 
8108 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8109 
8110 		if (old_crtc_state->active &&
8111 		    (!new_crtc_state->active ||
8112 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8113 			manage_dm_interrupts(adev, acrtc, false);
8114 			dc_stream_release(dm_old_crtc_state->stream);
8115 		}
8116 	}
8117 
8118 	drm_atomic_helper_calc_timestamping_constants(state);
8119 
8120 	/* update changed items */
8121 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8122 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8123 
8124 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8125 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8126 
8127 		DRM_DEBUG_DRIVER(
8128 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8129 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8130 			"connectors_changed:%d\n",
8131 			acrtc->crtc_id,
8132 			new_crtc_state->enable,
8133 			new_crtc_state->active,
8134 			new_crtc_state->planes_changed,
8135 			new_crtc_state->mode_changed,
8136 			new_crtc_state->active_changed,
8137 			new_crtc_state->connectors_changed);
8138 
8139 		/* Disable cursor if disabling crtc */
8140 		if (old_crtc_state->active && !new_crtc_state->active) {
8141 			struct dc_cursor_position position;
8142 
8143 			memset(&position, 0, sizeof(position));
8144 			mutex_lock(&dm->dc_lock);
8145 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8146 			mutex_unlock(&dm->dc_lock);
8147 		}
8148 
8149 		/* Copy all transient state flags into dc state */
8150 		if (dm_new_crtc_state->stream) {
8151 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8152 							    dm_new_crtc_state->stream);
8153 		}
8154 
		/* Handle the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
8158 
8159 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8160 
8161 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8162 
8163 			if (!dm_new_crtc_state->stream) {
8164 				/*
8165 				 * this could happen because of issues with
8166 				 * userspace notifications delivery.
8167 				 * In this case userspace tries to set mode on
8168 				 * display which is disconnected in fact.
8169 				 * dc_sink is NULL in this case on aconnector.
8170 				 * We expect reset mode will come soon.
8171 				 *
8172 				 * This can also happen when unplug is done
8173 				 * during resume sequence ended
8174 				 *
8175 				 * In this case, we want to pretend we still
8176 				 * have a sink to keep the pipe running so that
8177 				 * hw state is consistent with the sw state
8178 				 */
8179 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8180 						__func__, acrtc->base.base.id);
8181 				continue;
8182 			}
8183 
8184 			if (dm_old_crtc_state->stream)
8185 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8186 
8187 			pm_runtime_get_noresume(dev->dev);
8188 
8189 			acrtc->enabled = true;
8190 			acrtc->hw_mode = new_crtc_state->mode;
8191 			crtc->hwmode = new_crtc_state->mode;
8192 			mode_set_reset_required = true;
8193 		} else if (modereset_required(new_crtc_state)) {
8194 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8195 			/* i.e. reset mode */
8196 			if (dm_old_crtc_state->stream)
8197 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8198 			mode_set_reset_required = true;
8199 		}
8200 	} /* for_each_crtc_in_state() */
8201 
8202 	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
8204 		if (mode_set_reset_required)
8205 			amdgpu_dm_psr_disable_all(dm);
8206 
8207 		dm_enable_per_frame_crtc_master_sync(dc_state);
8208 		mutex_lock(&dm->dc_lock);
8209 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8210 		mutex_unlock(&dm->dc_lock);
8211 	}
8212 
8213 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8214 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8215 
8216 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8217 
8218 		if (dm_new_crtc_state->stream != NULL) {
8219 			const struct dc_stream_status *status =
8220 					dc_stream_get_status(dm_new_crtc_state->stream);
8221 
8222 			if (!status)
8223 				status = dc_stream_get_status_from_state(dc_state,
8224 									 dm_new_crtc_state->stream);
8225 			if (!status)
8226 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8227 			else
8228 				acrtc->otg_inst = status->primary_otg_inst;
8229 		}
8230 	}
8231 #ifdef CONFIG_DRM_AMD_DC_HDCP
8232 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8233 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8234 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8235 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8236 
8237 		new_crtc_state = NULL;
8238 
8239 		if (acrtc)
8240 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8241 
8242 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8243 
8244 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8245 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8246 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8247 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8248 			dm_new_con_state->update_hdcp = true;
8249 			continue;
8250 		}
8251 
8252 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8253 			hdcp_update_display(
8254 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8255 				new_con_state->hdcp_content_type,
8256 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8257 													 : false);
8258 	}
8259 #endif
8260 
8261 	/* Handle connector state changes */
8262 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8263 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8264 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8265 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8266 		struct dc_surface_update surface_updates[MAX_SURFACES];
8267 		struct dc_stream_update stream_update;
8268 		struct dc_info_packet hdr_packet;
8269 		struct dc_stream_status *status = NULL;
8270 		bool abm_changed, hdr_changed, scaling_changed;
8271 
8272 		memset(&surface_updates, 0, sizeof(surface_updates));
8273 		memset(&stream_update, 0, sizeof(stream_update));
8274 
8275 		if (acrtc) {
8276 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8277 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8278 		}
8279 
8280 		/* Skip any modesets/resets */
8281 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8282 			continue;
8283 
8284 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8285 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8286 
8287 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8288 							     dm_old_con_state);
8289 
8290 		abm_changed = dm_new_crtc_state->abm_level !=
8291 			      dm_old_crtc_state->abm_level;
8292 
8293 		hdr_changed =
8294 			is_hdr_metadata_different(old_con_state, new_con_state);
8295 
8296 		if (!scaling_changed && !abm_changed && !hdr_changed)
8297 			continue;
8298 
8299 		stream_update.stream = dm_new_crtc_state->stream;
8300 		if (scaling_changed) {
8301 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8302 					dm_new_con_state, dm_new_crtc_state->stream);
8303 
8304 			stream_update.src = dm_new_crtc_state->stream->src;
8305 			stream_update.dst = dm_new_crtc_state->stream->dst;
8306 		}
8307 
8308 		if (abm_changed) {
8309 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8310 
8311 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8312 		}
8313 
8314 		if (hdr_changed) {
8315 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8316 			stream_update.hdr_static_metadata = &hdr_packet;
8317 		}
8318 
8319 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8320 		WARN_ON(!status);
8321 		WARN_ON(!status->plane_count);
8322 
8323 		/*
8324 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8325 		 * Here we create an empty update on each plane.
8326 		 * To fix this, DC should permit updating only stream properties.
8327 		 */
8328 		for (j = 0; j < status->plane_count; j++)
8329 			surface_updates[j].surface = status->plane_states[j];
8330 
8331 
8332 		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     surface_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update);
8338 		mutex_unlock(&dm->dc_lock);
8339 	}
8340 
8341 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8342 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8343 				      new_crtc_state, i) {
8344 		if (old_crtc_state->active && !new_crtc_state->active)
8345 			crtc_disable_count++;
8346 
8347 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8348 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8349 
8350 		/* For freesync config update on crtc state and params for irq */
8351 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8352 
8353 		/* Handle vrr on->off / off->on transitions */
8354 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8355 						dm_new_crtc_state);
8356 	}
8357 
8358 	/**
8359 	 * Enable interrupts for CRTCs that are newly enabled or went through
8360 	 * a modeset. It was intentionally deferred until after the front end
8361 	 * state was modified to wait until the OTG was on and so the IRQ
8362 	 * handlers didn't access stale or invalid state.
8363 	 */
8364 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8365 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8366 
8367 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8368 
8369 		if (new_crtc_state->active &&
8370 		    (!old_crtc_state->active ||
8371 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8372 			dc_stream_retain(dm_new_crtc_state->stream);
8373 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8374 			manage_dm_interrupts(adev, acrtc, true);
8375 
8376 #ifdef CONFIG_DEBUG_FS
8377 			/**
8378 			 * Frontend may have changed so reapply the CRC capture
8379 			 * settings for the stream.
8380 			 */
8381 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8382 
8383 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8384 				amdgpu_dm_crtc_configure_crc_source(
8385 					crtc, dm_new_crtc_state,
8386 					dm_new_crtc_state->crc_src);
8387 			}
8388 #endif
8389 		}
8390 	}
8391 
8392 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8393 		if (new_crtc_state->async_flip)
8394 			wait_for_vblank = false;
8395 
	/* Update planes when needed, per CRTC */
8397 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8398 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8399 
8400 		if (dm_new_crtc_state->stream)
8401 			amdgpu_dm_commit_planes(state, dc_state, dev,
8402 						dm, crtc, wait_for_vblank);
8403 	}
8404 
8405 	/* Update audio instances for each connector. */
8406 	amdgpu_dm_commit_audio(dev, state);
8407 
	/*
	 * Send a vblank event for every event not already handled in the flip
	 * path, and mark each one consumed for drm_atomic_helper_commit_hw_done().
	 */
8412 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8413 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8414 
8415 		if (new_crtc_state->event)
8416 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8417 
8418 		new_crtc_state->event = NULL;
8419 	}
8420 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8421 
8422 	/* Signal HW programming completion */
8423 	drm_atomic_helper_commit_hw_done(state);
8424 
8425 	if (wait_for_vblank)
8426 		drm_atomic_helper_wait_for_flip_done(dev, state);
8427 
8428 	drm_atomic_helper_cleanup_planes(dev, state);
8429 
	/* Return the stolen VGA memory back to VRAM */
8431 	if (!adev->mman.keep_stolen_vga_memory)
8432 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8433 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8434 
8435 	/*
8436 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8437 	 * so we can put the GPU into runtime suspend if we're not driving any
8438 	 * displays anymore
8439 	 */
8440 	for (i = 0; i < crtc_disable_count; i++)
8441 		pm_runtime_put_autosuspend(dev->dev);
8442 	pm_runtime_mark_last_busy(dev->dev);
8443 
8444 	if (dc_state_temp)
8445 		dc_release_state(dc_state_temp);
8446 }
8447 
8449 static int dm_force_atomic_commit(struct drm_connector *connector)
8450 {
8451 	int ret = 0;
8452 	struct drm_device *ddev = connector->dev;
8453 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8454 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8455 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8456 	struct drm_connector_state *conn_state;
8457 	struct drm_crtc_state *crtc_state;
8458 	struct drm_plane_state *plane_state;
8459 
8460 	if (!state)
8461 		return -ENOMEM;
8462 
8463 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8464 
	/* Construct an atomic state to restore the previous display settings */
8466 
	/* Attach the connector to drm_atomic_state */
8470 	conn_state = drm_atomic_get_connector_state(state, connector);
8471 
8472 	ret = PTR_ERR_OR_ZERO(conn_state);
8473 	if (ret)
8474 		goto out;
8475 
	/* Attach the CRTC to drm_atomic_state */
8477 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8478 
8479 	ret = PTR_ERR_OR_ZERO(crtc_state);
8480 	if (ret)
8481 		goto out;
8482 
8483 	/* force a restore */
8484 	crtc_state->mode_changed = true;
8485 
8486 	/* Attach plane to drm_atomic_state */
8487 	plane_state = drm_atomic_get_plane_state(state, plane);
8488 
8489 	ret = PTR_ERR_OR_ZERO(plane_state);
8490 	if (ret)
8491 		goto out;
8492 
8493 	/* Call commit internally with the state we just constructed */
8494 	ret = drm_atomic_commit(state);
8495 
8496 out:
8497 	drm_atomic_state_put(state);
8498 	if (ret)
8499 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8500 
8501 	return ret;
8502 }
8503 
/*
 * This function handles all cases when a set-mode request does not come in
 * on hotplug: for example, when a display is unplugged and then plugged back
 * into the same port, or when running without usermode desktop manager
 * support.
 */
8509 void dm_restore_drm_connector_state(struct drm_device *dev,
8510 				    struct drm_connector *connector)
8511 {
8512 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8513 	struct amdgpu_crtc *disconnected_acrtc;
8514 	struct dm_crtc_state *acrtc_state;
8515 
8516 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8517 		return;
8518 
8519 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8520 	if (!disconnected_acrtc)
8521 		return;
8522 
8523 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8524 	if (!acrtc_state->stream)
8525 		return;
8526 
	/*
	 * If the previous sink has not been released and is different from
	 * the current one, we deduce that we cannot rely on a usermode call
	 * to turn the display on, so we do it here.
	 */
8532 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8533 		dm_force_atomic_commit(&aconnector->base);
8534 }
8535 
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * then waits for completion of all non-blocking commits.
 */
8540 static int do_aquire_global_lock(struct drm_device *dev,
8541 				 struct drm_atomic_state *state)
8542 {
8543 	struct drm_crtc *crtc;
8544 	struct drm_crtc_commit *commit;
8545 	long ret;
8546 
	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases the context, the extra locks we are taking
	 * here get released too.
	 */
8552 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8553 	if (ret)
8554 		return ret;
8555 
8556 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8557 		spin_lock(&crtc->commit_lock);
8558 		commit = list_first_entry_or_null(&crtc->commit_list,
8559 				struct drm_crtc_commit, commit_entry);
8560 		if (commit)
8561 			drm_crtc_commit_get(commit);
8562 		spin_unlock(&crtc->commit_lock);
8563 
8564 		if (!commit)
8565 			continue;
8566 
		/*
		 * Make sure all pending HW programming has completed and
		 * all page flips are done.
		 */
8571 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8572 
8573 		if (ret > 0)
8574 			ret = wait_for_completion_interruptible_timeout(
8575 					&commit->flip_done, 10*HZ);
8576 
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
8580 
8581 		drm_crtc_commit_put(commit);
8582 	}
8583 
8584 	return ret < 0 ? ret : 0;
8585 }
8586 
8587 static void get_freesync_config_for_crtc(
8588 	struct dm_crtc_state *new_crtc_state,
8589 	struct dm_connector_state *new_con_state)
8590 {
8591 	struct mod_freesync_config config = {0};
8592 	struct amdgpu_dm_connector *aconnector =
8593 			to_amdgpu_dm_connector(new_con_state->base.connector);
8594 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8595 	int vrefresh = drm_mode_vrefresh(mode);
8596 
8597 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8598 					vrefresh >= aconnector->min_vfreq &&
8599 					vrefresh <= aconnector->max_vfreq;
8600 
8601 	if (new_crtc_state->vrr_supported) {
8602 		new_crtc_state->stream->ignore_msa_timing_param = true;
8603 		config.state = new_crtc_state->base.vrr_enabled ?
8604 				VRR_STATE_ACTIVE_VARIABLE :
8605 				VRR_STATE_INACTIVE;
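		/*
		 * min/max_vfreq are in Hz while DC expects the refresh range
		 * in micro-Hz (uHz), hence the factor of 1000000 below.
		 */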
8606 		config.min_refresh_in_uhz =
8607 				aconnector->min_vfreq * 1000000;
8608 		config.max_refresh_in_uhz =
8609 				aconnector->max_vfreq * 1000000;
8610 		config.vsif_supported = true;
8611 		config.btr = true;
8612 	}
8613 
8614 	new_crtc_state->freesync_config = config;
8615 }
8616 
8617 static void reset_freesync_config_for_crtc(
8618 	struct dm_crtc_state *new_crtc_state)
8619 {
8620 	new_crtc_state->vrr_supported = false;
8621 
8622 	memset(&new_crtc_state->vrr_infopacket, 0,
8623 	       sizeof(new_crtc_state->vrr_infopacket));
8624 }
8625 
8626 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8627 				struct drm_atomic_state *state,
8628 				struct drm_crtc *crtc,
8629 				struct drm_crtc_state *old_crtc_state,
8630 				struct drm_crtc_state *new_crtc_state,
8631 				bool enable,
8632 				bool *lock_and_validation_needed)
8633 {
8634 	struct dm_atomic_state *dm_state = NULL;
8635 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8636 	struct dc_stream_state *new_stream;
8637 	int ret = 0;
8638 
	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
	 * dc_validation_set, and update only the changed items.
	 */
8643 	struct amdgpu_crtc *acrtc = NULL;
8644 	struct amdgpu_dm_connector *aconnector = NULL;
8645 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8646 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8647 
8648 	new_stream = NULL;
8649 
8650 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8651 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8652 	acrtc = to_amdgpu_crtc(crtc);
8653 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8654 
8655 	/* TODO This hack should go away */
8656 	if (aconnector && enable) {
		/* Make sure a fake sink is created in the plug-in scenario */
8658 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8659 							    &aconnector->base);
8660 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8661 							    &aconnector->base);
8662 
8663 		if (IS_ERR(drm_new_conn_state)) {
8664 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8665 			goto fail;
8666 		}
8667 
8668 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8669 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8670 
8671 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8672 			goto skip_modeset;
8673 
8674 		new_stream = create_validate_stream_for_sink(aconnector,
8675 							     &new_crtc_state->mode,
8676 							     dm_new_conn_state,
8677 							     dm_old_crtc_state->stream);
8678 
		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3. In this case it is not an
		 * error: the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */
8685 
8686 		if (!new_stream) {
8687 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8688 					__func__, acrtc->base.base.id);
8689 			ret = -ENOMEM;
8690 			goto fail;
8691 		}
8692 
8693 		/*
8694 		 * TODO: Check VSDB bits to decide whether this should
8695 		 * be enabled or not.
8696 		 */
8697 		new_stream->triggered_crtc_reset.enabled =
8698 			dm->force_timing_sync;
8699 
8700 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8701 
8702 		ret = fill_hdr_info_packet(drm_new_conn_state,
8703 					   &new_stream->hdr_static_metadata);
8704 		if (ret)
8705 			goto fail;
8706 
8707 		/*
8708 		 * If we already removed the old stream from the context
8709 		 * (and set the new stream to NULL) then we can't reuse
8710 		 * the old stream even if the stream and scaling are unchanged.
8711 		 * We'll hit the BUG_ON and black screen.
8712 		 *
8713 		 * TODO: Refactor this function to allow this check to work
8714 		 * in all conditions.
8715 		 */
8716 		if (dm_new_crtc_state->stream &&
8717 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8718 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8719 			new_crtc_state->mode_changed = false;
8720 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8721 					 new_crtc_state->mode_changed);
8722 		}
8723 	}
8724 
8725 	/* mode_changed flag may get updated above, need to check again */
8726 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8727 		goto skip_modeset;
8728 
	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
8733 		acrtc->crtc_id,
8734 		new_crtc_state->enable,
8735 		new_crtc_state->active,
8736 		new_crtc_state->planes_changed,
8737 		new_crtc_state->mode_changed,
8738 		new_crtc_state->active_changed,
8739 		new_crtc_state->connectors_changed);
8740 
8741 	/* Remove stream for any changed/disabled CRTC */
8742 	if (!enable) {
8744 		if (!dm_old_crtc_state->stream)
8745 			goto skip_modeset;
8746 
8747 		ret = dm_atomic_get_state(state, &dm_state);
8748 		if (ret)
8749 			goto fail;
8750 
8751 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8752 				crtc->base.id);
8753 
8754 		/* i.e. reset mode */
8755 		if (dc_remove_stream_from_ctx(
8756 				dm->dc,
8757 				dm_state->context,
8758 				dm_old_crtc_state->stream) != DC_OK) {
8759 			ret = -EINVAL;
8760 			goto fail;
8761 		}
8762 
8763 		dc_stream_release(dm_old_crtc_state->stream);
8764 		dm_new_crtc_state->stream = NULL;
8765 
8766 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8767 
8768 		*lock_and_validation_needed = true;
8769 
	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream
		 * when added MST connectors are not found in the existing
		 * crtc_state in chained (daisy-chain) mode.
		 * TODO: dig out the root cause of this.
		 */
8776 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8777 			goto skip_modeset;
8778 
8779 		if (modereset_required(new_crtc_state))
8780 			goto skip_modeset;
8781 
8782 		if (modeset_required(new_crtc_state, new_stream,
8783 				     dm_old_crtc_state->stream)) {
8785 			WARN_ON(dm_new_crtc_state->stream);
8786 
8787 			ret = dm_atomic_get_state(state, &dm_state);
8788 			if (ret)
8789 				goto fail;
8790 
8791 			dm_new_crtc_state->stream = new_stream;
8792 
8793 			dc_stream_retain(new_stream);
8794 
8795 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8796 						crtc->base.id);
8797 
8798 			if (dc_add_stream_to_ctx(
8799 					dm->dc,
8800 					dm_state->context,
8801 					dm_new_crtc_state->stream) != DC_OK) {
8802 				ret = -EINVAL;
8803 				goto fail;
8804 			}
8805 
8806 			*lock_and_validation_needed = true;
8807 		}
8808 	}
8809 
8810 skip_modeset:
8811 	/* Release extra reference */
8812 	if (new_stream)
		dc_stream_release(new_stream);
8814 
8815 	/*
8816 	 * We want to do dc stream updates that do not require a
8817 	 * full modeset below.
8818 	 */
8819 	if (!(enable && aconnector && new_crtc_state->active))
8820 		return 0;
	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. the CRTC is in the process of being enabled (its stream was
	 *    just added to the dc context, or is already on the context),
	 * 2. it has a valid connector attached, and
	 * 3. it is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
8829 	BUG_ON(dm_new_crtc_state->stream == NULL);
8830 
8831 	/* Scaling or underscan settings */
8832 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8833 		update_stream_scaling_settings(
8834 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8835 
8836 	/* ABM settings */
8837 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8838 
8839 	/*
8840 	 * Color management settings. We also update color properties
8841 	 * when a modeset is needed, to ensure it gets reprogrammed.
8842 	 */
8843 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8844 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8845 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8846 		if (ret)
8847 			goto fail;
8848 	}
8849 
8850 	/* Update Freesync settings. */
8851 	get_freesync_config_for_crtc(dm_new_crtc_state,
8852 				     dm_new_conn_state);
8853 
8854 	return ret;
8855 
8856 fail:
8857 	if (new_stream)
8858 		dc_stream_release(new_stream);
8859 	return ret;
8860 }
8861 
8862 static bool should_reset_plane(struct drm_atomic_state *state,
8863 			       struct drm_plane *plane,
8864 			       struct drm_plane_state *old_plane_state,
8865 			       struct drm_plane_state *new_plane_state)
8866 {
8867 	struct drm_plane *other;
8868 	struct drm_plane_state *old_other_state, *new_other_state;
8869 	struct drm_crtc_state *new_crtc_state;
8870 	int i;
8871 
	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
	 * the stream.
	 */
8877 	if (state->allow_modeset)
8878 		return true;
8879 
8880 	/* Exit early if we know that we're adding or removing the plane. */
8881 	if (old_plane_state->crtc != new_plane_state->crtc)
8882 		return true;
8883 
8884 	/* old crtc == new_crtc == NULL, plane not in context. */
8885 	if (!new_plane_state->crtc)
8886 		return false;
8887 
8888 	new_crtc_state =
8889 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8890 
8891 	if (!new_crtc_state)
8892 		return true;
8893 
8894 	/* CRTC Degamma changes currently require us to recreate planes. */
8895 	if (new_crtc_state->color_mgmt_changed)
8896 		return true;
8897 
8898 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8899 		return true;
8900 
8901 	/*
8902 	 * If there are any new primary or overlay planes being added or
8903 	 * removed then the z-order can potentially change. To ensure
8904 	 * correct z-order and pipe acquisition the current DC architecture
8905 	 * requires us to remove and recreate all existing planes.
8906 	 *
8907 	 * TODO: Come up with a more elegant solution for this.
8908 	 */
8909 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
8912 			continue;
8913 
8914 		if (old_other_state->crtc != new_plane_state->crtc &&
8915 		    new_other_state->crtc != new_plane_state->crtc)
8916 			continue;
8917 
8918 		if (old_other_state->crtc != new_other_state->crtc)
8919 			return true;
8920 
8921 		/* Src/dst size and scaling updates. */
8922 		if (old_other_state->src_w != new_other_state->src_w ||
8923 		    old_other_state->src_h != new_other_state->src_h ||
8924 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8925 		    old_other_state->crtc_h != new_other_state->crtc_h)
8926 			return true;
8927 
8928 		/* Rotation / mirroring updates. */
8929 		if (old_other_state->rotation != new_other_state->rotation)
8930 			return true;
8931 
8932 		/* Blending updates. */
8933 		if (old_other_state->pixel_blend_mode !=
8934 		    new_other_state->pixel_blend_mode)
8935 			return true;
8936 
8937 		/* Alpha updates. */
8938 		if (old_other_state->alpha != new_other_state->alpha)
8939 			return true;
8940 
8941 		/* Colorspace changes. */
8942 		if (old_other_state->color_range != new_other_state->color_range ||
8943 		    old_other_state->color_encoding != new_other_state->color_encoding)
8944 			return true;
8945 
8946 		/* Framebuffer checks fall at the end. */
8947 		if (!old_other_state->fb || !new_other_state->fb)
8948 			continue;
8949 
8950 		/* Pixel format changes can require bandwidth updates. */
8951 		if (old_other_state->fb->format != new_other_state->fb->format)
8952 			return true;
8953 
8954 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8955 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8956 
8957 		/* Tiling and DCC changes also require bandwidth updates. */
8958 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
8959 		    old_afb->base.modifier != new_afb->base.modifier)
8960 			return true;
8961 	}
8962 
8963 	return false;
8964 }
8965 
8966 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8967 			      struct drm_plane_state *new_plane_state,
8968 			      struct drm_framebuffer *fb)
8969 {
8970 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8971 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8972 	unsigned int pitch;
8973 	bool linear;
8974 
8975 	if (fb->width > new_acrtc->max_cursor_width ||
8976 	    fb->height > new_acrtc->max_cursor_height) {
		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 fb->width, fb->height);
8980 		return -EINVAL;
8981 	}
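	/*
	 * Plane src coordinates are 16.16 fixed point, so fb->width << 16 is
	 * the full FB width: anything else would mean cropping or scaling,
	 * which the cursor plane does not support.
	 */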
8982 	if (new_plane_state->src_w != fb->width << 16 ||
8983 	    new_plane_state->src_h != fb->height << 16) {
8984 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8985 		return -EINVAL;
8986 	}
8987 
8988 	/* Pitch in pixels */
8989 	pitch = fb->pitches[0] / fb->format->cpp[0];
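	/*
	 * For example, a 64x64 ARGB8888 cursor has pitches[0] = 256 bytes and
	 * cpp[0] = 4 bytes per pixel, giving a pitch of 64 pixels.
	 */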
8990 
8991 	if (fb->width != pitch) {
8992 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
8993 				 fb->width, pitch);
8994 		return -EINVAL;
8995 	}
8996 
8997 	switch (pitch) {
8998 	case 64:
8999 	case 128:
9000 	case 256:
9001 		/* FB pitch is supported by cursor plane */
9002 		break;
9003 	default:
9004 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9005 		return -EINVAL;
9006 	}
9007 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
9010 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
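		/*
		 * Pre-AI (GFX8 and older) ASICs describe tiling with the
		 * ARRAY_MODE/MICRO_TILE_MODE fields, while AI and newer use a
		 * single SWIZZLE_MODE field where 0 means linear.
		 */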
9011 		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9015 		} else {
9016 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9017 		}
9018 		if (!linear) {
9019 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9020 			return -EINVAL;
9021 		}
9022 	}
9023 
9024 	return 0;
9025 }
9026 
9027 static int dm_update_plane_state(struct dc *dc,
9028 				 struct drm_atomic_state *state,
9029 				 struct drm_plane *plane,
9030 				 struct drm_plane_state *old_plane_state,
9031 				 struct drm_plane_state *new_plane_state,
9032 				 bool enable,
9033 				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
9037 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9038 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9039 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9040 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9041 	struct amdgpu_crtc *new_acrtc;
9042 	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
9047 	old_plane_crtc = old_plane_state->crtc;
9048 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9049 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9050 
9051 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;
9055 
9056 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9057 
9058 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9059 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9060 			return -EINVAL;
9061 		}
9062 
9063 		if (new_plane_state->fb) {
9064 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9065 						 new_plane_state->fb);
9066 			if (ret)
9067 				return ret;
9068 		}
9069 
9070 		return 0;
9071 	}
9072 
9073 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9074 					 new_plane_state);
9075 
9076 	/* Remove any changed/removed planes */
9077 	if (!enable) {
9078 		if (!needs_reset)
9079 			return 0;
9080 
9081 		if (!old_plane_crtc)
9082 			return 0;
9083 
9084 		old_crtc_state = drm_atomic_get_old_crtc_state(
9085 				state, old_plane_crtc);
9086 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9087 
9088 		if (!dm_old_crtc_state->stream)
9089 			return 0;
9090 
9091 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9092 				plane->base.id, old_plane_crtc->base.id);
9093 
9094 		ret = dm_atomic_get_state(state, &dm_state);
9095 		if (ret)
9096 			return ret;
9097 
		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context))
			return -EINVAL;

		dc_plane_state_release(dm_old_plane_state->dc_state);
9109 		dm_new_plane_state->dc_state = NULL;
9110 
9111 		*lock_and_validation_needed = true;
9112 
9113 	} else { /* Add new planes */
9114 		struct dc_plane_state *dc_new_plane_state;
9115 
9116 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9117 			return 0;
9118 
9119 		if (!new_plane_crtc)
9120 			return 0;
9121 
9122 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9123 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9124 
9125 		if (!dm_new_crtc_state->stream)
9126 			return 0;
9127 
9128 		if (!needs_reset)
9129 			return 0;
9130 
9131 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9132 		if (ret)
9133 			return ret;
9134 
9135 		WARN_ON(dm_new_plane_state->dc_state);
9136 
9137 		dc_new_plane_state = dc_create_plane_state(dc);
9138 		if (!dc_new_plane_state)
9139 			return -ENOMEM;
9140 
9141 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9142 				plane->base.id, new_plane_crtc->base.id);
9143 
9144 		ret = fill_dc_plane_attributes(
9145 			drm_to_adev(new_plane_crtc->dev),
9146 			dc_new_plane_state,
9147 			new_plane_state,
9148 			new_crtc_state);
9149 		if (ret) {
9150 			dc_plane_state_release(dc_new_plane_state);
9151 			return ret;
9152 		}
9153 
9154 		ret = dm_atomic_get_state(state, &dm_state);
9155 		if (ret) {
9156 			dc_plane_state_release(dc_new_plane_state);
9157 			return ret;
9158 		}
9159 
9160 		/*
9161 		 * Any atomic check errors that occur after this will
9162 		 * not need a release. The plane state will be attached
9163 		 * to the stream, and therefore part of the atomic
9164 		 * state. It'll be released when the atomic state is
9165 		 * cleaned.
9166 		 */
9167 		if (!dc_add_plane_to_context(
9168 				dc,
9169 				dm_new_crtc_state->stream,
9170 				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
9174 			return -EINVAL;
9175 		}
9176 
9177 		dm_new_plane_state->dc_state = dc_new_plane_state;
9178 
		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
9182 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9183 
9184 		*lock_and_validation_needed = true;
9185 	}
9186 
9188 	return ret;
9189 }
9190 
9191 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9192 				struct drm_crtc *crtc,
9193 				struct drm_crtc_state *new_crtc_state)
9194 {
9195 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9196 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9197 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check the cursor plane's
	 * blending properties match the primary plane's.
	 */
9202 
9203 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9204 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb)
		return 0;
9208 
9209 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9210 			 (new_cursor_state->src_w >> 16);
9211 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9212 			 (new_cursor_state->src_h >> 16);
9213 
9214 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9215 			 (new_primary_state->src_w >> 16);
9216 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9217 			 (new_primary_state->src_h >> 16);
9218 
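	/*
	 * For example, a 64x64 cursor shown at 64x64 has a scale of 1000 on
	 * both axes; if the primary plane is upscaled from 1920x1080 to
	 * 3840x2160, its scale is 2000 and the check below rejects the state.
	 */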
9219 	if (cursor_scale_w != primary_scale_w ||
9220 	    cursor_scale_h != primary_scale_h) {
9221 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9222 		return -EINVAL;
9223 	}
9224 
9225 	return 0;
9226 }
9227 
9228 #if defined(CONFIG_DRM_AMD_DC_DCN)
9229 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9230 {
9231 	struct drm_connector *connector;
9232 	struct drm_connector_state *conn_state;
9233 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
9236 		if (conn_state->crtc != crtc)
9237 			continue;
9238 
9239 		aconnector = to_amdgpu_dm_connector(connector);
9240 		if (!aconnector->port || !aconnector->mst_port)
9241 			aconnector = NULL;
9242 		else
9243 			break;
9244 	}
9245 
9246 	if (!aconnector)
9247 		return 0;
9248 
9249 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9250 }
9251 #endif
9252 
9253 /**
9254  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9255  * @dev: The DRM device
9256  * @state: The atomic state to commit
9257  *
9258  * Validate that the given atomic state is programmable by DC into hardware.
9259  * This involves constructing a &struct dc_state reflecting the new hardware
9260  * state we wish to commit, then querying DC to see if it is programmable. It's
9261  * important not to modify the existing DC state. Otherwise, atomic_check
9262  * may unexpectedly commit hardware changes.
9263  *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another, acquiring the global lock guarantees
 * that any such commit will wait for completion of any outstanding flips,
 * using DRM's synchronization events.
9269  *
9270  * Note that DM adds the affected connectors for all CRTCs in state, when that
9271  * might not seem necessary. This is because DC stream creation requires the
9272  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9273  * be possible but non-trivial - a possible TODO item.
9274  *
 * Return: 0 on success, or a negative error code if validation failed.
9276  */
9277 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9278 				  struct drm_atomic_state *state)
9279 {
9280 	struct amdgpu_device *adev = drm_to_adev(dev);
9281 	struct dm_atomic_state *dm_state = NULL;
9282 	struct dc *dc = adev->dm.dc;
9283 	struct drm_connector *connector;
9284 	struct drm_connector_state *old_con_state, *new_con_state;
9285 	struct drm_crtc *crtc;
9286 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9287 	struct drm_plane *plane;
9288 	struct drm_plane_state *old_plane_state, *new_plane_state;
9289 	enum dc_status status;
9290 	int ret, i;
9291 	bool lock_and_validation_needed = false;
9292 	struct dm_crtc_state *dm_old_crtc_state;
9293 
9294 	trace_amdgpu_dm_atomic_check_begin(state);
9295 
9296 	ret = drm_atomic_helper_check_modeset(dev, state);
9297 	if (ret)
9298 		goto fail;
9299 
9300 	/* Check connector changes */
9301 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9302 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9303 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9304 
9305 		/* Skip connectors that are disabled or part of modeset already. */
9306 		if (!old_con_state->crtc && !new_con_state->crtc)
9307 			continue;
9308 
9309 		if (!new_con_state->crtc)
9310 			continue;
9311 
9312 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9313 		if (IS_ERR(new_crtc_state)) {
9314 			ret = PTR_ERR(new_crtc_state);
9315 			goto fail;
9316 		}
9317 
9318 		if (dm_old_con_state->abm_level !=
9319 		    dm_new_con_state->abm_level)
9320 			new_crtc_state->connectors_changed = true;
9321 	}
9322 
9323 #if defined(CONFIG_DRM_AMD_DC_DCN)
9324 	if (adev->asic_type >= CHIP_NAVI10) {
9325 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9326 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9327 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9328 				if (ret)
9329 					goto fail;
9330 			}
9331 		}
9332 	}
9333 #endif
9334 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9335 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9336 
9337 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9338 		    !new_crtc_state->color_mgmt_changed &&
9339 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9340 			dm_old_crtc_state->dsc_force_changed == false)
9341 			continue;
9342 
9343 		if (!new_crtc_state->enable)
9344 			continue;
9345 
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;
9349 
9350 		ret = drm_atomic_add_affected_planes(state, crtc);
9351 		if (ret)
9352 			goto fail;
9353 
9354 		if (dm_old_crtc_state->dsc_force_changed)
9355 			new_crtc_state->mode_changed = true;
9356 	}
9357 
9358 	/*
9359 	 * Add all primary and overlay planes on the CRTC to the state
9360 	 * whenever a plane is enabled to maintain correct z-ordering
9361 	 * and to enable fast surface updates.
9362 	 */
9363 	drm_for_each_crtc(crtc, dev) {
9364 		bool modified = false;
9365 
9366 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9367 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9368 				continue;
9369 
9370 			if (new_plane_state->crtc == crtc ||
9371 			    old_plane_state->crtc == crtc) {
9372 				modified = true;
9373 				break;
9374 			}
9375 		}
9376 
9377 		if (!modified)
9378 			continue;
9379 
9380 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9381 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9382 				continue;
9383 
9384 			new_plane_state =
9385 				drm_atomic_get_plane_state(state, plane);
9386 
9387 			if (IS_ERR(new_plane_state)) {
9388 				ret = PTR_ERR(new_plane_state);
9389 				goto fail;
9390 			}
9391 		}
9392 	}
9393 
	/* Remove existing planes if they are modified */
9395 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9396 		ret = dm_update_plane_state(dc, state, plane,
9397 					    old_plane_state,
9398 					    new_plane_state,
9399 					    false,
9400 					    &lock_and_validation_needed);
9401 		if (ret)
9402 			goto fail;
9403 	}
9404 
	/* Disable all CRTCs which require it */
9406 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9407 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9408 					   old_crtc_state,
9409 					   new_crtc_state,
9410 					   false,
9411 					   &lock_and_validation_needed);
9412 		if (ret)
9413 			goto fail;
9414 	}
9415 
	/* Enable all CRTCs which require it */
9417 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9418 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9419 					   old_crtc_state,
9420 					   new_crtc_state,
9421 					   true,
9422 					   &lock_and_validation_needed);
9423 		if (ret)
9424 			goto fail;
9425 	}
9426 
9427 	/* Add new/modified planes */
9428 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9429 		ret = dm_update_plane_state(dc, state, plane,
9430 					    old_plane_state,
9431 					    new_plane_state,
9432 					    true,
9433 					    &lock_and_validation_needed);
9434 		if (ret)
9435 			goto fail;
9436 	}
9437 
9438 	/* Run this here since we want to validate the streams we created */
9439 	ret = drm_atomic_helper_check_planes(dev, state);
9440 	if (ret)
9441 		goto fail;
9442 
	/* Check cursor plane scaling */
9444 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9445 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9446 		if (ret)
9447 			goto fail;
9448 	}
9449 
9450 	if (state->legacy_cursor_update) {
9451 		/*
9452 		 * This is a fast cursor update coming from the plane update
9453 		 * helper, check if it can be done asynchronously for better
9454 		 * performance.
9455 		 */
9456 		state->async_update =
9457 			!drm_atomic_helper_async_check(dev, state);
9458 
9459 		/*
9460 		 * Skip the remaining global validation if this is an async
9461 		 * update. Cursor updates can be done without affecting
9462 		 * state or bandwidth calcs and this avoids the performance
9463 		 * penalty of locking the private state object and
9464 		 * allocating a new dc_state.
9465 		 */
9466 		if (state->async_update)
9467 			return 0;
9468 	}
9469 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-change validation was removed because a new stream
	 * cannot be committed into the context without causing a full reset.
	 * Need to decide how to handle this.
	 */
9475 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9476 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9477 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9478 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9479 
9480 		/* Skip any modesets/resets */
9481 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9482 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9483 			continue;
9484 
		/* Skip anything that is not a scaling or underscan change */
9486 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9487 			continue;
9488 
9489 		lock_and_validation_needed = true;
9490 	}
9491 
	/*
9493 	 * Streams and planes are reset when there are changes that affect
9494 	 * bandwidth. Anything that affects bandwidth needs to go through
9495 	 * DC global validation to ensure that the configuration can be applied
9496 	 * to hardware.
9497 	 *
9498 	 * We have to currently stall out here in atomic_check for outstanding
9499 	 * commits to finish in this case because our IRQ handlers reference
9500 	 * DRM state directly - we can end up disabling interrupts too early
9501 	 * if we don't.
9502 	 *
9503 	 * TODO: Remove this stall and drop DM state private objects.
9504 	 */
9505 	if (lock_and_validation_needed) {
9506 		ret = dm_atomic_get_state(state, &dm_state);
9507 		if (ret)
9508 			goto fail;
9509 
9510 		ret = do_aquire_global_lock(dev, state);
9511 		if (ret)
9512 			goto fail;
9513 
9514 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
9517 
9518 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9519 		if (ret)
9520 			goto fail;
9521 #endif
9522 
9523 		/*
9524 		 * Perform validation of MST topology in the state:
9525 		 * We need to perform MST atomic check before calling
9526 		 * dc_validate_global_state(), or there is a chance
9527 		 * to get stuck in an infinite loop and hang eventually.
9528 		 */
9529 		ret = drm_dp_mst_atomic_check(state);
9530 		if (ret)
9531 			goto fail;
9532 		status = dc_validate_global_state(dc, dm_state->context, false);
9533 		if (status != DC_OK) {
9534 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
9535 				       dc_status_to_str(status), status);
9536 			ret = -EINVAL;
9537 			goto fail;
9538 		}
9539 	} else {
9540 		/*
9541 		 * The commit is a fast update. Fast updates shouldn't change
9542 		 * the DC context, affect global validation, and can have their
9543 		 * commit work done in parallel with other commits not touching
9544 		 * the same resource. If we have a new DC context as part of
9545 		 * the DM atomic state from validation we need to free it and
9546 		 * retain the existing one instead.
9547 		 *
9548 		 * Furthermore, since the DM atomic state only contains the DC
9549 		 * context and can safely be annulled, we can free the state
9550 		 * and clear the associated private object now to free
9551 		 * some memory and avoid a possible use-after-free later.
9552 		 */
9553 
9554 		for (i = 0; i < state->num_private_objs; i++) {
9555 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9556 
9557 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
9559 
9560 				dm_atomic_destroy_state(obj,
9561 						state->private_objs[i].state);
9562 
9563 				/* If i is not at the end of the array then the
9564 				 * last element needs to be moved to where i was
9565 				 * before the array can safely be truncated.
9566 				 */
9567 				if (i != j)
9568 					state->private_objs[i] =
9569 						state->private_objs[j];
9570 
9571 				state->private_objs[j].ptr = NULL;
9572 				state->private_objs[j].state = NULL;
9573 				state->private_objs[j].old_state = NULL;
9574 				state->private_objs[j].new_state = NULL;
9575 
9576 				state->num_private_objs = j;
9577 				break;
9578 			}
9579 		}
9580 	}
9581 
9582 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9584 		struct dm_crtc_state *dm_new_crtc_state =
9585 			to_dm_crtc_state(new_crtc_state);
9586 
9587 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9588 							 UPDATE_TYPE_FULL :
9589 							 UPDATE_TYPE_FAST;
9590 	}
9591 
9592 	/* Must be success */
9593 	WARN_ON(ret);
9594 
9595 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9596 
9597 	return ret;
9598 
9599 fail:
9600 	if (ret == -EDEADLK)
9601 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9602 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9603 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9604 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9606 
9607 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9608 
9609 	return ret;
9610 }
9611 
9612 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9613 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9614 {
9615 	uint8_t dpcd_data;
9616 	bool capable = false;
9617 
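	/*
	 * DPCD 0x007 (DOWN_STREAM_PORT_COUNT) carries the
	 * MSA_TIMING_PAR_IGNORED bit: the sink can render the incoming video
	 * stream without the MSA timing parameters, which the FreeSync path
	 * relies on.
	 */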
9618 	if (amdgpu_dm_connector->dc_link &&
9619 		dm_helpers_dp_read_dpcd(
9620 				NULL,
9621 				amdgpu_dm_connector->dc_link,
9622 				DP_DOWN_STREAM_PORT_COUNT,
9623 				&dpcd_data,
9624 				sizeof(dpcd_data))) {
		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
9626 	}
9627 
9628 	return capable;
}

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9631 					struct edid *edid)
9632 {
9633 	int i;
9634 	bool edid_check_required;
9635 	struct detailed_timing *timing;
9636 	struct detailed_non_pixel *data;
9637 	struct detailed_data_monitor_range *range;
9638 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9639 			to_amdgpu_dm_connector(connector);
9640 	struct dm_connector_state *dm_con_state = NULL;
9641 
9642 	struct drm_device *dev = connector->dev;
9643 	struct amdgpu_device *adev = drm_to_adev(dev);
9644 	bool freesync_capable = false;
9645 
9646 	if (!connector->state) {
9647 		DRM_ERROR("%s - Connector has no state", __func__);
9648 		goto update;
9649 	}
9650 
9651 	if (!edid) {
9652 		dm_con_state = to_dm_connector_state(connector->state);
9653 
9654 		amdgpu_dm_connector->min_vfreq = 0;
9655 		amdgpu_dm_connector->max_vfreq = 0;
9656 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9657 
9658 		goto update;
9659 	}
9660 
9661 	dm_con_state = to_dm_connector_state(connector->state);
9662 
9663 	edid_check_required = false;
9664 	if (!amdgpu_dm_connector->dc_sink) {
9665 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9666 		goto update;
9667 	}
9668 	if (!adev->dm.freesync_module)
9669 		goto update;
	/*
	 * If the EDID is non-NULL, restrict FreeSync to DP and eDP only.
	 */
9673 	if (edid) {
9674 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9675 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9676 			edid_check_required = is_dp_capable_without_timing_msa(
9677 						adev->dm.dc,
9678 						amdgpu_dm_connector);
9679 		}
9680 	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
9683 		for (i = 0; i < 4; i++) {
9685 			timing	= &edid->detailed_timings[i];
9686 			data	= &timing->data.other_data;
9687 			range	= &data->data.range;
9688 			/*
9689 			 * Check if monitor has continuous frequency mode
9690 			 */
9691 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9692 				continue;
			/*
			 * Only the "range limits only" flag (flags == 1) is
			 * handled, i.e. no additional timing information is
			 * provided. Default GTF, GTF secondary curve and CVT
			 * are not supported.
			 */
9699 			if (range->flags != 1)
9700 				continue;
9701 
9702 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9703 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
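			/*
			 * The EDID range descriptor stores the max pixel
			 * clock in units of 10 MHz, hence the factor of 10.
			 */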
9704 			amdgpu_dm_connector->pixel_clock_mhz =
9705 				range->pixel_clock_mhz * 10;
9706 
9707 			connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9708 			connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9709 
9710 			break;
9711 		}
9712 
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
9718 	}
9719 
9720 update:
9721 	if (dm_con_state)
9722 		dm_con_state->freesync_capable = freesync_capable;
9723 
9724 	if (connector->vrr_capable_property)
9725 		drm_connector_set_vrr_capable_property(connector,
9726 						       freesync_capable);
9727 }
9728 
9729 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9730 {
9731 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9732 
9733 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9734 		return;
9735 	if (link->type == dc_connection_none)
9736 		return;
9737 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9738 					dpcd_data, sizeof(dpcd_data))) {
9739 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9740 
9741 		if (dpcd_data[0] == 0) {
9742 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9743 			link->psr_settings.psr_feature_enabled = false;
9744 		} else {
9745 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9746 			link->psr_settings.psr_feature_enabled = true;
9747 		}
9748 
9749 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9750 	}
9751 }
9752 
9753 /*
9754  * amdgpu_dm_link_setup_psr() - configure psr link
9755  * @stream: stream state
9756  *
9757  * Return: true if success
9758  */
9759 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9760 {
9761 	struct dc_link *link = NULL;
9762 	struct psr_config psr_config = {0};
9763 	struct psr_context psr_context = {0};
9764 	bool ret = false;
9765 
9766 	if (stream == NULL)
9767 		return false;
9768 
9769 	link = stream->link;
9770 
9771 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9772 
9773 	if (psr_config.psr_version > 0) {
9774 		psr_config.psr_exit_link_training_required = 0x1;
9775 		psr_config.psr_frame_capture_indication_req = 0;
9776 		psr_config.psr_rfb_setup_time = 0x37;
9777 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9778 		psr_config.allow_smu_optimizations = 0x0;
9779 
		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}

	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9784 
9785 	return ret;
9786 }
9787 
9788 /*
9789  * amdgpu_dm_psr_enable() - enable psr f/w
9790  * @stream: stream state
9791  *
9792  * Return: true if success
9793  */
9794 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9795 {
9796 	struct dc_link *link = stream->link;
9797 	unsigned int vsync_rate_hz = 0;
9798 	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR. Initialize to a fail-safe of 2 static
	 * frames.
	 */
	unsigned int num_frames_static = 2;
9804 
9805 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9806 
9807 	vsync_rate_hz = div64_u64(div64_u64((
9808 			stream->timing.pix_clk_100hz * 100),
9809 			stream->timing.v_total),
9810 			stream->timing.h_total);
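	/*
	 * pix_clk_100hz * 100 is the pixel clock in Hz; dividing by the total
	 * pixels per frame (v_total * h_total) gives the vertical refresh
	 * rate in Hz.
	 */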
9811 
	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
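	/*
	 * For example, a 60 Hz mode gives frame_time_microsec = 16666, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 frames.
	 */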
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
9819 	}
9820 
9821 	params.triggers.cursor_update = true;
9822 	params.triggers.overlay_update = true;
9823 	params.triggers.surface_update = true;
9824 	params.num_frames = num_frames_static;
9825 
9826 	dc_stream_set_static_screen_params(link->ctx->dc,
9827 					   &stream, 1,
9828 					   &params);
9829 
9830 	return dc_link_set_psr_allow_active(link, true, false, false);
9831 }
9832 
9833 /*
9834  * amdgpu_dm_psr_disable() - disable psr f/w
9835  * @stream:  stream state
9836  *
9837  * Return: true if success
9838  */
9839 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9840 {
9841 
9842 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9843 
9844 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
9845 }
9846 
/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w on all streams
 * if psr is enabled on any stream
 *
 * Return: true if success
 */
9853 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9854 {
9855 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9856 	return dc_set_psr_allow_active(dm->dc, false);
9857 }
9858 
9859 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9860 {
9861 	struct amdgpu_device *adev = drm_to_adev(dev);
9862 	struct dc *dc = adev->dm.dc;
9863 	int i;
9864 
9865 	mutex_lock(&adev->dm.dc_lock);
9866 	if (dc->current_state) {
9867 		for (i = 0; i < dc->current_state->stream_count; ++i)
9868 			dc->current_state->streams[i]
9869 				->triggered_crtc_reset.enabled =
9870 				adev->dm.force_timing_sync;
9871 
9872 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9873 		dc_trigger_sync(dc, dc->current_state);
9874 	}
9875 	mutex_unlock(&adev->dm.dc_lock);
9876 }
9877 
9878 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9879 		       uint32_t value, const char *func_name)
9880 {
9881 #ifdef DM_CHECK_ADDR_0
9882 	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
9884 		return;
9885 	}
9886 #endif
9887 	cgs_write_register(ctx->cgs_device, address, value);
9888 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9889 }
9890 
9891 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9892 			  const char *func_name)
9893 {
9894 	uint32_t value;
9895 #ifdef DM_CHECK_ADDR_0
9896 	if (address == 0) {
9897 		DC_ERR("invalid register read; address = 0\n");
9898 		return 0;
9899 	}
9900 #endif
9901 
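	/*
	 * Reading while a DMUB register-write gather is in progress (and
	 * burst writes are not in use) is a programming error: assert and
	 * return 0 instead of going through the offload path.
	 */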
9902 	if (ctx->dmub_srv &&
9903 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9904 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9905 		ASSERT(false);
9906 		return 0;
9907 	}
9908 
9909 	value = cgs_read_register(ctx->cgs_device, address);
9910 
9911 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9912 
9913 	return value;
9914 }
9915