1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
38 
39 #include "vid.h"
40 #include "amdgpu.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
43 #include "atom.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
48 #endif
49 #include "amdgpu_pm.h"
50 
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
57 #endif
58 
59 #include "ivsrcid/ivsrcid_vislands30.h"
60 
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/version.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
69 
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79 #include <drm/drm_hdcp.h>
80 
81 #if defined(CONFIG_DRM_AMD_DC_DCN)
82 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 
84 #include "dcn/dcn_1_0_offset.h"
85 #include "dcn/dcn_1_0_sh_mask.h"
86 #include "soc15_hw_ip.h"
87 #include "vega10_ip_offset.h"
88 
89 #include "soc15_common.h"
90 #endif
91 
92 #include "modules/inc/mod_freesync.h"
93 #include "modules/power/power_helpers.h"
94 #include "modules/inc/mod_info_packet.h"
95 
96 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
97 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
104 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
106 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
108 
109 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
110 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
111 
112 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
113 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
114 
115 /* Number of bytes in PSP header for firmware. */
116 #define PSP_HEADER_BYTES 0x100
117 
118 /* Number of bytes in PSP footer for firmware. */
119 #define PSP_FOOTER_BYTES 0x100
120 
121 /**
122  * DOC: overview
123  *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
127  *
128  * The root control structure is &struct amdgpu_display_manager.
129  */
130 
131 /* basic init/fini API */
132 static int amdgpu_dm_init(struct amdgpu_device *adev);
133 static void amdgpu_dm_fini(struct amdgpu_device *adev);
134 
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 {
137 	switch (link->dpcd_caps.dongle_type) {
138 	case DISPLAY_DONGLE_NONE:
139 		return DRM_MODE_SUBCONNECTOR_Native;
140 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141 		return DRM_MODE_SUBCONNECTOR_VGA;
142 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
144 		return DRM_MODE_SUBCONNECTOR_DVID;
145 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147 		return DRM_MODE_SUBCONNECTOR_HDMIA;
148 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149 	default:
150 		return DRM_MODE_SUBCONNECTOR_Unknown;
151 	}
152 }
153 
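/*
 * Reflect the dongle type reported in the link's DPCD caps on the
 * connector's DRM "subconnector" property. DisplayPort connectors only.
 */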
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 {
156 	struct dc_link *link = aconnector->dc_link;
157 	struct drm_connector *connector = &aconnector->base;
158 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159 
160 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
161 		return;
162 
163 	if (aconnector->dc_sink)
164 		subconnector = get_subconnector_type(link);
165 
166 	drm_object_property_set_value(&connector->base,
167 			connector->dev->mode_config.dp_subconnector_property,
168 			subconnector);
169 }
170 
/*
 * Initializes drm_device display-related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success.
 */
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* Removes and deallocates the drm structures created by the above function. */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181 
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183 				struct drm_plane *plane,
184 				unsigned long possible_crtcs,
185 				const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187 			       struct drm_plane *plane,
188 			       uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
191 				    uint32_t link_index,
192 				    struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194 				  struct amdgpu_encoder *aencoder,
195 				  uint32_t link_index);
196 
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198 
199 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
200 				   struct drm_atomic_state *state,
201 				   bool nonblock);
202 
203 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
204 
205 static int amdgpu_dm_atomic_check(struct drm_device *dev,
206 				  struct drm_atomic_state *state);
207 
208 static void handle_cursor_update(struct drm_plane *plane,
209 				 struct drm_plane_state *old_plane_state);
210 
211 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
212 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
213 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
214 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
215 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
216 
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219 
/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: the vertical blank counter, or 0 if @crtc is out of range or has
 * no stream attached.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
249 
250 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
251 				  u32 *vbl, u32 *position)
252 {
253 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
254 
255 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
256 		return -EINVAL;
257 	else {
258 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
259 
		if (acrtc->dm_irq_params.stream == NULL) {
261 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
262 				  crtc);
263 			return 0;
264 		}
265 
266 		/*
267 		 * TODO rework base driver to use values directly.
268 		 * for now parse it back into reg-format
269 		 */
270 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
271 					 &v_blank_start,
272 					 &v_blank_end,
273 					 &h_position,
274 					 &v_position);
275 
276 		*position = v_position | (h_position << 16);
277 		*vbl = v_blank_start | (v_blank_end << 16);
278 	}
279 
280 	return 0;
281 }
282 
283 static bool dm_is_idle(void *handle)
284 {
285 	/* XXX todo */
286 	return true;
287 }
288 
289 static int dm_wait_for_idle(void *handle)
290 {
291 	/* XXX todo */
292 	return 0;
293 }
294 
295 static bool dm_check_soft_reset(void *handle)
296 {
297 	return false;
298 }
299 
300 static int dm_soft_reset(void *handle)
301 {
302 	/* XXX todo */
303 	return 0;
304 }
305 
306 static struct amdgpu_crtc *
307 get_crtc_by_otg_inst(struct amdgpu_device *adev,
308 		     int otg_inst)
309 {
310 	struct drm_device *dev = adev_to_drm(adev);
311 	struct drm_crtc *crtc;
312 	struct amdgpu_crtc *amdgpu_crtc;
313 
	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];
318 
319 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
320 		amdgpu_crtc = to_amdgpu_crtc(crtc);
321 
322 		if (amdgpu_crtc->otg_inst == otg_inst)
323 			return amdgpu_crtc;
324 	}
325 
326 	return NULL;
327 }
328 
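/*
 * Variant of amdgpu_dm_vrr_active() that reads the irq-safe copy of the
 * freesync state in dm_irq_params, for use from interrupt handlers.
 */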
329 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
330 {
331 	return acrtc->dm_irq_params.freesync_config.state ==
332 		       VRR_STATE_ACTIVE_VARIABLE ||
333 	       acrtc->dm_irq_params.freesync_config.state ==
334 		       VRR_STATE_ACTIVE_FIXED;
335 }
336 
337 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
338 {
339 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
340 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
341 }
342 
343 /**
344  * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common_irq_params pointer carrying the amdgpu device
 *                    and the pageflip IRQ source
346  *
347  * Handles the pageflip interrupt by notifying all interested parties
348  * that the pageflip has been completed.
349  */
350 static void dm_pflip_high_irq(void *interrupt_params)
351 {
352 	struct amdgpu_crtc *amdgpu_crtc;
353 	struct common_irq_params *irq_params = interrupt_params;
354 	struct amdgpu_device *adev = irq_params->adev;
355 	unsigned long flags;
356 	struct drm_pending_vblank_event *e;
357 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
358 	bool vrr_active;
359 
360 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
361 
362 	/* IRQ could occur when in initial stage */
363 	/* TODO work and BO cleanup */
364 	if (amdgpu_crtc == NULL) {
365 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
366 		return;
367 	}
368 
369 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
370 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
373 						 amdgpu_crtc->pflip_status,
374 						 AMDGPU_FLIP_SUBMITTED,
375 						 amdgpu_crtc->crtc_id,
376 						 amdgpu_crtc);
377 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
378 		return;
379 	}
380 
381 	/* page flip completed. */
382 	e = amdgpu_crtc->event;
383 	amdgpu_crtc->event = NULL;
384 
	WARN_ON(!e);
387 
388 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
389 
390 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
391 	if (!vrr_active ||
392 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
393 				      &v_blank_end, &hpos, &vpos) ||
394 	    (vpos < v_blank_start)) {
395 		/* Update to correct count and vblank timestamp if racing with
396 		 * vblank irq. This also updates to the correct vblank timestamp
397 		 * even in VRR mode, as scanout is past the front-porch atm.
398 		 */
399 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
400 
401 		/* Wake up userspace by sending the pageflip event with proper
402 		 * count and timestamp of vblank of flip completion.
403 		 */
404 		if (e) {
405 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
406 
407 			/* Event sent, so done with vblank for this flip */
408 			drm_crtc_vblank_put(&amdgpu_crtc->base);
409 		}
410 	} else if (e) {
411 		/* VRR active and inside front-porch: vblank count and
412 		 * timestamp for pageflip event will only be up to date after
413 		 * drm_crtc_handle_vblank() has been executed from late vblank
414 		 * irq handler after start of back-porch (vline 0). We queue the
415 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
416 		 * updated timestamp and count, once it runs after us.
417 		 *
418 		 * We need to open-code this instead of using the helper
419 		 * drm_crtc_arm_vblank_event(), as that helper would
420 		 * call drm_crtc_accurate_vblank_count(), which we must
421 		 * not call in VRR mode while we are in front-porch!
422 		 */
423 
424 		/* sequence will be replaced by real count during send-out. */
425 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
426 		e->pipe = amdgpu_crtc->crtc_id;
427 
428 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
429 		e = NULL;
430 	}
431 
	/* Keep track of the vblank of this flip for flip throttling. We use
	 * the cooked hw counter, as that one was incremented at the start of
	 * this vblank of pageflip completion, so last_flip_vblank is the
	 * forbidden count for queueing new pageflips if vsync + VRR is
	 * enabled.
	 */
437 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
438 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
439 
440 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
441 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
442 
443 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
444 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
445 			 vrr_active, (int) !e);
446 }
447 
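/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: common_irq_params pointer carrying the amdgpu device
 *                    and the VUPDATE IRQ source
 *
 * In VRR mode VUPDATE fires after the end of the front-porch, so core
 * vblank handling (and BTR processing on pre-DCE12 ASICs) is done from
 * here instead of from dm_crtc_high_irq().
 */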
448 static void dm_vupdate_high_irq(void *interrupt_params)
449 {
450 	struct common_irq_params *irq_params = interrupt_params;
451 	struct amdgpu_device *adev = irq_params->adev;
452 	struct amdgpu_crtc *acrtc;
453 	unsigned long flags;
454 	int vrr_active;
455 
456 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
457 
458 	if (acrtc) {
459 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
460 
461 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
462 			      acrtc->crtc_id,
463 			      vrr_active);
464 
		/* Core vblank handling is done here after the end of the
		 * front-porch in vrr mode, as vblank timestamping only gives
		 * valid results there. This will also deliver the page-flip
		 * completion events that were queued to us if a pageflip
		 * happened inside the front-porch.
		 */
471 		if (vrr_active) {
472 			drm_crtc_handle_vblank(&acrtc->base);
473 
474 			/* BTR processing for pre-DCE12 ASICs */
475 			if (acrtc->dm_irq_params.stream &&
476 			    adev->family < AMDGPU_FAMILY_AI) {
477 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
478 				mod_freesync_handle_v_update(
479 				    adev->dm.freesync_module,
480 				    acrtc->dm_irq_params.stream,
481 				    &acrtc->dm_irq_params.vrr_params);
482 
483 				dc_stream_adjust_vmin_vmax(
484 				    adev->dm.dc,
485 				    acrtc->dm_irq_params.stream,
486 				    &acrtc->dm_irq_params.vrr_params.adjust);
487 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
488 			}
489 		}
490 	}
491 }
492 
493 /**
494  * dm_crtc_high_irq() - Handles CRTC interrupt
495  * @interrupt_params: used for determining the CRTC instance
496  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
498  * event handler.
499  */
500 static void dm_crtc_high_irq(void *interrupt_params)
501 {
502 	struct common_irq_params *irq_params = interrupt_params;
503 	struct amdgpu_device *adev = irq_params->adev;
504 	struct amdgpu_crtc *acrtc;
505 	unsigned long flags;
506 	int vrr_active;
507 
508 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
509 	if (!acrtc)
510 		return;
511 
512 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
513 
514 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
515 		      vrr_active, acrtc->dm_irq_params.active_planes);
516 
	/*
	 * Core vblank handling at the start of the front-porch is only
	 * possible in non-vrr mode, as only there vblank timestamping
	 * gives valid results while done in the front-porch. Otherwise
	 * defer it to dm_vupdate_high_irq after the end of the front-porch.
	 */
523 	if (!vrr_active)
524 		drm_crtc_handle_vblank(&acrtc->base);
525 
	/*
	 * The following must happen at the start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
530 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
531 
532 	/* BTR updates need to happen before VUPDATE on Vega and above. */
533 	if (adev->family < AMDGPU_FAMILY_AI)
534 		return;
535 
536 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
537 
538 	if (acrtc->dm_irq_params.stream &&
539 	    acrtc->dm_irq_params.vrr_params.supported &&
540 	    acrtc->dm_irq_params.freesync_config.state ==
541 		    VRR_STATE_ACTIVE_VARIABLE) {
542 		mod_freesync_handle_v_update(adev->dm.freesync_module,
543 					     acrtc->dm_irq_params.stream,
544 					     &acrtc->dm_irq_params.vrr_params);
545 
546 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
547 					   &acrtc->dm_irq_params.vrr_params.adjust);
548 	}
549 
550 	/*
551 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
552 	 * In that case, pageflip completion interrupts won't fire and pageflip
553 	 * completion events won't get delivered. Prevent this by sending
554 	 * pending pageflip events from here if a flip is still pending.
555 	 *
556 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
557 	 * avoid race conditions between flip programming and completion,
558 	 * which could cause too early flip completion events.
559 	 */
560 	if (adev->family >= AMDGPU_FAMILY_RV &&
561 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
562 	    acrtc->dm_irq_params.active_planes == 0) {
563 		if (acrtc->event) {
564 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
565 			acrtc->event = NULL;
566 			drm_crtc_vblank_put(&acrtc->base);
567 		}
568 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
569 	}
570 
571 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
572 }
573 
574 static int dm_set_clockgating_state(void *handle,
575 		  enum amd_clockgating_state state)
576 {
577 	return 0;
578 }
579 
580 static int dm_set_powergating_state(void *handle,
581 		  enum amd_powergating_state state)
582 {
583 	return 0;
584 }
585 
586 /* Prototypes of private functions */
static int dm_early_init(void *handle);
588 
/* Allocate memory for FBC compressed data */
590 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
591 {
592 	struct drm_device *dev = connector->dev;
593 	struct amdgpu_device *adev = drm_to_adev(dev);
594 	struct dm_compressor_info *compressor = &adev->dm.compressor;
595 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
596 	struct drm_display_mode *mode;
597 	unsigned long max_size = 0;
598 
599 	if (adev->dm.dc->fbc_compressor == NULL)
600 		return;
601 
602 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
603 		return;
604 
605 	if (compressor->bo_ptr)
606 		return;
607 
609 	list_for_each_entry(mode, &connector->modes, head) {
610 		if (max_size < mode->htotal * mode->vtotal)
611 			max_size = mode->htotal * mode->vtotal;
612 	}
613 
614 	if (max_size) {
615 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
616 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
617 			    &compressor->gpu_addr, &compressor->cpu_addr);
618 
		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
629 
630 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
631 					  int pipe, bool *enabled,
632 					  unsigned char *buf, int max_bytes)
633 {
634 	struct drm_device *dev = dev_get_drvdata(kdev);
635 	struct amdgpu_device *adev = drm_to_adev(dev);
636 	struct drm_connector *connector;
637 	struct drm_connector_list_iter conn_iter;
638 	struct amdgpu_dm_connector *aconnector;
639 	int ret = 0;
640 
641 	*enabled = false;
642 
643 	mutex_lock(&adev->dm.audio_lock);
644 
645 	drm_connector_list_iter_begin(dev, &conn_iter);
646 	drm_for_each_connector_iter(connector, &conn_iter) {
647 		aconnector = to_amdgpu_dm_connector(connector);
648 		if (aconnector->audio_inst != port)
649 			continue;
650 
651 		*enabled = true;
652 		ret = drm_eld_size(connector->eld);
653 		memcpy(buf, connector->eld, min(max_bytes, ret));
654 
655 		break;
656 	}
657 	drm_connector_list_iter_end(&conn_iter);
658 
659 	mutex_unlock(&adev->dm.audio_lock);
660 
661 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
662 
663 	return ret;
664 }
665 
666 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
667 	.get_eld = amdgpu_dm_audio_component_get_eld,
668 };
669 
670 static int amdgpu_dm_audio_component_bind(struct device *kdev,
671 				       struct device *hda_kdev, void *data)
672 {
673 	struct drm_device *dev = dev_get_drvdata(kdev);
674 	struct amdgpu_device *adev = drm_to_adev(dev);
675 	struct drm_audio_component *acomp = data;
676 
677 	acomp->ops = &amdgpu_dm_audio_component_ops;
678 	acomp->dev = kdev;
679 	adev->dm.audio_component = acomp;
680 
681 	return 0;
682 }
683 
684 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
685 					  struct device *hda_kdev, void *data)
686 {
687 	struct drm_device *dev = dev_get_drvdata(kdev);
688 	struct amdgpu_device *adev = drm_to_adev(dev);
689 	struct drm_audio_component *acomp = data;
690 
691 	acomp->ops = NULL;
692 	acomp->dev = NULL;
693 	adev->dm.audio_component = NULL;
694 }
695 
696 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
697 	.bind	= amdgpu_dm_audio_component_bind,
698 	.unbind	= amdgpu_dm_audio_component_unbind,
699 };
700 
701 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
702 {
703 	int i, ret;
704 
705 	if (!amdgpu_audio)
706 		return 0;
707 
708 	adev->mode_info.audio.enabled = true;
709 
710 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
711 
712 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
713 		adev->mode_info.audio.pin[i].channels = -1;
714 		adev->mode_info.audio.pin[i].rate = -1;
715 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
716 		adev->mode_info.audio.pin[i].status_bits = 0;
717 		adev->mode_info.audio.pin[i].category_code = 0;
718 		adev->mode_info.audio.pin[i].connected = false;
719 		adev->mode_info.audio.pin[i].id =
720 			adev->dm.dc->res_pool->audios[i]->inst;
721 		adev->mode_info.audio.pin[i].offset = 0;
722 	}
723 
724 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
725 	if (ret < 0)
726 		return ret;
727 
728 	adev->dm.audio_registered = true;
729 
730 	return 0;
731 }
732 
733 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
734 {
735 	if (!amdgpu_audio)
736 		return;
737 
738 	if (!adev->mode_info.audio.enabled)
739 		return;
740 
741 	if (adev->dm.audio_registered) {
742 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
743 		adev->dm.audio_registered = false;
744 	}
745 
746 	/* TODO: Disable audio? */
747 
748 	adev->mode_info.audio.enabled = false;
749 }
750 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
752 {
753 	struct drm_audio_component *acomp = adev->dm.audio_component;
754 
755 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
756 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
757 
758 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
759 						 pin, -1);
760 	}
761 }
762 
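/**
 * dm_dmub_hw_init() - Initialize DMUB hardware and start the service
 * @adev: amdgpu device pointer
 *
 * Copies the firmware instruction/data and VBIOS regions into the DMUB
 * framebuffer, clears the mailbox, tracebuffer and firmware-state windows,
 * then hands the assembled hardware parameters to the DMUB service.
 *
 * Return: 0 on success (including ASICs without DMUB support), negative
 * error code on failure.
 */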
763 static int dm_dmub_hw_init(struct amdgpu_device *adev)
764 {
765 	const struct dmcub_firmware_header_v1_0 *hdr;
766 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
767 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
768 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
769 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
770 	struct abm *abm = adev->dm.dc->res_pool->abm;
771 	struct dmub_srv_hw_params hw_params;
772 	enum dmub_status status;
773 	const unsigned char *fw_inst_const, *fw_bss_data;
774 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
775 	bool has_hw_support;
776 
777 	if (!dmub_srv)
778 		/* DMUB isn't supported on the ASIC. */
779 		return 0;
780 
781 	if (!fb_info) {
782 		DRM_ERROR("No framebuffer info for DMUB service.\n");
783 		return -EINVAL;
784 	}
785 
786 	if (!dmub_fw) {
787 		/* Firmware required for DMUB support. */
788 		DRM_ERROR("No firmware provided for DMUB.\n");
789 		return -EINVAL;
790 	}
791 
792 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
793 	if (status != DMUB_STATUS_OK) {
794 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
795 		return -EINVAL;
796 	}
797 
798 	if (!has_hw_support) {
799 		DRM_INFO("DMUB unsupported on ASIC\n");
800 		return 0;
801 	}
802 
803 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
804 
805 	fw_inst_const = dmub_fw->data +
806 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
807 			PSP_HEADER_BYTES;
808 
809 	fw_bss_data = dmub_fw->data +
810 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
811 		      le32_to_cpu(hdr->inst_const_bytes);
812 
813 	/* Copy firmware and bios info into FB memory. */
814 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
815 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
816 
817 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
818 
	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the fw_inst_const part of the
	 * dmub firmware to cw0; otherwise, the firmware backdoor load is done
	 * here in dm_dmub_hw_init.
	 */
824 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
825 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
826 				fw_inst_const_size);
827 	}
828 
829 	if (fw_bss_data_size)
830 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
831 		       fw_bss_data, fw_bss_data_size);
832 
833 	/* Copy firmware bios info into FB memory. */
834 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
835 	       adev->bios_size);
836 
837 	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
840 
841 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
842 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
843 
844 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
845 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
846 
847 	/* Initialize hardware. */
848 	memset(&hw_params, 0, sizeof(hw_params));
849 	hw_params.fb_base = adev->gmc.fb_start;
850 	hw_params.fb_offset = adev->gmc.aper_base;
851 
852 	/* backdoor load firmware and trigger dmub running */
853 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
854 		hw_params.load_inst_const = true;
855 
856 	if (dmcu)
857 		hw_params.psp_version = dmcu->psp_version;
858 
859 	for (i = 0; i < fb_info->num_fb; ++i)
860 		hw_params.fb[i] = &fb_info->fb[i];
861 
862 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
863 	if (status != DMUB_STATUS_OK) {
864 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
865 		return -EINVAL;
866 	}
867 
868 	/* Wait for firmware load to finish. */
869 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
870 	if (status != DMUB_STATUS_OK)
871 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
872 
873 	/* Init DMCU and ABM if available. */
874 	if (dmcu && abm) {
875 		dmcu->funcs->dmcu_init(dmcu);
876 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
877 	}
878 
879 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
880 	if (!adev->dm.dc->ctx->dmub_srv) {
881 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
882 		return -ENOMEM;
883 	}
884 
885 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
886 		 adev->dm.dmcub_fw_version);
887 
888 	return 0;
889 }
890 
891 #if defined(CONFIG_DRM_AMD_DC_DCN)
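/*
 * Gather the system aperture, framebuffer and GART page-table ranges that
 * DCN needs for address translation. Values are packed in the register
 * formats DC expects: apertures in 256KB (>> 18) units and the AGP window
 * in 16MB (>> 24) units.
 */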
892 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
893 {
894 	uint64_t pt_base;
895 	uint32_t logical_addr_low;
896 	uint32_t logical_addr_high;
897 	uint32_t agp_base, agp_bot, agp_top;
898 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
899 
900 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
901 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
902 
903 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram
		 * which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a
		 * workaround, increase the system aperture high address
		 * (add 1) to get rid of the VM fault and hardware hang.
		 */
910 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
911 	else
912 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
913 
914 	agp_base = 0;
915 	agp_bot = adev->gmc.agp_start >> 24;
916 	agp_top = adev->gmc.agp_end >> 24;
917 
919 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
920 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
921 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
922 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
923 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
924 	page_table_base.low_part = lower_32_bits(pt_base);
925 
926 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
927 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
928 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
930 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
931 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
932 
933 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
934 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
935 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
936 
937 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
938 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
939 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
940 
	pa_config->is_hvm_enabled = 0;
}
944 #endif
945 
946 #ifdef CONFIG_DEBUG_FS
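/* Create the atomic properties that describe the CRC calculation window. */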
947 static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
948 {
949 	dm->crc_win_x_start_property =
950 		drm_property_create_range(adev_to_drm(dm->adev),
951 					  DRM_MODE_PROP_ATOMIC,
952 					  "AMD_CRC_WIN_X_START", 0, U16_MAX);
953 	if (!dm->crc_win_x_start_property)
954 		return -ENOMEM;
955 
956 	dm->crc_win_y_start_property =
957 		drm_property_create_range(adev_to_drm(dm->adev),
958 					  DRM_MODE_PROP_ATOMIC,
959 					  "AMD_CRC_WIN_Y_START", 0, U16_MAX);
960 	if (!dm->crc_win_y_start_property)
961 		return -ENOMEM;
962 
963 	dm->crc_win_x_end_property =
964 		drm_property_create_range(adev_to_drm(dm->adev),
965 					  DRM_MODE_PROP_ATOMIC,
966 					  "AMD_CRC_WIN_X_END", 0, U16_MAX);
967 	if (!dm->crc_win_x_end_property)
968 		return -ENOMEM;
969 
970 	dm->crc_win_y_end_property =
971 		drm_property_create_range(adev_to_drm(dm->adev),
972 					  DRM_MODE_PROP_ATOMIC,
973 					  "AMD_CRC_WIN_Y_END", 0, U16_MAX);
974 	if (!dm->crc_win_y_end_property)
975 		return -ENOMEM;
976 
977 	return 0;
978 }
979 #endif
980 
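/**
 * amdgpu_dm_init() - Create and initialize the display manager
 * @adev: amdgpu device pointer
 *
 * Creates the DC instance, brings up DMUB hardware, registers the freesync,
 * color-management and HDCP modules, and builds the DRM device structures.
 * Called from dm_hw_init(); on failure everything is torn down again via
 * amdgpu_dm_fini().
 */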
981 static int amdgpu_dm_init(struct amdgpu_device *adev)
982 {
983 	struct dc_init_data init_data;
984 #ifdef CONFIG_DRM_AMD_DC_HDCP
985 	struct dc_callback_init init_params;
986 #endif
987 	int r;
988 
989 	adev->dm.ddev = adev_to_drm(adev);
990 	adev->dm.adev = adev;
991 
992 	/* Zero all the fields */
993 	memset(&init_data, 0, sizeof(init_data));
994 #ifdef CONFIG_DRM_AMD_DC_HDCP
995 	memset(&init_params, 0, sizeof(init_params));
996 #endif
997 
998 	mutex_init(&adev->dm.dc_lock);
999 	mutex_init(&adev->dm.audio_lock);
1000 
	if (amdgpu_dm_irq_init(adev)) {
1002 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1003 		goto error;
1004 	}
1005 
1006 	init_data.asic_id.chip_family = adev->family;
1007 
1008 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1009 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1010 
1011 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1012 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1013 	init_data.asic_id.atombios_base_address =
1014 		adev->mode_info.atom_context->bios;
1015 
1016 	init_data.driver = adev;
1017 
1018 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1019 
1020 	if (!adev->dm.cgs_device) {
1021 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1022 		goto error;
1023 	}
1024 
1025 	init_data.cgs_device = adev->dm.cgs_device;
1026 
1027 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1028 
1029 	switch (adev->asic_type) {
1030 	case CHIP_CARRIZO:
1031 	case CHIP_STONEY:
1032 	case CHIP_RAVEN:
1033 	case CHIP_RENOIR:
1034 		init_data.flags.gpu_vm_support = true;
1035 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1036 			init_data.flags.disable_dmcu = true;
1037 		break;
1038 	default:
1039 		break;
1040 	}
1041 
1042 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1043 		init_data.flags.fbc_support = true;
1044 
1045 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1046 		init_data.flags.multi_mon_pp_mclk_switch = true;
1047 
1048 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1049 		init_data.flags.disable_fractional_pwm = true;
1050 
1051 	init_data.flags.power_down_display_on_boot = true;
1052 
1053 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
1054 
1055 	/* Display Core create. */
1056 	adev->dm.dc = dc_create(&init_data);
1057 
1058 	if (adev->dm.dc) {
1059 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1060 	} else {
1061 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1062 		goto error;
1063 	}
1064 
1065 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1066 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1067 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1068 	}
1069 
1070 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1072 
1073 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1074 		adev->dm.dc->debug.disable_stutter = true;
1075 
1076 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1077 		adev->dm.dc->debug.disable_dsc = true;
1078 
1079 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1080 		adev->dm.dc->debug.disable_clock_gate = true;
1081 
1082 	r = dm_dmub_hw_init(adev);
1083 	if (r) {
1084 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1085 		goto error;
1086 	}
1087 
1088 	dc_hardware_init(adev->dm.dc);
1089 
1090 #if defined(CONFIG_DRM_AMD_DC_DCN)
1091 	if (adev->asic_type == CHIP_RENOIR) {
1092 		struct dc_phy_addr_space_config pa_config;
1093 
1094 		mmhub_read_system_context(adev, &pa_config);
1095 
1096 		// Call the DC init_memory func
1097 		dc_setup_system_context(adev->dm.dc, &pa_config);
1098 	}
1099 #endif
1100 
1101 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}
1108 
1109 	amdgpu_dm_init_color_mod();
1110 
1111 #ifdef CONFIG_DRM_AMD_DC_HDCP
1112 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1113 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1114 
1115 		if (!adev->dm.hdcp_workqueue)
1116 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1117 		else
1118 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1119 
1120 		dc_init_callbacks(adev->dm.dc, &init_params);
1121 	}
1122 #endif
1123 #ifdef CONFIG_DEBUG_FS
1124 	if (create_crtc_crc_properties(&adev->dm))
1125 		DRM_ERROR("amdgpu: failed to create crc property.\n");
1126 #endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}
1132 
1133 	/* Update the actual used number of crtc */
1134 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1135 
1136 	/* create fake encoders for MST */
1137 	dm_dp_create_fake_mst_encoders(adev);
1138 
1139 	/* TODO: Add_display_info? */
1140 
1141 	/* TODO use dynamic cursor width */
1142 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1143 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1144 
	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}
1150 
1152 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1153 
1154 	return 0;
1155 error:
1156 	amdgpu_dm_fini(adev);
1157 
1158 	return -EINVAL;
1159 }
1160 
1161 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1162 {
1163 	int i;
1164 
1165 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1166 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1167 	}
1168 
1169 	amdgpu_dm_audio_fini(adev);
1170 
1171 	amdgpu_dm_destroy_drm_device(&adev->dm);
1172 
1173 #ifdef CONFIG_DRM_AMD_DC_HDCP
1174 	if (adev->dm.hdcp_workqueue) {
1175 		hdcp_destroy(adev->dm.hdcp_workqueue);
1176 		adev->dm.hdcp_workqueue = NULL;
1177 	}
1178 
1179 	if (adev->dm.dc)
1180 		dc_deinit_callbacks(adev->dm.dc);
1181 #endif
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1183 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1184 		adev->dm.dc->ctx->dmub_srv = NULL;
1185 	}
1186 
1187 	if (adev->dm.dmub_bo)
1188 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1189 				      &adev->dm.dmub_bo_gpu_addr,
1190 				      &adev->dm.dmub_bo_cpu_addr);
1191 
1192 	/* DC Destroy TODO: Replace destroy DAL */
1193 	if (adev->dm.dc)
1194 		dc_destroy(&adev->dm.dc);
1195 	/*
	 * TODO: pageflip, vblank interrupt
1197 	 *
1198 	 * amdgpu_dm_irq_fini(adev);
1199 	 */
1200 
1201 	if (adev->dm.cgs_device) {
1202 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1203 		adev->dm.cgs_device = NULL;
1204 	}
1205 	if (adev->dm.freesync_module) {
1206 		mod_freesync_destroy(adev->dm.freesync_module);
1207 		adev->dm.freesync_module = NULL;
1208 	}
1209 
1210 	mutex_destroy(&adev->dm.audio_lock);
1211 	mutex_destroy(&adev->dm.dc_lock);
1214 }
1215 
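/*
 * Request, validate and register the DMCU firmware for ASICs that carry
 * one; ASICs without a DMCU image simply return 0.
 */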
1216 static int load_dmcu_fw(struct amdgpu_device *adev)
1217 {
1218 	const char *fw_name_dmcu = NULL;
1219 	int r;
1220 	const struct dmcu_firmware_header_v1_0 *hdr;
1221 
	switch (adev->asic_type) {
1223 #if defined(CONFIG_DRM_AMD_DC_SI)
1224 	case CHIP_TAHITI:
1225 	case CHIP_PITCAIRN:
1226 	case CHIP_VERDE:
1227 	case CHIP_OLAND:
1228 #endif
1229 	case CHIP_BONAIRE:
1230 	case CHIP_HAWAII:
1231 	case CHIP_KAVERI:
1232 	case CHIP_KABINI:
1233 	case CHIP_MULLINS:
1234 	case CHIP_TONGA:
1235 	case CHIP_FIJI:
1236 	case CHIP_CARRIZO:
1237 	case CHIP_STONEY:
1238 	case CHIP_POLARIS11:
1239 	case CHIP_POLARIS10:
1240 	case CHIP_POLARIS12:
1241 	case CHIP_VEGAM:
1242 	case CHIP_VEGA10:
1243 	case CHIP_VEGA12:
1244 	case CHIP_VEGA20:
1245 	case CHIP_NAVI10:
1246 	case CHIP_NAVI14:
1247 	case CHIP_RENOIR:
1248 	case CHIP_SIENNA_CICHLID:
1249 	case CHIP_NAVY_FLOUNDER:
1250 	case CHIP_DIMGREY_CAVEFISH:
1251 	case CHIP_VANGOGH:
1252 		return 0;
1253 	case CHIP_NAVI12:
1254 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1255 		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
1264 	default:
1265 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1266 		return -EINVAL;
1267 	}
1268 
1269 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1270 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1271 		return 0;
1272 	}
1273 
1274 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1275 	if (r == -ENOENT) {
1276 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1277 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1278 		adev->dm.fw_dmcu = NULL;
1279 		return 0;
1280 	}
1281 	if (r) {
1282 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1283 			fw_name_dmcu);
1284 		return r;
1285 	}
1286 
1287 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1288 	if (r) {
1289 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1290 			fw_name_dmcu);
1291 		release_firmware(adev->dm.fw_dmcu);
1292 		adev->dm.fw_dmcu = NULL;
1293 		return r;
1294 	}
1295 
1296 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1297 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1298 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1299 	adev->firmware.fw_size +=
1300 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1301 
1302 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1303 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1304 	adev->firmware.fw_size +=
1305 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1306 
1307 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1308 
1309 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1310 
1311 	return 0;
1312 }
1313 
1314 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1315 {
1316 	struct amdgpu_device *adev = ctx;
1317 
1318 	return dm_read_reg(adev->dm.dc->ctx, address);
1319 }
1320 
1321 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1322 				     uint32_t value)
1323 {
1324 	struct amdgpu_device *adev = ctx;
1325 
1326 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1327 }
1328 
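/*
 * Software-side DMUB setup: request and validate the firmware, create the
 * DMUB service, size its regions and allocate the backing framebuffer.
 */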
1329 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1330 {
1331 	struct dmub_srv_create_params create_params;
1332 	struct dmub_srv_region_params region_params;
1333 	struct dmub_srv_region_info region_info;
1334 	struct dmub_srv_fb_params fb_params;
1335 	struct dmub_srv_fb_info *fb_info;
1336 	struct dmub_srv *dmub_srv;
1337 	const struct dmcub_firmware_header_v1_0 *hdr;
1338 	const char *fw_name_dmub;
1339 	enum dmub_asic dmub_asic;
1340 	enum dmub_status status;
1341 	int r;
1342 
1343 	switch (adev->asic_type) {
1344 	case CHIP_RENOIR:
1345 		dmub_asic = DMUB_ASIC_DCN21;
1346 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1347 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1348 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1349 		break;
1350 	case CHIP_SIENNA_CICHLID:
1351 		dmub_asic = DMUB_ASIC_DCN30;
1352 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1353 		break;
1354 	case CHIP_NAVY_FLOUNDER:
1355 		dmub_asic = DMUB_ASIC_DCN30;
1356 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1357 		break;
1358 	case CHIP_VANGOGH:
1359 		dmub_asic = DMUB_ASIC_DCN301;
1360 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1361 		break;
1362 	case CHIP_DIMGREY_CAVEFISH:
1363 		dmub_asic = DMUB_ASIC_DCN302;
1364 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1365 		break;
1366 
1367 	default:
1368 		/* ASIC doesn't support DMUB. */
1369 		return 0;
1370 	}
1371 
1372 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1373 	if (r) {
1374 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1375 		return 0;
1376 	}
1377 
1378 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1379 	if (r) {
1380 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1381 		return 0;
1382 	}
1383 
1384 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1385 
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}
1399 
1400 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1401 	dmub_srv = adev->dm.dmub_srv;
1402 
1403 	if (!dmub_srv) {
1404 		DRM_ERROR("Failed to allocate DMUB service!\n");
1405 		return -ENOMEM;
1406 	}
1407 
1408 	memset(&create_params, 0, sizeof(create_params));
1409 	create_params.user_ctx = adev;
1410 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1411 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1412 	create_params.asic = dmub_asic;
1413 
1414 	/* Create the DMUB service. */
1415 	status = dmub_srv_create(dmub_srv, &create_params);
1416 	if (status != DMUB_STATUS_OK) {
1417 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1418 		return -EINVAL;
1419 	}
1420 
1421 	/* Calculate the size of all the regions for the DMUB service. */
1422 	memset(&region_params, 0, sizeof(region_params));
1423 
1424 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1425 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1426 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1427 	region_params.vbios_size = adev->bios_size;
1428 	region_params.fw_bss_data = region_params.bss_data_size ?
1429 		adev->dm.dmub_fw->data +
1430 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1431 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1432 	region_params.fw_inst_const =
1433 		adev->dm.dmub_fw->data +
1434 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1435 		PSP_HEADER_BYTES;
1436 
1437 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1438 					   &region_info);
1439 
1440 	if (status != DMUB_STATUS_OK) {
1441 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1442 		return -EINVAL;
1443 	}
1444 
1445 	/*
1446 	 * Allocate a framebuffer based on the total size of all the regions.
1447 	 * TODO: Move this into GART.
1448 	 */
1449 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1450 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1451 				    &adev->dm.dmub_bo_gpu_addr,
1452 				    &adev->dm.dmub_bo_cpu_addr);
1453 	if (r)
1454 		return r;
1455 
1456 	/* Rebase the regions on the framebuffer address. */
1457 	memset(&fb_params, 0, sizeof(fb_params));
1458 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1459 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1460 	fb_params.region_info = &region_info;
1461 
1462 	adev->dm.dmub_fb_info =
1463 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1464 	fb_info = adev->dm.dmub_fb_info;
1465 
1466 	if (!fb_info) {
1467 		DRM_ERROR(
1468 			"Failed to allocate framebuffer info for DMUB service!\n");
1469 		return -ENOMEM;
1470 	}
1471 
1472 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1473 	if (status != DMUB_STATUS_OK) {
1474 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1475 		return -EINVAL;
1476 	}
1477 
1478 	return 0;
1479 }
1480 
1481 static int dm_sw_init(void *handle)
1482 {
1483 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1484 	int r;
1485 
1486 	r = dm_dmub_sw_init(adev);
1487 	if (r)
1488 		return r;
1489 
1490 	return load_dmcu_fw(adev);
1491 }
1492 
1493 static int dm_sw_fini(void *handle)
1494 {
1495 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1496 
1497 	kfree(adev->dm.dmub_fb_info);
1498 	adev->dm.dmub_fb_info = NULL;
1499 
1500 	if (adev->dm.dmub_srv) {
1501 		dmub_srv_destroy(adev->dm.dmub_srv);
1502 		adev->dm.dmub_srv = NULL;
1503 	}
1504 
1505 	release_firmware(adev->dm.dmub_fw);
1506 	adev->dm.dmub_fw = NULL;
1507 
1508 	release_firmware(adev->dm.fw_dmcu);
1509 	adev->dm.fw_dmcu = NULL;
1510 
1511 	return 0;
1512 }
1513 
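/* Start MST topology management on every connector attached to an MST branch. */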
1514 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1515 {
1516 	struct amdgpu_dm_connector *aconnector;
1517 	struct drm_connector *connector;
1518 	struct drm_connector_list_iter iter;
1519 	int ret = 0;
1520 
1521 	drm_connector_list_iter_begin(dev, &iter);
1522 	drm_for_each_connector_iter(connector, &iter) {
1523 		aconnector = to_amdgpu_dm_connector(connector);
1524 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1525 		    aconnector->mst_mgr.aux) {
1526 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1527 					 aconnector,
1528 					 aconnector->base.base.id);
1529 
1530 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1531 			if (ret < 0) {
1532 				DRM_ERROR("DM_MST: Failed to start MST\n");
1533 				aconnector->dc_link->type =
1534 					dc_connection_single;
1535 				break;
1536 			}
1537 		}
1538 	}
1539 	drm_connector_list_iter_end(&iter);
1540 
1541 	return ret;
1542 }
1543 
1544 static int dm_late_init(void *handle)
1545 {
1546 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1547 
1548 	struct dmcu_iram_parameters params;
1549 	unsigned int linear_lut[16];
1550 	int i;
1551 	struct dmcu *dmcu = NULL;
1552 	bool ret = true;
1553 
1554 	dmcu = adev->dm.dc->res_pool->dmcu;
1555 
1556 	for (i = 0; i < 16; i++)
1557 		linear_lut[i] = 0xFFFF * i / 15;
1558 
1559 	params.set = 0;
1560 	params.backlight_ramping_start = 0xCCCC;
1561 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1562 	params.backlight_lut_array_size = 16;
1563 	params.backlight_lut_array = linear_lut;
1564 
	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
1568 	params.min_abm_backlight = 0x28F;
1569 
1570 	/* In the case where abm is implemented on dmcub,
1571 	 * dmcu object will be null.
1572 	 * ABM 2.4 and up are implemented on dmcub.
1573 	 */
1574 	if (dmcu)
1575 		ret = dmcu_load_iram(dmcu, params);
1576 	else if (adev->dm.dc->ctx->dmub_srv)
1577 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1578 
1579 	if (!ret)
1580 		return -EINVAL;
1581 
1582 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1583 }
1584 
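/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * If a manager fails to resume, tear its topology down and trigger a
 * hotplug event so userspace re-probes the link.
 */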
1585 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1586 {
1587 	struct amdgpu_dm_connector *aconnector;
1588 	struct drm_connector *connector;
1589 	struct drm_connector_list_iter iter;
1590 	struct drm_dp_mst_topology_mgr *mgr;
1591 	int ret;
1592 	bool need_hotplug = false;
1593 
1594 	drm_connector_list_iter_begin(dev, &iter);
1595 	drm_for_each_connector_iter(connector, &iter) {
1596 		aconnector = to_amdgpu_dm_connector(connector);
1597 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1598 		    aconnector->mst_port)
1599 			continue;
1600 
1601 		mgr = &aconnector->mst_mgr;
1602 
1603 		if (suspend) {
1604 			drm_dp_mst_topology_mgr_suspend(mgr);
1605 		} else {
1606 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1607 			if (ret < 0) {
1608 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1609 				need_hotplug = true;
1610 			}
1611 		}
1612 	}
1613 	drm_connector_list_iter_end(&iter);
1614 
1615 	if (need_hotplug)
1616 		drm_kms_helper_hotplug_event(dev);
1617 }
1618 
1619 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1620 {
1621 	struct smu_context *smu = &adev->smu;
1622 	int ret = 0;
1623 
1624 	if (!is_support_sw_smu(adev))
1625 		return 0;
1626 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed; the
	 * settings should be passed to smu during boot up and resume from s3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, then calls the pplib functions
	 * below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
1658 	case CHIP_NAVI10:
1659 	case CHIP_NAVI14:
1660 	case CHIP_NAVI12:
1661 		break;
1662 	default:
1663 		return 0;
1664 	}
1665 
1666 	ret = smu_write_watermarks_table(smu);
1667 	if (ret) {
1668 		DRM_ERROR("Failed to update WMTABLE!\n");
1669 		return ret;
1670 	}
1671 
1672 	return 0;
1673 }
1674 
1675 /**
1676  * dm_hw_init() - Initialize DC device
1677  * @handle: The base driver device containing the amdgpu_dm device.
1678  *
1679  * Initialize the &struct amdgpu_display_manager device. This involves calling
1680  * the initializers of each DM component, then populating the struct with them.
1681  *
1682  * Although the function implies hardware initialization, both hardware and
1683  * software are initialized here. Splitting them out to their relevant init
1684  * hooks is a future TODO item.
1685  *
1686  * Some notable things that are initialized here:
1687  *
1688  * - Display Core, both software and hardware
1689  * - DC modules that we need (freesync and color management)
1690  * - DRM software states
1691  * - Interrupt sources and handlers
1692  * - Vblank support
1693  * - Debug FS entries, if enabled
1694  */
1695 static int dm_hw_init(void *handle)
1696 {
1697 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1698 	/* Create DAL display manager */
1699 	amdgpu_dm_init(adev);
1700 	amdgpu_dm_hpd_init(adev);
1701 
1702 	return 0;
1703 }
1704 
1705 /**
1706  * dm_hw_fini() - Teardown DC device
1707  * @handle: The base driver device containing the amdgpu_dm device.
1708  *
1709  * Teardown components within &struct amdgpu_display_manager that require
1710  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1711  * were loaded. Also flush IRQ workqueues and disable them.
1712  */
1713 static int dm_hw_fini(void *handle)
1714 {
1715 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1716 
1717 	amdgpu_dm_hpd_fini(adev);
1718 
1719 	amdgpu_dm_irq_fini(adev);
1720 	amdgpu_dm_fini(adev);
1721 	return 0;
1722 }
1723 
1724 
1725 static int dm_enable_vblank(struct drm_crtc *crtc);
1726 static void dm_disable_vblank(struct drm_crtc *crtc);
1727 
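/*
 * Enable or disable the pflip and vblank interrupts for every stream in
 * @state that has at least one plane. Used to quiesce the interrupt
 * sources before a GPU reset and to re-arm them afterwards.
 */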
1728 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1729 				 struct dc_state *state, bool enable)
1730 {
1731 	enum dc_irq_source irq_source;
1732 	struct amdgpu_crtc *acrtc;
1733 	int rc = -EBUSY;
1734 	int i = 0;
1735 
1736 	for (i = 0; i < state->stream_count; i++) {
1737 		acrtc = get_crtc_by_otg_inst(
1738 				adev, state->stream_status[i].primary_otg_inst);
1739 
1740 		if (acrtc && state->stream_status[i].plane_count != 0) {
1741 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1742 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1745 			if (rc)
1746 				DRM_WARN("Failed to %s pflip interrupts\n",
1747 					 enable ? "enable" : "disable");
1748 
1749 			if (enable) {
1750 				rc = dm_enable_vblank(&acrtc->base);
1751 				if (rc)
1752 					DRM_WARN("Failed to enable vblank interrupts\n");
1753 			} else {
1754 				dm_disable_vblank(&acrtc->base);
1755 			}
1756 
1757 		}
1758 	}
1759 
1760 }
1761 
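/*
 * Commit a copy of the current dc state with all planes and streams
 * removed, leaving the hardware driving zero streams. Used on the
 * suspend path of a GPU reset.
 */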
1762 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1763 {
1764 	struct dc_state *context = NULL;
1765 	enum dc_status res = DC_ERROR_UNEXPECTED;
1766 	int i;
1767 	struct dc_stream_state *del_streams[MAX_PIPES];
1768 	int del_streams_count = 0;
1769 
1770 	memset(del_streams, 0, sizeof(del_streams));
1771 
1772 	context = dc_create_state(dc);
1773 	if (context == NULL)
1774 		goto context_alloc_fail;
1775 
1776 	dc_resource_state_copy_construct_current(dc, context);
1777 
1778 	/* First remove from context all streams */
1779 	for (i = 0; i < context->stream_count; i++) {
1780 		struct dc_stream_state *stream = context->streams[i];
1781 
1782 		del_streams[del_streams_count++] = stream;
1783 	}
1784 
1785 	/* Remove all planes for removed streams and then remove the streams */
1786 	for (i = 0; i < del_streams_count; i++) {
1787 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1788 			res = DC_FAIL_DETACH_SURFACES;
1789 			goto fail;
1790 		}
1791 
1792 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1793 		if (res != DC_OK)
1794 			goto fail;
1795 	}
1796 
1797 
1798 	res = dc_validate_global_state(dc, context, false);
1799 
1800 	if (res != DC_OK) {
		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1802 		goto fail;
1803 	}
1804 
1805 	res = dc_commit_state(dc, context);
1806 
1807 fail:
1808 	dc_release_state(context);
1809 
1810 context_alloc_fail:
1811 	return res;
1812 }
1813 
1814 static int dm_suspend(void *handle)
1815 {
1816 	struct amdgpu_device *adev = handle;
1817 	struct amdgpu_display_manager *dm = &adev->dm;
1818 	int ret = 0;
1819 
1820 	if (amdgpu_in_reset(adev)) {
1821 		mutex_lock(&dm->dc_lock);
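		/* dc_lock is held across the GPU reset; dm_resume() releases it */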
1822 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1823 
1824 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1825 
1826 		amdgpu_dm_commit_zero_streams(dm->dc);
1827 
1828 		amdgpu_dm_irq_suspend(adev);
1829 
1830 		return ret;
1831 	}
1832 
1833 	WARN_ON(adev->dm.cached_state);
1834 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1835 
1836 	s3_handle_mst(adev_to_drm(adev), true);
1837 
1838 	amdgpu_dm_irq_suspend(adev);
1839 
1840 
1841 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1842 
1843 	return 0;
1844 }
1845 
1846 static struct amdgpu_dm_connector *
1847 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1848 					     struct drm_crtc *crtc)
1849 {
1850 	uint32_t i;
1851 	struct drm_connector_state *new_con_state;
1852 	struct drm_connector *connector;
1853 	struct drm_crtc *crtc_from_state;
1854 
1855 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1856 		crtc_from_state = new_con_state->crtc;
1857 
1858 		if (crtc_from_state == crtc)
1859 			return to_amdgpu_dm_connector(connector);
1860 	}
1861 
1862 	return NULL;
1863 }
1864 
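/*
 * Emulate a hotplug detection for a connector that userspace has forced
 * on: create a fake sink matching the connector's signal type and attempt
 * to read a local EDID for it.
 */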
1865 static void emulated_link_detect(struct dc_link *link)
1866 {
1867 	struct dc_sink_init_data sink_init_data = { 0 };
1868 	struct display_sink_capability sink_caps = { 0 };
1869 	enum dc_edid_status edid_status;
1870 	struct dc_context *dc_ctx = link->ctx;
1871 	struct dc_sink *sink = NULL;
1872 	struct dc_sink *prev_sink = NULL;
1873 
1874 	link->type = dc_connection_none;
1875 	prev_sink = link->local_sink;
1876 
1877 	if (prev_sink != NULL)
1878 		dc_sink_retain(prev_sink);
1879 
1880 	switch (link->connector_signal) {
1881 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1882 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1883 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1884 		break;
1885 	}
1886 
1887 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1888 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1889 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1890 		break;
1891 	}
1892 
1893 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1894 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1895 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1896 		break;
1897 	}
1898 
1899 	case SIGNAL_TYPE_LVDS: {
1900 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1901 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1902 		break;
1903 	}
1904 
1905 	case SIGNAL_TYPE_EDP: {
1906 		sink_caps.transaction_type =
1907 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1908 		sink_caps.signal = SIGNAL_TYPE_EDP;
1909 		break;
1910 	}
1911 
1912 	case SIGNAL_TYPE_DISPLAY_PORT: {
1913 		sink_caps.transaction_type =
1914 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1915 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1916 		break;
1917 	}
1918 
1919 	default:
1920 		DC_ERROR("Invalid connector type! signal:%d\n",
1921 			link->connector_signal);
1922 		return;
1923 	}
1924 
1925 	sink_init_data.link = link;
1926 	sink_init_data.sink_signal = sink_caps.signal;
1927 
1928 	sink = dc_sink_create(&sink_init_data);
1929 	if (!sink) {
1930 		DC_ERROR("Failed to create sink!\n");
1931 		return;
1932 	}
1933 
1934 	/* dc_sink_create returns a new reference */
1935 	link->local_sink = sink;
1936 
1937 	edid_status = dm_helpers_read_local_edid(
1938 			link->ctx,
1939 			link,
1940 			sink);
1941 
1942 	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
1944 
1945 }
1946 
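/*
 * Re-commit every stream in the cached dc state with force_full_update
 * set on all of its surfaces, so the hardware is fully reprogrammed
 * after a GPU reset.
 */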
1947 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1948 				     struct amdgpu_display_manager *dm)
1949 {
1950 	struct {
1951 		struct dc_surface_update surface_updates[MAX_SURFACES];
1952 		struct dc_plane_info plane_infos[MAX_SURFACES];
1953 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1954 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1955 		struct dc_stream_update stream_update;
	} *bundle;
1957 	int k, m;
1958 
1959 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1960 
1961 	if (!bundle) {
1962 		dm_error("Failed to allocate update bundle\n");
1963 		goto cleanup;
1964 	}
1965 
1966 	for (k = 0; k < dc_state->stream_count; k++) {
1967 		bundle->stream_update.stream = dc_state->streams[k];
1968 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
1979 	}
1980 
1981 cleanup:
1982 	kfree(bundle);
1985 }
1986 
1987 static int dm_resume(void *handle)
1988 {
1989 	struct amdgpu_device *adev = handle;
1990 	struct drm_device *ddev = adev_to_drm(adev);
1991 	struct amdgpu_display_manager *dm = &adev->dm;
1992 	struct amdgpu_dm_connector *aconnector;
1993 	struct drm_connector *connector;
1994 	struct drm_connector_list_iter iter;
1995 	struct drm_crtc *crtc;
1996 	struct drm_crtc_state *new_crtc_state;
1997 	struct dm_crtc_state *dm_new_crtc_state;
1998 	struct drm_plane *plane;
1999 	struct drm_plane_state *new_plane_state;
2000 	struct dm_plane_state *dm_new_plane_state;
2001 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2002 	enum dc_connection_type new_connection_type = dc_connection_none;
2003 	struct dc_state *dc_state;
2004 	int i, r, j;
2005 
2006 	if (amdgpu_in_reset(adev)) {
2007 		dc_state = dm->cached_dc_state;
2008 
2009 		r = dm_dmub_hw_init(adev);
2010 		if (r)
2011 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2012 
2013 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2014 		dc_resume(dm->dc);
2015 
2016 		amdgpu_dm_irq_resume_early(adev);
2017 
2018 		for (i = 0; i < dc_state->stream_count; i++) {
2019 			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
2023 			}
2024 		}
2025 
2026 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2027 
2028 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2029 
2030 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2031 
2032 		dc_release_state(dm->cached_dc_state);
2033 		dm->cached_dc_state = NULL;
2034 
2035 		amdgpu_dm_irq_resume_late(adev);
2036 
2037 		mutex_unlock(&dm->dc_lock);
2038 
2039 		return 0;
2040 	}
2041 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2042 	dc_release_state(dm_state->context);
2043 	dm_state->context = dc_create_state(dm->dc);
2044 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2045 	dc_resource_state_construct(dm->dc, dm_state->context);
2046 
2047 	/* Before powering on DC we need to re-initialize DMUB. */
2048 	r = dm_dmub_hw_init(adev);
2049 	if (r)
2050 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2051 
2052 	/* power on hardware */
2053 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2054 
2055 	/* program HPD filter */
2056 	dc_resume(dm->dc);
2057 
2058 	/*
2059 	 * early enable HPD Rx IRQ, should be done before set mode as short
2060 	 * pulse interrupts are used for MST
2061 	 */
2062 	amdgpu_dm_irq_resume_early(adev);
2063 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2065 	s3_handle_mst(ddev, false);
2066 
	/* Do detection */
2068 	drm_connector_list_iter_begin(ddev, &iter);
2069 	drm_for_each_connector_iter(connector, &iter) {
2070 		aconnector = to_amdgpu_dm_connector(connector);
2071 
		/*
		 * This is the case when traversing through already created
		 * MST connectors; they are managed by the MST framework and
		 * should be skipped.
		 */
2076 		if (aconnector->mst_port)
2077 			continue;
2078 
2079 		mutex_lock(&aconnector->hpd_lock);
2080 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2081 			DRM_ERROR("KMS: Failed to detect connector\n");
2082 
2083 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2084 			emulated_link_detect(aconnector->dc_link);
2085 		else
2086 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2087 
2088 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2089 			aconnector->fake_enable = false;
2090 
2091 		if (aconnector->dc_sink)
2092 			dc_sink_release(aconnector->dc_sink);
2093 		aconnector->dc_sink = NULL;
2094 		amdgpu_dm_update_connector_after_detect(aconnector);
2095 		mutex_unlock(&aconnector->hpd_lock);
2096 	}
2097 	drm_connector_list_iter_end(&iter);
2098 
2099 	/* Force mode set in atomic commit */
2100 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2101 		new_crtc_state->active_changed = true;
2102 
2103 	/*
2104 	 * atomic_check is expected to create the dc states. We need to release
2105 	 * them here, since they were duplicated as part of the suspend
2106 	 * procedure.
2107 	 */
2108 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2109 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2110 		if (dm_new_crtc_state->stream) {
2111 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2112 			dc_stream_release(dm_new_crtc_state->stream);
2113 			dm_new_crtc_state->stream = NULL;
2114 		}
2115 	}
2116 
2117 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2118 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2119 		if (dm_new_plane_state->dc_state) {
2120 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2121 			dc_plane_state_release(dm_new_plane_state->dc_state);
2122 			dm_new_plane_state->dc_state = NULL;
2123 		}
2124 	}
2125 
2126 	drm_atomic_helper_resume(ddev, dm->cached_state);
2127 
2128 	dm->cached_state = NULL;
2129 
2130 	amdgpu_dm_irq_resume_late(adev);
2131 
2132 	amdgpu_dm_smu_write_watermarks_table(adev);
2133 
2134 	return 0;
2135 }
2136 
2137 /**
2138  * DOC: DM Lifecycle
2139  *
2140  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2141  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2142  * the base driver's device list to be initialized and torn down accordingly.
2143  *
2144  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2145  */
2146 
2147 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2148 	.name = "dm",
2149 	.early_init = dm_early_init,
2150 	.late_init = dm_late_init,
2151 	.sw_init = dm_sw_init,
2152 	.sw_fini = dm_sw_fini,
2153 	.hw_init = dm_hw_init,
2154 	.hw_fini = dm_hw_fini,
2155 	.suspend = dm_suspend,
2156 	.resume = dm_resume,
2157 	.is_idle = dm_is_idle,
2158 	.wait_for_idle = dm_wait_for_idle,
2159 	.check_soft_reset = dm_check_soft_reset,
2160 	.soft_reset = dm_soft_reset,
2161 	.set_clockgating_state = dm_set_clockgating_state,
2162 	.set_powergating_state = dm_set_powergating_state,
2163 };
2164 
2165 const struct amdgpu_ip_block_version dm_ip_block =
2166 {
2167 	.type = AMD_IP_BLOCK_TYPE_DCE,
2168 	.major = 1,
2169 	.minor = 0,
2170 	.rev = 0,
2171 	.funcs = &amdgpu_dm_funcs,
2172 };
2173 
2174 
2175 /**
2176  * DOC: atomic
2177  *
2178  * *WIP*
2179  */
2180 
2181 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2182 	.fb_create = amdgpu_display_user_framebuffer_create,
2183 	.get_format_info = amd_get_format_info,
2184 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2185 	.atomic_check = amdgpu_dm_atomic_check,
2186 	.atomic_commit = amdgpu_dm_atomic_commit,
2187 };
2188 
2189 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2190 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2191 };
2192 
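/*
 * Parse the eDP sink's extended backlight capabilities (OLED and AUX
 * backlight control) and derive the AUX min/max input signal, in nits,
 * from the max_cll/min_cll values reported by the sink.
 */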
2193 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2194 {
2195 	u32 max_cll, min_cll, max, min, q, r;
2196 	struct amdgpu_dm_backlight_caps *caps;
2197 	struct amdgpu_display_manager *dm;
2198 	struct drm_connector *conn_base;
2199 	struct amdgpu_device *adev;
2200 	struct dc_link *link = NULL;
2201 	static const u8 pre_computed_values[] = {
2202 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2203 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2204 
2205 	if (!aconnector || !aconnector->dc_link)
2206 		return;
2207 
2208 	link = aconnector->dc_link;
2209 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2210 		return;
2211 
2212 	conn_base = &aconnector->base;
2213 	adev = drm_to_adev(conn_base->dev);
2214 	dm = &adev->dm;
2215 	caps = &dm->backlight_caps;
2216 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2217 	caps->aux_support = false;
2218 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2219 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2220 
2221 	if (caps->ext_caps->bits.oled == 1 ||
2222 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2223 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2224 		caps->aux_support = true;
2225 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * Calculating this expression directly would need floating-point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * we know that CV can be written as: CV = 32*q + r. Next, we replace
	 * CV in the Luminance expression and get 50*(2**q)*(2**(r/32)), so we
	 * only need to pre-compute the 32 possible values of 50*(2**(r/32)).
	 * For pre-computing the values we used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified in
	 * pre_computed_values.
	 */
2241 	q = max_cll >> 5;
2242 	r = max_cll % 32;
2243 	max = (1 << q) * pre_computed_values[r];
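	/*
	 * Worked example with a hypothetical sink value: max_cll = 65 gives
	 * q = 2 and r = 1, so max = (1 << 2) * pre_computed_values[1] =
	 * 4 * 51 = 204 nits, matching 50 * 2**(65/32) ~= 204.4 from the
	 * formula above.
	 */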
2244 
2245 	// min luminance: maxLum * (CV/255)^2 / 100
2246 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2247 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2248 
2249 	caps->aux_max_input_signal = max;
2250 	caps->aux_min_input_signal = min;
2251 }
2252 
2253 void amdgpu_dm_update_connector_after_detect(
2254 		struct amdgpu_dm_connector *aconnector)
2255 {
2256 	struct drm_connector *connector = &aconnector->base;
2257 	struct drm_device *dev = connector->dev;
2258 	struct dc_sink *sink;
2259 
2260 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2262 		return;
2263 
2264 	sink = aconnector->dc_link->local_sink;
2265 	if (sink)
2266 		dc_sink_retain(sink);
2267 
	/*
	 * An EDID-managed connector gets its first update only in the
	 * mode_valid hook; after that the connector sink is set to either a
	 * fake or a physical sink, depending on the link status.
	 * Skip if this was already done during boot.
	 */
2273 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2274 			&& aconnector->dc_em_sink) {
2275 
		/*
		 * For headless S3 resume, use the emulated sink (dc_em_sink)
		 * to fake a stream, because connector->sink is set to NULL
		 * on resume.
		 */
2280 		mutex_lock(&dev->mode_config.mutex);
2281 
2282 		if (sink) {
2283 			if (aconnector->dc_sink) {
2284 				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump the
				 * sink's refcount, because the link no longer
				 * points to it after disconnect; otherwise
				 * the next crtc-to-connector reshuffle by the
				 * UMD would trigger an unwanted dc_sink
				 * release.
				 */
2291 				dc_sink_release(aconnector->dc_sink);
2292 			}
2293 			aconnector->dc_sink = sink;
2294 			dc_sink_retain(aconnector->dc_sink);
2295 			amdgpu_dm_update_freesync_caps(connector,
2296 					aconnector->edid);
2297 		} else {
2298 			amdgpu_dm_update_freesync_caps(connector, NULL);
2299 			if (!aconnector->dc_sink) {
2300 				aconnector->dc_sink = aconnector->dc_em_sink;
2301 				dc_sink_retain(aconnector->dc_sink);
2302 			}
2303 		}
2304 
2305 		mutex_unlock(&dev->mode_config.mutex);
2306 
2307 		if (sink)
2308 			dc_sink_release(sink);
2309 		return;
2310 	}
2311 
	/*
	 * TODO: temporary guard until a proper fix is found.
	 * If this sink is an MST sink, we should not do anything.
	 */
2316 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2317 		dc_sink_release(sink);
2318 		return;
2319 	}
2320 
2321 	if (aconnector->dc_sink == sink) {
2322 		/*
2323 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2324 		 * Do nothing!!
2325 		 */
2326 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2327 				aconnector->connector_id);
2328 		if (sink)
2329 			dc_sink_release(sink);
2330 		return;
2331 	}
2332 
2333 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2334 		aconnector->connector_id, aconnector->dc_sink, sink);
2335 
2336 	mutex_lock(&dev->mode_config.mutex);
2337 
2338 	/*
2339 	 * 1. Update status of the drm connector
2340 	 * 2. Send an event and let userspace tell us what to do
2341 	 */
2342 	if (sink) {
2343 		/*
2344 		 * TODO: check if we still need the S3 mode update workaround.
2345 		 * If yes, put it here.
2346 		 */
2347 		if (aconnector->dc_sink)
2348 			amdgpu_dm_update_freesync_caps(connector, NULL);
2349 
2350 		aconnector->dc_sink = sink;
2351 		dc_sink_retain(aconnector->dc_sink);
2352 		if (sink->dc_edid.length == 0) {
2353 			aconnector->edid = NULL;
2354 			if (aconnector->dc_link->aux_mode) {
2355 				drm_dp_cec_unset_edid(
2356 					&aconnector->dm_dp_aux.aux);
2357 			}
2358 		} else {
2359 			aconnector->edid =
2360 				(struct edid *)sink->dc_edid.raw_edid;
2361 
2362 			drm_connector_update_edid_property(connector,
2363 							   aconnector->edid);
2364 			drm_add_edid_modes(connector, aconnector->edid);
2365 
2366 			if (aconnector->dc_link->aux_mode)
2367 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2368 						    aconnector->edid);
2369 		}
2370 
2371 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2372 		update_connector_ext_caps(aconnector);
2373 	} else {
2374 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2375 		amdgpu_dm_update_freesync_caps(connector, NULL);
2376 		drm_connector_update_edid_property(connector, NULL);
2377 		aconnector->num_modes = 0;
2378 		dc_sink_release(aconnector->dc_sink);
2379 		aconnector->dc_sink = NULL;
2380 		aconnector->edid = NULL;
2381 #ifdef CONFIG_DRM_AMD_DC_HDCP
2382 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2383 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2384 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2385 #endif
2386 	}
2387 
2388 	mutex_unlock(&dev->mode_config.mutex);
2389 
2390 	update_subconnector_property(aconnector);
2391 
2392 	if (sink)
2393 		dc_sink_release(sink);
2394 }
2395 
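/*
 * HPD (long pulse) handler: re-run sink detection on the link and, when
 * the connector changed, restore the DRM connector state and send a
 * hotplug event to userspace.
 */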
2396 static void handle_hpd_irq(void *param)
2397 {
2398 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2399 	struct drm_connector *connector = &aconnector->base;
2400 	struct drm_device *dev = connector->dev;
2401 	enum dc_connection_type new_connection_type = dc_connection_none;
2402 #ifdef CONFIG_DRM_AMD_DC_HDCP
2403 	struct amdgpu_device *adev = drm_to_adev(dev);
2404 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2405 #endif
2406 
	/*
	 * In case of failure, or for MST, there is no need to update the
	 * connector status or notify the OS, since (in the MST case) MST
	 * handles this in its own context.
	 */
2411 	mutex_lock(&aconnector->hpd_lock);
2412 
2413 #ifdef CONFIG_DRM_AMD_DC_HDCP
2414 	if (adev->dm.hdcp_workqueue) {
2415 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2416 		dm_con_state->update_hdcp = true;
2417 	}
2418 #endif
2419 	if (aconnector->fake_enable)
2420 		aconnector->fake_enable = false;
2421 
2422 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2423 		DRM_ERROR("KMS: Failed to detect connector\n");
2424 
2425 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2426 		emulated_link_detect(aconnector->dc_link);
2427 
2428 
2429 		drm_modeset_lock_all(dev);
2430 		dm_restore_drm_connector_state(dev, connector);
2431 		drm_modeset_unlock_all(dev);
2432 
2433 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2434 			drm_kms_helper_hotplug_event(dev);
2435 
2436 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2437 		amdgpu_dm_update_connector_after_detect(aconnector);
2438 
2439 
2440 		drm_modeset_lock_all(dev);
2441 		dm_restore_drm_connector_state(dev, connector);
2442 		drm_modeset_unlock_all(dev);
2443 
2444 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2445 			drm_kms_helper_hotplug_event(dev);
2446 	}
2447 	mutex_unlock(&aconnector->hpd_lock);
2448 
2449 }
2450 
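/*
 * Service the MST sideband traffic signalled by a DP short pulse: read
 * the ESI bytes from the sink, hand them to the MST manager, ACK the
 * handled bits back at DPCD, and repeat while new IRQs are raised
 * (bounded by max_process_count).
 */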
2451 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2452 {
2453 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2454 	uint8_t dret;
2455 	bool new_irq_handled = false;
2456 	int dpcd_addr;
2457 	int dpcd_bytes_to_read;
2458 
2459 	const int max_process_count = 30;
2460 	int process_count = 0;
2461 
2462 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2463 
2464 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2465 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2466 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2467 		dpcd_addr = DP_SINK_COUNT;
2468 	} else {
2469 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2470 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2471 		dpcd_addr = DP_SINK_COUNT_ESI;
2472 	}
2473 
2474 	dret = drm_dp_dpcd_read(
2475 		&aconnector->dm_dp_aux.aux,
2476 		dpcd_addr,
2477 		esi,
2478 		dpcd_bytes_to_read);
2479 
2480 	while (dret == dpcd_bytes_to_read &&
2481 		process_count < max_process_count) {
2482 		uint8_t retry;
2483 		dret = 0;
2484 
2485 		process_count++;
2486 
2487 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2488 		/* handle HPD short pulse irq */
2489 		if (aconnector->mst_mgr.mst_state)
2490 			drm_dp_mst_hpd_irq(
2491 				&aconnector->mst_mgr,
2492 				esi,
2493 				&new_irq_handled);
2494 
2495 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
2497 			const int ack_dpcd_bytes_to_write =
2498 				dpcd_bytes_to_read - 1;
2499 
2500 			for (retry = 0; retry < 3; retry++) {
2501 				uint8_t wret;
2502 
2503 				wret = drm_dp_dpcd_write(
2504 					&aconnector->dm_dp_aux.aux,
2505 					dpcd_addr + 1,
2506 					&esi[1],
2507 					ack_dpcd_bytes_to_write);
2508 				if (wret == ack_dpcd_bytes_to_write)
2509 					break;
2510 			}
2511 
2512 			/* check if there is new irq to be handled */
2513 			dret = drm_dp_dpcd_read(
2514 				&aconnector->dm_dp_aux.aux,
2515 				dpcd_addr,
2516 				esi,
2517 				dpcd_bytes_to_read);
2518 
2519 			new_irq_handled = false;
2520 		} else {
2521 			break;
2522 		}
2523 	}
2524 
2525 	if (process_count == max_process_count)
2526 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2527 }
2528 
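/*
 * HPD RX (short pulse) handler: services downstream-port and link-status
 * changes on a DP link, dispatches MST sideband messages, and forwards
 * HDCP CP_IRQ and CEC notifications where applicable.
 */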
2529 static void handle_hpd_rx_irq(void *param)
2530 {
2531 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2532 	struct drm_connector *connector = &aconnector->base;
2533 	struct drm_device *dev = connector->dev;
2534 	struct dc_link *dc_link = aconnector->dc_link;
2535 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2536 	enum dc_connection_type new_connection_type = dc_connection_none;
2537 #ifdef CONFIG_DRM_AMD_DC_HDCP
2538 	union hpd_irq_data hpd_irq_data;
2539 	struct amdgpu_device *adev = drm_to_adev(dev);
2540 
2541 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2542 #endif
2543 
	/*
	 * TODO: Temporarily take the mutex so the hpd interrupt does not
	 * race on the gpio; once an i2c helper is implemented, this mutex
	 * should be retired.
	 */
2549 	if (dc_link->type != dc_connection_mst_branch)
2550 		mutex_lock(&aconnector->hpd_lock);
2551 
2552 
2553 #ifdef CONFIG_DRM_AMD_DC_HDCP
2554 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2555 #else
2556 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2557 #endif
2558 			!is_mst_root_connector) {
2559 		/* Downstream Port status changed. */
2560 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2561 			DRM_ERROR("KMS: Failed to detect connector\n");
2562 
2563 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2564 			emulated_link_detect(dc_link);
2565 
2566 			if (aconnector->fake_enable)
2567 				aconnector->fake_enable = false;
2568 
2569 			amdgpu_dm_update_connector_after_detect(aconnector);
2570 
2571 
2572 			drm_modeset_lock_all(dev);
2573 			dm_restore_drm_connector_state(dev, connector);
2574 			drm_modeset_unlock_all(dev);
2575 
2576 			drm_kms_helper_hotplug_event(dev);
2577 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2578 
2579 			if (aconnector->fake_enable)
2580 				aconnector->fake_enable = false;
2581 
2582 			amdgpu_dm_update_connector_after_detect(aconnector);
2583 
2584 
2585 			drm_modeset_lock_all(dev);
2586 			dm_restore_drm_connector_state(dev, connector);
2587 			drm_modeset_unlock_all(dev);
2588 
2589 			drm_kms_helper_hotplug_event(dev);
2590 		}
2591 	}
2592 #ifdef CONFIG_DRM_AMD_DC_HDCP
2593 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2594 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2596 	}
2597 #endif
2598 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2599 	    (dc_link->type == dc_connection_mst_branch))
2600 		dm_handle_hpd_rx_irq(aconnector);
2601 
2602 	if (dc_link->type != dc_connection_mst_branch) {
2603 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2604 		mutex_unlock(&aconnector->hpd_lock);
2605 	}
2606 }
2607 
2608 static void register_hpd_handlers(struct amdgpu_device *adev)
2609 {
2610 	struct drm_device *dev = adev_to_drm(adev);
2611 	struct drm_connector *connector;
2612 	struct amdgpu_dm_connector *aconnector;
2613 	const struct dc_link *dc_link;
2614 	struct dc_interrupt_params int_params = {0};
2615 
2616 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2617 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2618 
2619 	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
2621 
2622 		aconnector = to_amdgpu_dm_connector(connector);
2623 		dc_link = aconnector->dc_link;
2624 
2625 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2626 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2627 			int_params.irq_source = dc_link->irq_source_hpd;
2628 
2629 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2630 					handle_hpd_irq,
2631 					(void *) aconnector);
2632 		}
2633 
2634 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2635 
2636 			/* Also register for DP short pulse (hpd_rx). */
2637 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
2639 
2640 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2641 					handle_hpd_rx_irq,
2642 					(void *) aconnector);
2643 		}
2644 	}
2645 }
2646 
2647 #if defined(CONFIG_DRM_AMD_DC_SI)
2648 /* Register IRQ sources and initialize IRQ callbacks */
2649 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2650 {
2651 	struct dc *dc = adev->dm.dc;
2652 	struct common_irq_params *c_irq_params;
2653 	struct dc_interrupt_params int_params = {0};
2654 	int r;
2655 	int i;
2656 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2657 
2658 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2659 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2660 
2661 	/*
2662 	 * Actions of amdgpu_irq_add_id():
2663 	 * 1. Register a set() function with base driver.
2664 	 *    Base driver will call set() function to enable/disable an
2665 	 *    interrupt in DC hardware.
2666 	 * 2. Register amdgpu_dm_irq_handler().
2667 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2668 	 *    coming from DC hardware.
2669 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2671 
2672 	/* Use VBLANK interrupt */
2673 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2675 		if (r) {
2676 			DRM_ERROR("Failed to add crtc irq id!\n");
2677 			return r;
2678 		}
2679 
2680 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2681 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
2683 
2684 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2685 
2686 		c_irq_params->adev = adev;
2687 		c_irq_params->irq_src = int_params.irq_source;
2688 
2689 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2690 				dm_crtc_high_irq, c_irq_params);
2691 	}
2692 
2693 	/* Use GRPH_PFLIP interrupt */
2694 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2695 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2696 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2697 		if (r) {
2698 			DRM_ERROR("Failed to add page flip irq id!\n");
2699 			return r;
2700 		}
2701 
2702 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2703 		int_params.irq_source =
2704 			dc_interrupt_to_irq_source(dc, i, 0);
2705 
2706 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2707 
2708 		c_irq_params->adev = adev;
2709 		c_irq_params->irq_src = int_params.irq_source;
2710 
2711 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2712 				dm_pflip_high_irq, c_irq_params);
2713 
2714 	}
2715 
2716 	/* HPD */
2717 	r = amdgpu_irq_add_id(adev, client_id,
2718 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2719 	if (r) {
2720 		DRM_ERROR("Failed to add hpd irq id!\n");
2721 		return r;
2722 	}
2723 
2724 	register_hpd_handlers(adev);
2725 
2726 	return 0;
2727 }
2728 #endif
2729 
2730 /* Register IRQ sources and initialize IRQ callbacks */
2731 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2732 {
2733 	struct dc *dc = adev->dm.dc;
2734 	struct common_irq_params *c_irq_params;
2735 	struct dc_interrupt_params int_params = {0};
2736 	int r;
2737 	int i;
2738 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2739 
2740 	if (adev->asic_type >= CHIP_VEGA10)
2741 		client_id = SOC15_IH_CLIENTID_DCE;
2742 
2743 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2744 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2745 
2746 	/*
2747 	 * Actions of amdgpu_irq_add_id():
2748 	 * 1. Register a set() function with base driver.
2749 	 *    Base driver will call set() function to enable/disable an
2750 	 *    interrupt in DC hardware.
2751 	 * 2. Register amdgpu_dm_irq_handler().
2752 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2753 	 *    coming from DC hardware.
2754 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2756 
2757 	/* Use VBLANK interrupt */
2758 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2759 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2760 		if (r) {
2761 			DRM_ERROR("Failed to add crtc irq id!\n");
2762 			return r;
2763 		}
2764 
2765 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2766 		int_params.irq_source =
2767 			dc_interrupt_to_irq_source(dc, i, 0);
2768 
2769 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2770 
2771 		c_irq_params->adev = adev;
2772 		c_irq_params->irq_src = int_params.irq_source;
2773 
2774 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2775 				dm_crtc_high_irq, c_irq_params);
2776 	}
2777 
2778 	/* Use VUPDATE interrupt */
2779 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2780 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2781 		if (r) {
2782 			DRM_ERROR("Failed to add vupdate irq id!\n");
2783 			return r;
2784 		}
2785 
2786 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2787 		int_params.irq_source =
2788 			dc_interrupt_to_irq_source(dc, i, 0);
2789 
2790 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2791 
2792 		c_irq_params->adev = adev;
2793 		c_irq_params->irq_src = int_params.irq_source;
2794 
2795 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2796 				dm_vupdate_high_irq, c_irq_params);
2797 	}
2798 
2799 	/* Use GRPH_PFLIP interrupt */
2800 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2801 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2802 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2803 		if (r) {
2804 			DRM_ERROR("Failed to add page flip irq id!\n");
2805 			return r;
2806 		}
2807 
2808 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2809 		int_params.irq_source =
2810 			dc_interrupt_to_irq_source(dc, i, 0);
2811 
2812 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2813 
2814 		c_irq_params->adev = adev;
2815 		c_irq_params->irq_src = int_params.irq_source;
2816 
2817 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2818 				dm_pflip_high_irq, c_irq_params);
2819 
2820 	}
2821 
2822 	/* HPD */
2823 	r = amdgpu_irq_add_id(adev, client_id,
2824 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2825 	if (r) {
2826 		DRM_ERROR("Failed to add hpd irq id!\n");
2827 		return r;
2828 	}
2829 
2830 	register_hpd_handlers(adev);
2831 
2832 	return 0;
2833 }
2834 
2835 #if defined(CONFIG_DRM_AMD_DC_DCN)
2836 /* Register IRQ sources and initialize IRQ callbacks */
2837 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2838 {
2839 	struct dc *dc = adev->dm.dc;
2840 	struct common_irq_params *c_irq_params;
2841 	struct dc_interrupt_params int_params = {0};
2842 	int r;
2843 	int i;
2844 
2845 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2846 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2847 
2848 	/*
2849 	 * Actions of amdgpu_irq_add_id():
2850 	 * 1. Register a set() function with base driver.
2851 	 *    Base driver will call set() function to enable/disable an
2852 	 *    interrupt in DC hardware.
2853 	 * 2. Register amdgpu_dm_irq_handler().
2854 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2855 	 *    coming from DC hardware.
2856 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2857 	 *    for acknowledging and handling.
2858 	 */
2859 
2860 	/* Use VSTARTUP interrupt */
2861 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2862 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2863 			i++) {
2864 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2865 
2866 		if (r) {
2867 			DRM_ERROR("Failed to add crtc irq id!\n");
2868 			return r;
2869 		}
2870 
2871 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2872 		int_params.irq_source =
2873 			dc_interrupt_to_irq_source(dc, i, 0);
2874 
2875 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2876 
2877 		c_irq_params->adev = adev;
2878 		c_irq_params->irq_src = int_params.irq_source;
2879 
2880 		amdgpu_dm_irq_register_interrupt(
2881 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2882 	}
2883 
2884 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2885 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2886 	 * to trigger at end of each vblank, regardless of state of the lock,
2887 	 * matching DCE behaviour.
2888 	 */
2889 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2890 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2891 	     i++) {
2892 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2893 
2894 		if (r) {
2895 			DRM_ERROR("Failed to add vupdate irq id!\n");
2896 			return r;
2897 		}
2898 
2899 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2900 		int_params.irq_source =
2901 			dc_interrupt_to_irq_source(dc, i, 0);
2902 
2903 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2904 
2905 		c_irq_params->adev = adev;
2906 		c_irq_params->irq_src = int_params.irq_source;
2907 
2908 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2909 				dm_vupdate_high_irq, c_irq_params);
2910 	}
2911 
2912 	/* Use GRPH_PFLIP interrupt */
2913 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2914 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2915 			i++) {
2916 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2917 		if (r) {
2918 			DRM_ERROR("Failed to add page flip irq id!\n");
2919 			return r;
2920 		}
2921 
2922 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2923 		int_params.irq_source =
2924 			dc_interrupt_to_irq_source(dc, i, 0);
2925 
2926 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2927 
2928 		c_irq_params->adev = adev;
2929 		c_irq_params->irq_src = int_params.irq_source;
2930 
2931 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2932 				dm_pflip_high_irq, c_irq_params);
2933 
2934 	}
2935 
2936 	/* HPD */
2937 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2938 			&adev->hpd_irq);
2939 	if (r) {
2940 		DRM_ERROR("Failed to add hpd irq id!\n");
2941 		return r;
2942 	}
2943 
2944 	register_hpd_handlers(adev);
2945 
2946 	return 0;
2947 }
2948 #endif
2949 
2950 /*
2951  * Acquires the lock for the atomic state object and returns
2952  * the new atomic state.
2953  *
2954  * This should only be called during atomic check.
2955  */
2956 static int dm_atomic_get_state(struct drm_atomic_state *state,
2957 			       struct dm_atomic_state **dm_state)
2958 {
2959 	struct drm_device *dev = state->dev;
2960 	struct amdgpu_device *adev = drm_to_adev(dev);
2961 	struct amdgpu_display_manager *dm = &adev->dm;
2962 	struct drm_private_state *priv_state;
2963 
2964 	if (*dm_state)
2965 		return 0;
2966 
2967 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2968 	if (IS_ERR(priv_state))
2969 		return PTR_ERR(priv_state);
2970 
2971 	*dm_state = to_dm_atomic_state(priv_state);
2972 
2973 	return 0;
2974 }
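/*
 * A minimal usage sketch for the helper above (hypothetical caller,
 * as it would appear inside atomic check):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	(dm_state->context can now be inspected or modified)
 */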
2975 
2976 static struct dm_atomic_state *
2977 dm_atomic_get_new_state(struct drm_atomic_state *state)
2978 {
2979 	struct drm_device *dev = state->dev;
2980 	struct amdgpu_device *adev = drm_to_adev(dev);
2981 	struct amdgpu_display_manager *dm = &adev->dm;
2982 	struct drm_private_obj *obj;
2983 	struct drm_private_state *new_obj_state;
2984 	int i;
2985 
2986 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2987 		if (obj->funcs == dm->atomic_obj.funcs)
2988 			return to_dm_atomic_state(new_obj_state);
2989 	}
2990 
2991 	return NULL;
2992 }
2993 
2994 static struct drm_private_state *
2995 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2996 {
2997 	struct dm_atomic_state *old_state, *new_state;
2998 
2999 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3000 	if (!new_state)
3001 		return NULL;
3002 
3003 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3004 
3005 	old_state = to_dm_atomic_state(obj->state);
3006 
3007 	if (old_state && old_state->context)
3008 		new_state->context = dc_copy_state(old_state->context);
3009 
3010 	if (!new_state->context) {
3011 		kfree(new_state);
3012 		return NULL;
3013 	}
3014 
3015 	return &new_state->base;
3016 }
3017 
3018 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3019 				    struct drm_private_state *state)
3020 {
3021 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3022 
3023 	if (dm_state && dm_state->context)
3024 		dc_release_state(dm_state->context);
3025 
3026 	kfree(dm_state);
3027 }
3028 
3029 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3030 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3031 	.atomic_destroy_state = dm_atomic_destroy_state,
3032 };
3033 
3034 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3035 {
3036 	struct dm_atomic_state *state;
3037 	int r;
3038 
3039 	adev->mode_info.mode_config_initialized = true;
3040 
3041 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3042 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3043 
3044 	adev_to_drm(adev)->mode_config.max_width = 16384;
3045 	adev_to_drm(adev)->mode_config.max_height = 16384;
3046 
3047 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3048 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3049 	/* indicates support for immediate flip */
3050 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3051 
3052 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3053 
3054 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3055 	if (!state)
3056 		return -ENOMEM;
3057 
3058 	state->context = dc_create_state(adev->dm.dc);
3059 	if (!state->context) {
3060 		kfree(state);
3061 		return -ENOMEM;
3062 	}
3063 
3064 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3065 
3066 	drm_atomic_private_obj_init(adev_to_drm(adev),
3067 				    &adev->dm.atomic_obj,
3068 				    &state->base,
3069 				    &dm_atomic_state_funcs);
3070 
3071 	r = amdgpu_display_modeset_create_props(adev);
3072 	if (r) {
3073 		dc_release_state(state->context);
3074 		kfree(state);
3075 		return r;
3076 	}
3077 
3078 	r = amdgpu_dm_audio_init(adev);
3079 	if (r) {
3080 		dc_release_state(state->context);
3081 		kfree(state);
3082 		return r;
3083 	}
3084 
3085 	return 0;
3086 }
3087 
3088 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3089 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3090 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3091 
3092 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3093 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3094 
3095 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3096 {
3097 #if defined(CONFIG_ACPI)
3098 	struct amdgpu_dm_backlight_caps caps;
3099 
3100 	memset(&caps, 0, sizeof(caps));
3101 
3102 	if (dm->backlight_caps.caps_valid)
3103 		return;
3104 
3105 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3106 	if (caps.caps_valid) {
3107 		dm->backlight_caps.caps_valid = true;
3108 		if (caps.aux_support)
3109 			return;
3110 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3111 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3112 	} else {
3113 		dm->backlight_caps.min_input_signal =
3114 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3115 		dm->backlight_caps.max_input_signal =
3116 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3117 	}
3118 #else
3119 	if (dm->backlight_caps.aux_support)
3120 		return;
3121 
3122 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3123 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3124 #endif
3125 }
3126 
3127 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3128 {
3129 	bool rc;
3130 
3131 	if (!link)
3132 		return 1;
3133 
3134 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3135 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3136 
3137 	return rc ? 0 : 1;
3138 }
3139 
3140 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3141 				unsigned *min, unsigned *max)
3142 {
3143 	if (!caps)
3144 		return 0;
3145 
3146 	if (caps->aux_support) {
3147 		// Firmware limits are in nits, DC API wants millinits.
3148 		*max = 1000 * caps->aux_max_input_signal;
3149 		*min = 1000 * caps->aux_min_input_signal;
3150 	} else {
3151 		// Firmware limits are 8-bit, PWM control is 16-bit.
3152 		*max = 0x101 * caps->max_input_signal;
3153 		*min = 0x101 * caps->min_input_signal;
3154 	}
3155 	return 1;
3156 }
3157 
3158 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3159 					uint32_t brightness)
3160 {
3161 	unsigned min, max;
3162 
3163 	if (!get_brightness_range(caps, &min, &max))
3164 		return brightness;
3165 
3166 	// Rescale 0..255 to min..max
3167 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3168 				       AMDGPU_MAX_BL_LEVEL);
3169 }
3170 
3171 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3172 				      uint32_t brightness)
3173 {
3174 	unsigned min, max;
3175 
3176 	if (!get_brightness_range(caps, &min, &max))
3177 		return brightness;
3178 
3179 	if (brightness < min)
3180 		return 0;
3181 	// Rescale min..max to 0..255
3182 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3183 				 max - min);
3184 }
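/*
 * Worked example, assuming a PWM-only panel and AMDGPU_MAX_BL_LEVEL == 255:
 * min_input_signal = 12 and max_input_signal = 255 give min = 0x101 * 12 =
 * 3084 and max = 0x101 * 255 = 65535, so a user brightness of 255 maps to
 * 65535 and 65535 converts back to 255; the two helpers are inverses at
 * the range endpoints.
 */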
3185 
3186 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3187 {
3188 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3189 	struct amdgpu_dm_backlight_caps caps;
3190 	struct dc_link *link = NULL;
3191 	u32 brightness;
3192 	bool rc;
3193 
3194 	amdgpu_dm_update_backlight_caps(dm);
3195 	caps = dm->backlight_caps;
3196 
3197 	link = (struct dc_link *)dm->backlight_link;
3198 
3199 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3200 	// Change brightness based on AUX property
3201 	if (caps.aux_support)
3202 		return set_backlight_via_aux(link, brightness);
3203 
3204 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3205 
3206 	return rc ? 0 : 1;
3207 }
3208 
3209 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3210 {
3211 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3212 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3213 
3214 	if (ret == DC_ERROR_UNEXPECTED)
3215 		return bd->props.brightness;
3216 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3217 }
3218 
3219 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3220 	.options = BL_CORE_SUSPENDRESUME,
3221 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3222 	.update_status	= amdgpu_dm_backlight_update_status,
3223 };
3224 
3225 static void
3226 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3227 {
3228 	char bl_name[16];
3229 	struct backlight_properties props = { 0 };
3230 
3231 	amdgpu_dm_update_backlight_caps(dm);
3232 
3233 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3234 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3235 	props.type = BACKLIGHT_RAW;
3236 
3237 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3238 		 adev_to_drm(dm->adev)->primary->index);
3239 
3240 	dm->backlight_dev = backlight_device_register(bl_name,
3241 						      adev_to_drm(dm->adev)->dev,
3242 						      dm,
3243 						      &amdgpu_dm_backlight_ops,
3244 						      &props);
3245 
3246 	if (IS_ERR(dm->backlight_dev))
3247 		DRM_ERROR("DM: Backlight registration failed!\n");
3248 	else
3249 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3250 }
3251 
3252 #endif
3253 
3254 static int initialize_plane(struct amdgpu_display_manager *dm,
3255 			    struct amdgpu_mode_info *mode_info, int plane_id,
3256 			    enum drm_plane_type plane_type,
3257 			    const struct dc_plane_cap *plane_cap)
3258 {
3259 	struct drm_plane *plane;
3260 	unsigned long possible_crtcs;
3261 	int ret = 0;
3262 
	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
3264 	if (!plane) {
3265 		DRM_ERROR("KMS: Failed to allocate plane\n");
3266 		return -ENOMEM;
3267 	}
3268 	plane->type = plane_type;
3269 
	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC for planes that are not going to be used as a
	 * primary plane for a CRTC - like overlay or underlay planes.
	 */
3276 	possible_crtcs = 1 << plane_id;
3277 	if (plane_id >= dm->dc->caps.max_streams)
3278 		possible_crtcs = 0xff;
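	/*
	 * Example of the resulting masks, assuming max_streams == 4:
	 * plane_ids 0..3 get possible_crtcs 0x1, 0x2, 0x4 and 0x8 (one
	 * CRTC each), while overlay planes (plane_id >= 4) get 0xff
	 * (any CRTC).
	 */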
3279 
3280 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3281 
3282 	if (ret) {
3283 		DRM_ERROR("KMS: Failed to initialize plane\n");
3284 		kfree(plane);
3285 		return ret;
3286 	}
3287 
3288 	if (mode_info)
3289 		mode_info->planes[plane_id] = plane;
3290 
3291 	return ret;
3292 }
3293 
3294 
3295 static void register_backlight_device(struct amdgpu_display_manager *dm,
3296 				      struct dc_link *link)
3297 {
3298 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3299 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3300 
3301 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3302 	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization, because having no backlight control
		 * is better than a black screen.
		 */
3308 		amdgpu_dm_register_backlight_device(dm);
3309 
3310 		if (dm->backlight_dev)
3311 			dm->backlight_link = link;
3312 	}
3313 #endif
3314 }
3315 
3316 
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success.
 */
3325 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3326 {
3327 	struct amdgpu_display_manager *dm = &adev->dm;
3328 	int32_t i;
3329 	struct amdgpu_dm_connector *aconnector = NULL;
3330 	struct amdgpu_encoder *aencoder = NULL;
3331 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3332 	uint32_t link_cnt;
3333 	int32_t primary_planes;
3334 	enum dc_connection_type new_connection_type = dc_connection_none;
3335 	const struct dc_plane_cap *plane;
3336 
3337 	link_cnt = dm->dc->caps.max_links;
3338 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3339 		DRM_ERROR("DM: Failed to initialize mode config\n");
3340 		return -EINVAL;
3341 	}
3342 
3343 	/* There is one primary plane per CRTC */
3344 	primary_planes = dm->dc->caps.max_streams;
3345 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3346 
3347 	/*
3348 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3349 	 * Order is reversed to match iteration order in atomic check.
3350 	 */
3351 	for (i = (primary_planes - 1); i >= 0; i--) {
3352 		plane = &dm->dc->caps.planes[i];
3353 
3354 		if (initialize_plane(dm, mode_info, i,
3355 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3356 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3357 			goto fail;
3358 		}
3359 	}
3360 
3361 	/*
3362 	 * Initialize overlay planes, index starting after primary planes.
3363 	 * These planes have a higher DRM index than the primary planes since
3364 	 * they should be considered as having a higher z-order.
3365 	 * Order is reversed to match iteration order in atomic check.
3366 	 *
3367 	 * Only support DCN for now, and only expose one so we don't encourage
3368 	 * userspace to use up all the pipes.
3369 	 */
3370 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3371 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3372 
3373 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3374 			continue;
3375 
3376 		if (!plane->blends_with_above || !plane->blends_with_below)
3377 			continue;
3378 
3379 		if (!plane->pixel_format_support.argb8888)
3380 			continue;
3381 
3382 		if (initialize_plane(dm, NULL, primary_planes + i,
3383 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3384 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3385 			goto fail;
3386 		}
3387 
3388 		/* Only create one overlay plane. */
3389 		break;
3390 	}
3391 
3392 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3393 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3394 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3395 			goto fail;
3396 		}
3397 
3398 	dm->display_indexes_num = dm->dc->caps.max_streams;
3399 
3400 	/* loops over all connectors on the board */
3401 	for (i = 0; i < link_cnt; i++) {
3402 		struct dc_link *link = NULL;
3403 
3404 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3405 			DRM_ERROR(
3406 				"KMS: Cannot support more than %d display indexes\n",
3407 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3408 			continue;
3409 		}
3410 
3411 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3412 		if (!aconnector)
3413 			goto fail;
3414 
3415 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3416 		if (!aencoder)
3417 			goto fail;
3418 
3419 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3420 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3421 			goto fail;
3422 		}
3423 
3424 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3425 			DRM_ERROR("KMS: Failed to initialize connector\n");
3426 			goto fail;
3427 		}
3428 
3429 		link = dc_get_link_at_index(dm->dc, i);
3430 
3431 		if (!dc_link_detect_sink(link, &new_connection_type))
3432 			DRM_ERROR("KMS: Failed to detect connector\n");
3433 
3434 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3435 			emulated_link_detect(link);
3436 			amdgpu_dm_update_connector_after_detect(aconnector);
3437 
3438 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3439 			amdgpu_dm_update_connector_after_detect(aconnector);
3440 			register_backlight_device(dm, link);
3441 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3442 				amdgpu_dm_set_psr_caps(link);
3443 		}
3444 
3445 
3446 	}
3447 
3448 	/* Software is initialized. Now we can register interrupt handlers. */
3449 	switch (adev->asic_type) {
3450 #if defined(CONFIG_DRM_AMD_DC_SI)
3451 	case CHIP_TAHITI:
3452 	case CHIP_PITCAIRN:
3453 	case CHIP_VERDE:
3454 	case CHIP_OLAND:
3455 		if (dce60_register_irq_handlers(dm->adev)) {
3456 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3457 			goto fail;
3458 		}
3459 		break;
3460 #endif
3461 	case CHIP_BONAIRE:
3462 	case CHIP_HAWAII:
3463 	case CHIP_KAVERI:
3464 	case CHIP_KABINI:
3465 	case CHIP_MULLINS:
3466 	case CHIP_TONGA:
3467 	case CHIP_FIJI:
3468 	case CHIP_CARRIZO:
3469 	case CHIP_STONEY:
3470 	case CHIP_POLARIS11:
3471 	case CHIP_POLARIS10:
3472 	case CHIP_POLARIS12:
3473 	case CHIP_VEGAM:
3474 	case CHIP_VEGA10:
3475 	case CHIP_VEGA12:
3476 	case CHIP_VEGA20:
3477 		if (dce110_register_irq_handlers(dm->adev)) {
3478 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3479 			goto fail;
3480 		}
3481 		break;
3482 #if defined(CONFIG_DRM_AMD_DC_DCN)
3483 	case CHIP_RAVEN:
3484 	case CHIP_NAVI12:
3485 	case CHIP_NAVI10:
3486 	case CHIP_NAVI14:
3487 	case CHIP_RENOIR:
3488 	case CHIP_SIENNA_CICHLID:
3489 	case CHIP_NAVY_FLOUNDER:
3490 	case CHIP_DIMGREY_CAVEFISH:
3491 	case CHIP_VANGOGH:
3492 		if (dcn10_register_irq_handlers(dm->adev)) {
3493 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3494 			goto fail;
3495 		}
3496 		break;
3497 #endif
3498 	default:
3499 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3500 		goto fail;
3501 	}
3502 
3503 	return 0;
3504 fail:
3505 	kfree(aencoder);
3506 	kfree(aconnector);
3507 
3508 	return -EINVAL;
3509 }
3510 
3511 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3512 {
3513 	drm_mode_config_cleanup(dm->ddev);
3514 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3516 }
3517 
3518 /******************************************************************************
3519  * amdgpu_display_funcs functions
3520  *****************************************************************************/
3521 
3522 /*
3523  * dm_bandwidth_update - program display watermarks
3524  *
3525  * @adev: amdgpu_device pointer
3526  *
3527  * Calculate and program the display watermarks and line buffer allocation.
3528  */
3529 static void dm_bandwidth_update(struct amdgpu_device *adev)
3530 {
3531 	/* TODO: implement later */
3532 }
3533 
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
3547 
3548 #if defined(CONFIG_DEBUG_KERNEL_DC)
3549 
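/*
 * Debug-only sysfs hook: writing a non-zero value fakes a resume and fires
 * a hotplug event, writing zero fakes a suspend. This exercises the S3
 * paths without going through full system power management.
 */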
3550 static ssize_t s3_debug_store(struct device *device,
3551 			      struct device_attribute *attr,
3552 			      const char *buf,
3553 			      size_t count)
3554 {
3555 	int ret;
3556 	int s3_state;
3557 	struct drm_device *drm_dev = dev_get_drvdata(device);
3558 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3559 
3560 	ret = kstrtoint(buf, 0, &s3_state);
3561 
	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else {
			dm_suspend(adev);
		}
	}

	/* Propagate the parse error so a failed write is not silently retried. */
	return ret == 0 ? count : ret;
3571 }
3572 
3573 DEVICE_ATTR_WO(s3_debug);
3574 
3575 #endif
3576 
3577 static int dm_early_init(void *handle)
3578 {
3579 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3580 
3581 	switch (adev->asic_type) {
3582 #if defined(CONFIG_DRM_AMD_DC_SI)
3583 	case CHIP_TAHITI:
3584 	case CHIP_PITCAIRN:
3585 	case CHIP_VERDE:
3586 		adev->mode_info.num_crtc = 6;
3587 		adev->mode_info.num_hpd = 6;
3588 		adev->mode_info.num_dig = 6;
3589 		break;
3590 	case CHIP_OLAND:
3591 		adev->mode_info.num_crtc = 2;
3592 		adev->mode_info.num_hpd = 2;
3593 		adev->mode_info.num_dig = 2;
3594 		break;
3595 #endif
3596 	case CHIP_BONAIRE:
3597 	case CHIP_HAWAII:
3598 		adev->mode_info.num_crtc = 6;
3599 		adev->mode_info.num_hpd = 6;
3600 		adev->mode_info.num_dig = 6;
3601 		break;
3602 	case CHIP_KAVERI:
3603 		adev->mode_info.num_crtc = 4;
3604 		adev->mode_info.num_hpd = 6;
3605 		adev->mode_info.num_dig = 7;
3606 		break;
3607 	case CHIP_KABINI:
3608 	case CHIP_MULLINS:
3609 		adev->mode_info.num_crtc = 2;
3610 		adev->mode_info.num_hpd = 6;
3611 		adev->mode_info.num_dig = 6;
3612 		break;
3613 	case CHIP_FIJI:
3614 	case CHIP_TONGA:
3615 		adev->mode_info.num_crtc = 6;
3616 		adev->mode_info.num_hpd = 6;
3617 		adev->mode_info.num_dig = 7;
3618 		break;
3619 	case CHIP_CARRIZO:
3620 		adev->mode_info.num_crtc = 3;
3621 		adev->mode_info.num_hpd = 6;
3622 		adev->mode_info.num_dig = 9;
3623 		break;
3624 	case CHIP_STONEY:
3625 		adev->mode_info.num_crtc = 2;
3626 		adev->mode_info.num_hpd = 6;
3627 		adev->mode_info.num_dig = 9;
3628 		break;
3629 	case CHIP_POLARIS11:
3630 	case CHIP_POLARIS12:
3631 		adev->mode_info.num_crtc = 5;
3632 		adev->mode_info.num_hpd = 5;
3633 		adev->mode_info.num_dig = 5;
3634 		break;
3635 	case CHIP_POLARIS10:
3636 	case CHIP_VEGAM:
3637 		adev->mode_info.num_crtc = 6;
3638 		adev->mode_info.num_hpd = 6;
3639 		adev->mode_info.num_dig = 6;
3640 		break;
3641 	case CHIP_VEGA10:
3642 	case CHIP_VEGA12:
3643 	case CHIP_VEGA20:
3644 		adev->mode_info.num_crtc = 6;
3645 		adev->mode_info.num_hpd = 6;
3646 		adev->mode_info.num_dig = 6;
3647 		break;
3648 #if defined(CONFIG_DRM_AMD_DC_DCN)
3649 	case CHIP_RAVEN:
3650 	case CHIP_RENOIR:
3651 	case CHIP_VANGOGH:
3652 		adev->mode_info.num_crtc = 4;
3653 		adev->mode_info.num_hpd = 4;
3654 		adev->mode_info.num_dig = 4;
3655 		break;
3656 	case CHIP_NAVI10:
3657 	case CHIP_NAVI12:
3658 	case CHIP_SIENNA_CICHLID:
3659 	case CHIP_NAVY_FLOUNDER:
3660 		adev->mode_info.num_crtc = 6;
3661 		adev->mode_info.num_hpd = 6;
3662 		adev->mode_info.num_dig = 6;
3663 		break;
3664 	case CHIP_NAVI14:
3665 	case CHIP_DIMGREY_CAVEFISH:
3666 		adev->mode_info.num_crtc = 5;
3667 		adev->mode_info.num_hpd = 5;
3668 		adev->mode_info.num_dig = 5;
3669 		break;
3670 #endif
3671 	default:
3672 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3673 		return -EINVAL;
3674 	}
3675 
3676 	amdgpu_dm_set_irq_funcs(adev);
3677 
	if (!adev->mode_info.funcs)
		adev->mode_info.funcs = &dm_display_funcs;
3680 
3681 	/*
3682 	 * Note: Do NOT change adev->audio_endpt_rreg and
3683 	 * adev->audio_endpt_wreg because they are initialised in
3684 	 * amdgpu_device_init()
3685 	 */
3686 #if defined(CONFIG_DEBUG_KERNEL_DC)
3687 	device_create_file(
3688 		adev_to_drm(adev)->dev,
3689 		&dev_attr_s3_debug);
3690 #endif
3691 
3692 	return 0;
3693 }
3694 
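/*
 * A modeset is required when the CRTC will be active and the atomic state
 * flags a modeset; a modereset when it will be inactive. The stream
 * arguments are currently unused.
 */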
3695 static bool modeset_required(struct drm_crtc_state *crtc_state,
3696 			     struct dc_stream_state *new_stream,
3697 			     struct dc_stream_state *old_stream)
3698 {
3699 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3700 }
3701 
3702 static bool modereset_required(struct drm_crtc_state *crtc_state)
3703 {
3704 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3705 }
3706 
3707 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3708 {
3709 	drm_encoder_cleanup(encoder);
3710 	kfree(encoder);
3711 }
3712 
3713 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3714 	.destroy = amdgpu_dm_encoder_destroy,
3715 };
3716 
3717 
3718 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3719 				struct dc_scaling_info *scaling_info)
3720 {
3721 	int scale_w, scale_h;
3722 
3723 	memset(scaling_info, 0, sizeof(*scaling_info));
3724 
	/* Source is 16.16 fixed point; ignore the fractional part for now. */
3726 	scaling_info->src_rect.x = state->src_x >> 16;
3727 	scaling_info->src_rect.y = state->src_y >> 16;
3728 
3729 	scaling_info->src_rect.width = state->src_w >> 16;
3730 	if (scaling_info->src_rect.width == 0)
3731 		return -EINVAL;
3732 
3733 	scaling_info->src_rect.height = state->src_h >> 16;
3734 	if (scaling_info->src_rect.height == 0)
3735 		return -EINVAL;
3736 
3737 	scaling_info->dst_rect.x = state->crtc_x;
3738 	scaling_info->dst_rect.y = state->crtc_y;
3739 
3740 	if (state->crtc_w == 0)
3741 		return -EINVAL;
3742 
3743 	scaling_info->dst_rect.width = state->crtc_w;
3744 
3745 	if (state->crtc_h == 0)
3746 		return -EINVAL;
3747 
3748 	scaling_info->dst_rect.height = state->crtc_h;
3749 
3750 	/* DRM doesn't specify clipping on destination output. */
3751 	scaling_info->clip_rect = scaling_info->dst_rect;
3752 
3753 	/* TODO: Validate scaling per-format with DC plane caps */
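	/* Scale factors below are in 1/1000 units, i.e. 0.25x to 16x. */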
3754 	scale_w = scaling_info->dst_rect.width * 1000 /
3755 		  scaling_info->src_rect.width;
3756 
3757 	if (scale_w < 250 || scale_w > 16000)
3758 		return -EINVAL;
3759 
3760 	scale_h = scaling_info->dst_rect.height * 1000 /
3761 		  scaling_info->src_rect.height;
3762 
3763 	if (scale_h < 250 || scale_h > 16000)
3764 		return -EINVAL;
3765 
3766 	/*
3767 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3768 	 * assume reasonable defaults based on the format.
3769 	 */
3770 
3771 	return 0;
3772 }
3773 
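/*
 * Translate pre-GFX9 tiling flags from the BO metadata into DC's gfx8
 * tiling description. Surfaces that are neither 1D- nor 2D-tiled keep
 * the zeroed (linear) defaults.
 */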
3774 static void
3775 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3776 				 uint64_t tiling_flags)
3777 {
3778 	/* Fill GFX8 params */
3779 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3780 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3781 
3782 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3783 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3784 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3785 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3786 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3787 
3788 		/* XXX fix me for VI */
3789 		tiling_info->gfx8.num_banks = num_banks;
3790 		tiling_info->gfx8.array_mode =
3791 				DC_ARRAY_2D_TILED_THIN1;
3792 		tiling_info->gfx8.tile_split = tile_split;
3793 		tiling_info->gfx8.bank_width = bankw;
3794 		tiling_info->gfx8.bank_height = bankh;
3795 		tiling_info->gfx8.tile_aspect = mtaspect;
3796 		tiling_info->gfx8.tile_mode =
3797 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3798 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3799 			== DC_ARRAY_1D_TILED_THIN1) {
3800 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3801 	}
3802 
3803 	tiling_info->gfx8.pipe_config =
3804 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3805 }
3806 
3807 static void
3808 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3809 				  union dc_tiling_info *tiling_info)
3810 {
3811 	tiling_info->gfx9.num_pipes =
3812 		adev->gfx.config.gb_addr_config_fields.num_pipes;
3813 	tiling_info->gfx9.num_banks =
3814 		adev->gfx.config.gb_addr_config_fields.num_banks;
3815 	tiling_info->gfx9.pipe_interleave =
3816 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3817 	tiling_info->gfx9.num_shader_engines =
3818 		adev->gfx.config.gb_addr_config_fields.num_se;
3819 	tiling_info->gfx9.max_compressed_frags =
3820 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3821 	tiling_info->gfx9.num_rb_per_se =
3822 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3823 	tiling_info->gfx9.shaderEnable = 1;
3824 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3825 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
3826 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3827 	    adev->asic_type == CHIP_VANGOGH)
3828 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3829 }
3830 
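/*
 * Ask DC whether the requested DCC parameters can actually be supported
 * for this surface. Returns 0 if DCC is disabled or supported, -EINVAL
 * otherwise.
 */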
3831 static int
3832 validate_dcc(struct amdgpu_device *adev,
3833 	     const enum surface_pixel_format format,
3834 	     const enum dc_rotation_angle rotation,
3835 	     const union dc_tiling_info *tiling_info,
3836 	     const struct dc_plane_dcc_param *dcc,
3837 	     const struct dc_plane_address *address,
3838 	     const struct plane_size *plane_size)
3839 {
3840 	struct dc *dc = adev->dm.dc;
3841 	struct dc_dcc_surface_param input;
3842 	struct dc_surface_dcc_cap output;
3843 
3844 	memset(&input, 0, sizeof(input));
3845 	memset(&output, 0, sizeof(output));
3846 
3847 	if (!dcc->enable)
3848 		return 0;
3849 
3850 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3851 	    !dc->cap_funcs.get_dcc_compression_cap)
3852 		return -EINVAL;
3853 
3854 	input.format = format;
3855 	input.surface_size.width = plane_size->surface_size.width;
3856 	input.surface_size.height = plane_size->surface_size.height;
3857 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3858 
3859 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3860 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3861 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3862 		input.scan = SCAN_DIRECTION_VERTICAL;
3863 
3864 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3865 		return -EINVAL;
3866 
3867 	if (!output.capable)
3868 		return -EINVAL;
3869 
3870 	if (dcc->independent_64b_blks == 0 &&
3871 	    output.grph.rgb.independent_64b_blks != 0)
3872 		return -EINVAL;
3873 
3874 	return 0;
3875 }
3876 
3877 static bool
3878 modifier_has_dcc(uint64_t modifier)
3879 {
3880 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3881 }
3882 
3883 static unsigned
3884 modifier_gfx9_swizzle_mode(uint64_t modifier)
3885 {
3886 	if (modifier == DRM_FORMAT_MOD_LINEAR)
3887 		return 0;
3888 
3889 	return AMD_FMT_MOD_GET(TILE, modifier);
3890 }
3891 
3892 static const struct drm_format_info *
3893 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3894 {
3895 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3896 }
3897 
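/*
 * Derive gfx9 tiling parameters from an AMD format modifier. The device
 * defaults are filled in first and then overridden with the pipe/bank
 * layout encoded in the modifier; non-AMD modifiers (e.g. LINEAR) keep
 * the device values.
 */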
3898 static void
3899 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3900 				    union dc_tiling_info *tiling_info,
3901 				    uint64_t modifier)
3902 {
3903 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3904 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3905 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3906 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3907 
3908 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
3909 
3910 	if (!IS_AMD_FMT_MOD(modifier))
3911 		return;
3912 
3913 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3914 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3915 
3916 	if (adev->family >= AMDGPU_FAMILY_NV) {
3917 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3918 	} else {
3919 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3920 
3921 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3922 	}
3923 }
3924 
3925 enum dm_micro_swizzle {
3926 	MICRO_SWIZZLE_Z = 0,
3927 	MICRO_SWIZZLE_S = 1,
3928 	MICRO_SWIZZLE_D = 2,
3929 	MICRO_SWIZZLE_R = 3
3930 };
3931 
3932 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3933 					  uint32_t format,
3934 					  uint64_t modifier)
3935 {
3936 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
3937 	const struct drm_format_info *info = drm_format_info(format);
3938 
3939 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3940 
3941 	if (!info)
3942 		return false;
3943 
3944 	/*
3945 	 * We always have to allow this modifier, because core DRM still
3946 	 * checks LINEAR support if userspace does not provide modifers.
3947 	 */
3948 	if (modifier == DRM_FORMAT_MOD_LINEAR)
3949 		return true;
3950 
3951 	/*
3952 	 * The arbitrary tiling support for multiplane formats has not been hooked
3953 	 * up.
3954 	 */
3955 	if (info->num_planes > 1)
3956 		return false;
3957 
3958 	/*
3959 	 * For D swizzle the canonical modifier depends on the bpp, so check
3960 	 * it here.
3961 	 */
3962 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
3963 	    adev->family >= AMDGPU_FAMILY_NV) {
3964 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
3965 			return false;
3966 	}
3967 
3968 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
3969 	    info->cpp[0] < 8)
3970 		return false;
3971 
3972 	if (modifier_has_dcc(modifier)) {
3973 		/* Per radeonsi comments 16/64 bpp are more complicated. */
3974 		if (info->cpp[0] != 4)
3975 			return false;
3976 	}
3977 
3978 	return true;
3979 }
3980 
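/*
 * Append a modifier to a dynamically grown array. On allocation failure
 * the array is freed and *mods set to NULL, which makes subsequent calls
 * no-ops so the failure is reported once, at the end, by the caller.
 */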
3981 static void
3982 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
3983 {
3984 	if (!*mods)
3985 		return;
3986 
3987 	if (*cap - *size < 1) {
3988 		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
3990 
3991 		if (!new_mods) {
3992 			kfree(*mods);
3993 			*mods = NULL;
3994 			return;
3995 		}
3996 
3997 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
3998 		kfree(*mods);
3999 		*mods = new_mods;
4000 		*cap = new_cap;
4001 	}
4002 
4003 	(*mods)[*size] = mod;
4004 	*size += 1;
4005 }
4006 
4007 static void
4008 add_gfx9_modifiers(const struct amdgpu_device *adev,
4009 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4010 {
4011 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4012 	int pipe_xor_bits = min(8, pipes +
4013 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4014 	int bank_xor_bits = min(8 - pipe_xor_bits,
4015 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4016 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);

	if (adev->family == AMDGPU_FAMILY_RV) {
4021 		/* Raven2 and later */
4022 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4023 
4024 		/*
4025 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4026 		 * doesn't support _D on DCN
4027 		 */
4028 
4029 		if (has_constant_encode) {
4030 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4031 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4032 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4033 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4034 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4035 				    AMD_FMT_MOD_SET(DCC, 1) |
4036 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4037 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4038 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4039 		}
4040 
4041 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4042 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4043 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4044 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4045 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4046 			    AMD_FMT_MOD_SET(DCC, 1) |
4047 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4048 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4049 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4050 
4051 		if (has_constant_encode) {
4052 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4053 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4054 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4055 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4056 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4057 				    AMD_FMT_MOD_SET(DCC, 1) |
4058 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4059 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4063 				    AMD_FMT_MOD_SET(RB, rb) |
4064 				    AMD_FMT_MOD_SET(PIPE, pipes));
4065 		}
4066 
4067 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4068 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4069 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4070 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4071 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4072 			    AMD_FMT_MOD_SET(DCC, 1) |
4073 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4074 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4075 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4076 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4077 			    AMD_FMT_MOD_SET(RB, rb) |
4078 			    AMD_FMT_MOD_SET(PIPE, pipes));
4079 	}
4080 
4081 	/*
4082 	 * Only supported for 64bpp on Raven, will be filtered on format in
4083 	 * dm_plane_format_mod_supported.
4084 	 */
4085 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4086 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4087 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4088 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4089 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4090 
4091 	if (adev->family == AMDGPU_FAMILY_RV) {
4092 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4093 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4094 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4095 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4096 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4097 	}
4098 
4099 	/*
4100 	 * Only supported for 64bpp on Raven, will be filtered on format in
4101 	 * dm_plane_format_mod_supported.
4102 	 */
4103 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4104 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4105 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4106 
4107 	if (adev->family == AMDGPU_FAMILY_RV) {
4108 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4109 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4110 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4111 	}
4112 }
4113 
4114 static void
4115 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4116 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4117 {
4118 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4119 
4120 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4121 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4122 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4123 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4124 		    AMD_FMT_MOD_SET(DCC, 1) |
4125 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4126 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4127 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4128 
4129 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4130 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4131 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4132 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4133 		    AMD_FMT_MOD_SET(DCC, 1) |
4134 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4135 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4136 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4137 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4138 
4139 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4140 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4141 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4142 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4143 
4144 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4145 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4146 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4151 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4152 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4153 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4154 
4155 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4156 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4157 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4158 }
4159 
4160 static void
4161 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4162 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4163 {
4164 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4165 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4166 
4167 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4168 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4169 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4170 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4171 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4172 		    AMD_FMT_MOD_SET(DCC, 1) |
4173 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4174 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4175 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4176 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4177 
4178 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4179 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4180 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4181 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4182 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4183 		    AMD_FMT_MOD_SET(DCC, 1) |
4184 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4185 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4186 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4187 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4188 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4189 
4190 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4191 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4192 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4193 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4194 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4195 
4196 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4197 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4198 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4199 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4200 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4201 
4202 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4203 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4204 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4205 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4206 
4207 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4208 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4209 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4210 }
4211 
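/*
 * Build the list of format modifiers to advertise for a plane. The list
 * is always terminated with DRM_FORMAT_MOD_INVALID; pre-GFX9 families
 * advertise no modifiers at all.
 */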
4212 static int
4213 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4214 {
4215 	uint64_t size = 0, capacity = 128;
4216 	*mods = NULL;
4217 
4218 	/* We have not hooked up any pre-GFX9 modifiers. */
4219 	if (adev->family < AMDGPU_FAMILY_AI)
4220 		return 0;
4221 
	*mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
4223 
4224 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4225 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4226 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4227 		return *mods ? 0 : -ENOMEM;
4228 	}
4229 
4230 	switch (adev->family) {
4231 	case AMDGPU_FAMILY_AI:
4232 	case AMDGPU_FAMILY_RV:
4233 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4234 		break;
4235 	case AMDGPU_FAMILY_NV:
4236 	case AMDGPU_FAMILY_VGH:
4237 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4238 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4239 		else
4240 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4241 		break;
4242 	}
4243 
4244 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4245 
4246 	/* INVALID marks the end of the list. */
4247 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4248 
4249 	if (!*mods)
4250 		return -ENOMEM;
4251 
4252 	return 0;
4253 }
4254 
4255 static int
4256 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4257 					  const struct amdgpu_framebuffer *afb,
4258 					  const enum surface_pixel_format format,
4259 					  const enum dc_rotation_angle rotation,
4260 					  const struct plane_size *plane_size,
4261 					  union dc_tiling_info *tiling_info,
4262 					  struct dc_plane_dcc_param *dcc,
4263 					  struct dc_plane_address *address,
4264 					  const bool force_disable_dcc)
4265 {
4266 	const uint64_t modifier = afb->base.modifier;
4267 	int ret;
4268 
4269 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4270 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4271 
4272 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4273 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4274 
4275 		dcc->enable = 1;
4276 		dcc->meta_pitch = afb->base.pitches[1];
4277 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4278 
4279 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4280 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4281 	}
4282 
4283 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4284 	if (ret)
4285 		return ret;
4286 
4287 	return 0;
4288 }
4289 
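/*
 * Fill DC's plane size, tiling, DCC and address state from the amdgpu
 * framebuffer. RGB surfaces use a single graphics address; video formats
 * carry separate luma and chroma planes.
 */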
4290 static int
4291 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4292 			     const struct amdgpu_framebuffer *afb,
4293 			     const enum surface_pixel_format format,
4294 			     const enum dc_rotation_angle rotation,
4295 			     const uint64_t tiling_flags,
4296 			     union dc_tiling_info *tiling_info,
4297 			     struct plane_size *plane_size,
4298 			     struct dc_plane_dcc_param *dcc,
4299 			     struct dc_plane_address *address,
4300 			     bool tmz_surface,
4301 			     bool force_disable_dcc)
4302 {
4303 	const struct drm_framebuffer *fb = &afb->base;
4304 	int ret;
4305 
4306 	memset(tiling_info, 0, sizeof(*tiling_info));
4307 	memset(plane_size, 0, sizeof(*plane_size));
4308 	memset(dcc, 0, sizeof(*dcc));
4309 	memset(address, 0, sizeof(*address));
4310 
4311 	address->tmz_surface = tmz_surface;
4312 
4313 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4314 		uint64_t addr = afb->address + fb->offsets[0];
4315 
4316 		plane_size->surface_size.x = 0;
4317 		plane_size->surface_size.y = 0;
4318 		plane_size->surface_size.width = fb->width;
4319 		plane_size->surface_size.height = fb->height;
4320 		plane_size->surface_pitch =
4321 			fb->pitches[0] / fb->format->cpp[0];
4322 
4323 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4324 		address->grph.addr.low_part = lower_32_bits(addr);
4325 		address->grph.addr.high_part = upper_32_bits(addr);
4326 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4327 		uint64_t luma_addr = afb->address + fb->offsets[0];
4328 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4329 
4330 		plane_size->surface_size.x = 0;
4331 		plane_size->surface_size.y = 0;
4332 		plane_size->surface_size.width = fb->width;
4333 		plane_size->surface_size.height = fb->height;
4334 		plane_size->surface_pitch =
4335 			fb->pitches[0] / fb->format->cpp[0];
4336 
4337 		plane_size->chroma_size.x = 0;
4338 		plane_size->chroma_size.y = 0;
4339 		/* TODO: set these based on surface format */
4340 		plane_size->chroma_size.width = fb->width / 2;
4341 		plane_size->chroma_size.height = fb->height / 2;
4342 
4343 		plane_size->chroma_pitch =
4344 			fb->pitches[1] / fb->format->cpp[1];
4345 
4346 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4347 		address->video_progressive.luma_addr.low_part =
4348 			lower_32_bits(luma_addr);
4349 		address->video_progressive.luma_addr.high_part =
4350 			upper_32_bits(luma_addr);
4351 		address->video_progressive.chroma_addr.low_part =
4352 			lower_32_bits(chroma_addr);
4353 		address->video_progressive.chroma_addr.high_part =
4354 			upper_32_bits(chroma_addr);
4355 	}
4356 
4357 	if (adev->family >= AMDGPU_FAMILY_AI) {
4358 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4359 								rotation, plane_size,
4360 								tiling_info, dcc,
4361 								address,
4362 								force_disable_dcc);
4363 		if (ret)
4364 			return ret;
4365 	} else {
4366 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4367 	}
4368 
4369 	return 0;
4370 }
4371 
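/*
 * Map DRM blend state onto DC's blending controls: pre-multiplied
 * per-pixel alpha is honoured only for overlay planes with an
 * alpha-capable format, and a plane alpha below 0xffff enables
 * global alpha.
 */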
4372 static void
4373 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4374 			       bool *per_pixel_alpha, bool *global_alpha,
4375 			       int *global_alpha_value)
4376 {
4377 	*per_pixel_alpha = false;
4378 	*global_alpha = false;
4379 	*global_alpha_value = 0xff;
4380 
4381 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4382 		return;
4383 
4384 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4385 		static const uint32_t alpha_formats[] = {
4386 			DRM_FORMAT_ARGB8888,
4387 			DRM_FORMAT_RGBA8888,
4388 			DRM_FORMAT_ABGR8888,
4389 		};
4390 		uint32_t format = plane_state->fb->format->format;
4391 		unsigned int i;
4392 
4393 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4394 			if (format == alpha_formats[i]) {
4395 				*per_pixel_alpha = true;
4396 				break;
4397 			}
4398 		}
4399 	}
4400 
4401 	if (plane_state->alpha < 0xffff) {
4402 		*global_alpha = true;
4403 		*global_alpha_value = plane_state->alpha >> 8;
4404 	}
4405 }
4406 
4407 static int
4408 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4409 			    const enum surface_pixel_format format,
4410 			    enum dc_color_space *color_space)
4411 {
4412 	bool full_range;
4413 
4414 	*color_space = COLOR_SPACE_SRGB;
4415 
4416 	/* DRM color properties only affect non-RGB formats. */
4417 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4418 		return 0;
4419 
4420 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4421 
4422 	switch (plane_state->color_encoding) {
4423 	case DRM_COLOR_YCBCR_BT601:
4424 		if (full_range)
4425 			*color_space = COLOR_SPACE_YCBCR601;
4426 		else
4427 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4428 		break;
4429 
4430 	case DRM_COLOR_YCBCR_BT709:
4431 		if (full_range)
4432 			*color_space = COLOR_SPACE_YCBCR709;
4433 		else
4434 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4435 		break;
4436 
4437 	case DRM_COLOR_YCBCR_BT2020:
4438 		if (full_range)
4439 			*color_space = COLOR_SPACE_2020_YCBCR;
4440 		else
4441 			return -EINVAL;
4442 		break;
4443 
4444 	default:
4445 		return -EINVAL;
4446 	}
4447 
4448 	return 0;
4449 }
4450 
4451 static int
4452 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4453 			    const struct drm_plane_state *plane_state,
4454 			    const uint64_t tiling_flags,
4455 			    struct dc_plane_info *plane_info,
4456 			    struct dc_plane_address *address,
4457 			    bool tmz_surface,
4458 			    bool force_disable_dcc)
4459 {
4460 	const struct drm_framebuffer *fb = plane_state->fb;
4461 	const struct amdgpu_framebuffer *afb =
4462 		to_amdgpu_framebuffer(plane_state->fb);
4463 	struct drm_format_name_buf format_name;
4464 	int ret;
4465 
4466 	memset(plane_info, 0, sizeof(*plane_info));
4467 
4468 	switch (fb->format->format) {
4469 	case DRM_FORMAT_C8:
4470 		plane_info->format =
4471 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4472 		break;
4473 	case DRM_FORMAT_RGB565:
4474 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4475 		break;
4476 	case DRM_FORMAT_XRGB8888:
4477 	case DRM_FORMAT_ARGB8888:
4478 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4479 		break;
4480 	case DRM_FORMAT_XRGB2101010:
4481 	case DRM_FORMAT_ARGB2101010:
4482 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4483 		break;
4484 	case DRM_FORMAT_XBGR2101010:
4485 	case DRM_FORMAT_ABGR2101010:
4486 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4487 		break;
4488 	case DRM_FORMAT_XBGR8888:
4489 	case DRM_FORMAT_ABGR8888:
4490 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4491 		break;
4492 	case DRM_FORMAT_NV21:
4493 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4494 		break;
4495 	case DRM_FORMAT_NV12:
4496 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4497 		break;
4498 	case DRM_FORMAT_P010:
4499 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4500 		break;
4501 	case DRM_FORMAT_XRGB16161616F:
4502 	case DRM_FORMAT_ARGB16161616F:
4503 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4504 		break;
4505 	case DRM_FORMAT_XBGR16161616F:
4506 	case DRM_FORMAT_ABGR16161616F:
4507 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4508 		break;
4509 	default:
4510 		DRM_ERROR(
4511 			"Unsupported screen format %s\n",
4512 			drm_get_format_name(fb->format->format, &format_name));
4513 		return -EINVAL;
4514 	}
4515 
4516 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4517 	case DRM_MODE_ROTATE_0:
4518 		plane_info->rotation = ROTATION_ANGLE_0;
4519 		break;
4520 	case DRM_MODE_ROTATE_90:
4521 		plane_info->rotation = ROTATION_ANGLE_90;
4522 		break;
4523 	case DRM_MODE_ROTATE_180:
4524 		plane_info->rotation = ROTATION_ANGLE_180;
4525 		break;
4526 	case DRM_MODE_ROTATE_270:
4527 		plane_info->rotation = ROTATION_ANGLE_270;
4528 		break;
4529 	default:
4530 		plane_info->rotation = ROTATION_ANGLE_0;
4531 		break;
4532 	}
4533 
4534 	plane_info->visible = true;
4535 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4536 
4537 	plane_info->layer_index = 0;
4538 
4539 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4540 					  &plane_info->color_space);
4541 	if (ret)
4542 		return ret;
4543 
4544 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4545 					   plane_info->rotation, tiling_flags,
4546 					   &plane_info->tiling_info,
4547 					   &plane_info->plane_size,
4548 					   &plane_info->dcc, address, tmz_surface,
4549 					   force_disable_dcc);
4550 	if (ret)
4551 		return ret;
4552 
4553 	fill_blending_from_plane_state(
4554 		plane_state, &plane_info->per_pixel_alpha,
4555 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4556 
4557 	return 0;
4558 }
4559 
4560 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4561 				    struct dc_plane_state *dc_plane_state,
4562 				    struct drm_plane_state *plane_state,
4563 				    struct drm_crtc_state *crtc_state)
4564 {
4565 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4566 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4567 	struct dc_scaling_info scaling_info;
4568 	struct dc_plane_info plane_info;
4569 	int ret;
4570 	bool force_disable_dcc = false;
4571 
4572 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4573 	if (ret)
4574 		return ret;
4575 
4576 	dc_plane_state->src_rect = scaling_info.src_rect;
4577 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4578 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4579 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4580 
4581 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4582 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4583 					  afb->tiling_flags,
4584 					  &plane_info,
4585 					  &dc_plane_state->address,
4586 					  afb->tmz_surface,
4587 					  force_disable_dcc);
4588 	if (ret)
4589 		return ret;
4590 
4591 	dc_plane_state->format = plane_info.format;
4592 	dc_plane_state->color_space = plane_info.color_space;
4594 	dc_plane_state->plane_size = plane_info.plane_size;
4595 	dc_plane_state->rotation = plane_info.rotation;
4596 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4597 	dc_plane_state->stereo_format = plane_info.stereo_format;
4598 	dc_plane_state->tiling_info = plane_info.tiling_info;
4599 	dc_plane_state->visible = plane_info.visible;
4600 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4601 	dc_plane_state->global_alpha = plane_info.global_alpha;
4602 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4603 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4605 
4606 	/*
4607 	 * Always set input transfer function, since plane state is refreshed
4608 	 * every time.
4609 	 */
4610 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4611 	if (ret)
4612 		return ret;
4613 
4614 	return 0;
4615 }
4616 
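/*
 * Compute the stream's src (viewport) and dst (addressable area) rects
 * from the connector scaling mode: RMX_ASPECT/RMX_OFF preserve the source
 * aspect ratio, RMX_CENTER displays the mode unscaled, and underscan
 * shrinks the destination by the configured borders.
 */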
4617 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4618 					   const struct dm_connector_state *dm_state,
4619 					   struct dc_stream_state *stream)
4620 {
4621 	enum amdgpu_rmx_type rmx_type;
4622 
	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */
4625 
4626 	/* no mode. nothing to be done */
4627 	if (!mode)
4628 		return;
4629 
4630 	/* Full screen scaling by default */
4631 	src.width = mode->hdisplay;
4632 	src.height = mode->vdisplay;
4633 	dst.width = stream->timing.h_addressable;
4634 	dst.height = stream->timing.v_addressable;
4635 
4636 	if (dm_state) {
4637 		rmx_type = dm_state->scaling;
4638 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4639 			if (src.width * dst.height <
4640 					src.height * dst.width) {
4641 				/* height needs less upscaling/more downscaling */
4642 				dst.width = src.width *
4643 						dst.height / src.height;
4644 			} else {
4645 				/* width needs less upscaling/more downscaling */
4646 				dst.height = src.height *
4647 						dst.width / src.width;
4648 			}
4649 		} else if (rmx_type == RMX_CENTER) {
4650 			dst = src;
4651 		}
4652 
4653 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4654 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4655 
4656 		if (dm_state->underscan_enable) {
4657 			dst.x += dm_state->underscan_hborder / 2;
4658 			dst.y += dm_state->underscan_vborder / 2;
4659 			dst.width -= dm_state->underscan_hborder;
4660 			dst.height -= dm_state->underscan_vborder;
4661 		}
4662 	}
4663 
4664 	stream->src = src;
4665 	stream->dst = dst;
4666 
4667 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
			dst.x, dst.y, dst.width, dst.height);
4670 }
4671 
4672 static enum dc_color_depth
4673 convert_color_depth_from_display_info(const struct drm_connector *connector,
4674 				      bool is_y420, int requested_bpc)
4675 {
4676 	uint8_t bpc;
4677 
4678 	if (is_y420) {
4679 		bpc = 8;
4680 
4681 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4682 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4683 			bpc = 16;
4684 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4685 			bpc = 12;
4686 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4687 			bpc = 10;
4688 	} else {
4689 		bpc = (uint8_t)connector->display_info.bpc;
4690 		/* Assume 8 bpc by default if no bpc is specified. */
4691 		bpc = bpc ? bpc : 8;
4692 	}
4693 
4694 	if (requested_bpc > 0) {
4695 		/*
4696 		 * Cap display bpc based on the user requested value.
4697 		 *
4698 		 * The value for state->max_bpc may not correctly updated
4699 		 * depending on when the connector gets added to the state
4700 		 * or if this was called outside of atomic check, so it
4701 		 * can't be used directly.
4702 		 */
4703 		bpc = min_t(u8, bpc, requested_bpc);
4704 
4705 		/* Round down to the nearest even number. */
4706 		bpc = bpc - (bpc & 1);
4707 	}
4708 
4709 	switch (bpc) {
4710 	case 0:
4711 		/*
4712 		 * Temporary Work around, DRM doesn't parse color depth for
4713 		 * EDID revision before 1.4
4714 		 * TODO: Fix edid parsing
4715 		 */
4716 		return COLOR_DEPTH_888;
4717 	case 6:
4718 		return COLOR_DEPTH_666;
4719 	case 8:
4720 		return COLOR_DEPTH_888;
4721 	case 10:
4722 		return COLOR_DEPTH_101010;
4723 	case 12:
4724 		return COLOR_DEPTH_121212;
4725 	case 14:
4726 		return COLOR_DEPTH_141414;
4727 	case 16:
4728 		return COLOR_DEPTH_161616;
4729 	default:
4730 		return COLOR_DEPTH_UNDEFINED;
4731 	}
4732 }
4733 
4734 static enum dc_aspect_ratio
4735 get_aspect_ratio(const struct drm_display_mode *mode_in)
4736 {
4737 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4738 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4739 }
4740 
4741 static enum dc_color_space
4742 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4743 {
4744 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4745 
4746 	switch (dc_crtc_timing->pixel_encoding)	{
4747 	case PIXEL_ENCODING_YCBCR422:
4748 	case PIXEL_ENCODING_YCBCR444:
4749 	case PIXEL_ENCODING_YCBCR420:
4750 	{
4751 		/*
4752 		 * 27030khz is the separation point between HDTV and SDTV
4753 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4754 		 * respectively
4755 		 */
4756 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4757 			if (dc_crtc_timing->flags.Y_ONLY)
4758 				color_space =
4759 					COLOR_SPACE_YCBCR709_LIMITED;
4760 			else
4761 				color_space = COLOR_SPACE_YCBCR709;
4762 		} else {
4763 			if (dc_crtc_timing->flags.Y_ONLY)
4764 				color_space =
4765 					COLOR_SPACE_YCBCR601_LIMITED;
4766 			else
4767 				color_space = COLOR_SPACE_YCBCR601;
4768 		}
4769 
4770 	}
4771 	break;
4772 	case PIXEL_ENCODING_RGB:
4773 		color_space = COLOR_SPACE_SRGB;
4774 		break;
4775 
4776 	default:
4777 		WARN_ON(1);
4778 		break;
4779 	}
4780 
4781 	return color_space;
4782 }
4783 
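/*
 * Step the colour depth down until the resulting TMDS clock fits within
 * the sink's limit. Returns false if even 8 bpc exceeds the limit.
 */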
4784 static bool adjust_colour_depth_from_display_info(
4785 	struct dc_crtc_timing *timing_out,
4786 	const struct drm_display_info *info)
4787 {
4788 	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
4791 		normalized_clk = timing_out->pix_clk_100hz / 10;
4792 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4793 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4794 			normalized_clk /= 2;
4795 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4796 		switch (depth) {
4797 		case COLOR_DEPTH_888:
4798 			break;
4799 		case COLOR_DEPTH_101010:
4800 			normalized_clk = (normalized_clk * 30) / 24;
4801 			break;
4802 		case COLOR_DEPTH_121212:
4803 			normalized_clk = (normalized_clk * 36) / 24;
4804 			break;
4805 		case COLOR_DEPTH_161616:
4806 			normalized_clk = (normalized_clk * 48) / 24;
4807 			break;
4808 		default:
4809 			/* The above depths are the only ones valid for HDMI. */
4810 			return false;
4811 		}
4812 		if (normalized_clk <= info->max_tmds_clock) {
4813 			timing_out->display_color_depth = depth;
4814 			return true;
4815 		}
4816 	} while (--depth > COLOR_DEPTH_666);
4817 	return false;
4818 }
4819 
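/*
 * Translate a DRM display mode into DC CRTC timing. Pixel encoding and
 * color depth are chosen from the connector capabilities; when an old
 * stream is supplied, its VIC and sync polarities are carried over.
 */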
4820 static void fill_stream_properties_from_drm_display_mode(
4821 	struct dc_stream_state *stream,
4822 	const struct drm_display_mode *mode_in,
4823 	const struct drm_connector *connector,
4824 	const struct drm_connector_state *connector_state,
4825 	const struct dc_stream_state *old_stream,
4826 	int requested_bpc)
4827 {
4828 	struct dc_crtc_timing *timing_out = &stream->timing;
4829 	const struct drm_display_info *info = &connector->display_info;
4830 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4831 	struct hdmi_vendor_infoframe hv_frame;
4832 	struct hdmi_avi_infoframe avi_frame;
4833 
4834 	memset(&hv_frame, 0, sizeof(hv_frame));
4835 	memset(&avi_frame, 0, sizeof(avi_frame));
4836 
4837 	timing_out->h_border_left = 0;
4838 	timing_out->h_border_right = 0;
4839 	timing_out->v_border_top = 0;
4840 	timing_out->v_border_bottom = 0;
4841 	/* TODO: un-hardcode */
4842 	if (drm_mode_is_420_only(info, mode_in)
4843 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4844 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4845 	else if (drm_mode_is_420_also(info, mode_in)
4846 			&& aconnector->force_yuv420_output)
4847 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4848 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4849 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4850 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4851 	else
4852 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4853 
4854 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4855 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4856 		connector,
4857 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4858 		requested_bpc);
4859 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4860 	timing_out->hdmi_vic = 0;
4861 
	if (old_stream) {
4863 		timing_out->vic = old_stream->timing.vic;
4864 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4865 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4866 	} else {
4867 		timing_out->vic = drm_match_cea_mode(mode_in);
4868 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4869 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4870 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4871 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4872 	}
4873 
4874 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4875 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4876 		timing_out->vic = avi_frame.video_code;
4877 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4878 		timing_out->hdmi_vic = hv_frame.vic;
4879 	}
4880 
4881 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4882 	timing_out->h_total = mode_in->crtc_htotal;
4883 	timing_out->h_sync_width =
4884 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4885 	timing_out->h_front_porch =
4886 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4887 	timing_out->v_total = mode_in->crtc_vtotal;
4888 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4889 	timing_out->v_front_porch =
4890 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4891 	timing_out->v_sync_width =
4892 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4893 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4894 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4895 
4896 	stream->output_color_space = get_output_color_space(timing_out);
4897 
4898 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4899 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4900 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4901 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4902 		    drm_mode_is_420_also(info, mode_in) &&
4903 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4904 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4905 			adjust_colour_depth_from_display_info(timing_out, info);
4906 		}
4907 	}
4908 }
4909 
4910 static void fill_audio_info(struct audio_info *audio_info,
4911 			    const struct drm_connector *drm_connector,
4912 			    const struct dc_sink *dc_sink)
4913 {
4914 	int i = 0;
4915 	int cea_revision = 0;
4916 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4917 
4918 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4919 	audio_info->product_id = edid_caps->product_id;
4920 
4921 	cea_revision = drm_connector->display_info.cea_rev;
4922 
4923 	strscpy(audio_info->display_name,
4924 		edid_caps->display_name,
4925 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4926 
4927 	if (cea_revision >= 3) {
4928 		audio_info->mode_count = edid_caps->audio_mode_count;
4929 
4930 		for (i = 0; i < audio_info->mode_count; ++i) {
4931 			audio_info->modes[i].format_code =
4932 					(enum audio_format_code)
4933 					(edid_caps->audio_modes[i].format_code);
4934 			audio_info->modes[i].channel_count =
4935 					edid_caps->audio_modes[i].channel_count;
4936 			audio_info->modes[i].sample_rates.all =
4937 					edid_caps->audio_modes[i].sample_rate;
4938 			audio_info->modes[i].sample_size =
4939 					edid_caps->audio_modes[i].sample_size;
4940 		}
4941 	}
4942 
4943 	audio_info->flags.all = edid_caps->speaker_flags;
4944 
4945 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4946 	if (drm_connector->latency_present[0]) {
4947 		audio_info->video_latency = drm_connector->video_latency[0];
4948 		audio_info->audio_latency = drm_connector->audio_latency[0];
4949 	}
4950 
	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}
4954 
4955 static void
4956 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4957 				      struct drm_display_mode *dst_mode)
4958 {
4959 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4960 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4961 	dst_mode->crtc_clock = src_mode->crtc_clock;
4962 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4963 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4964 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4965 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4966 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4967 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4968 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4969 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4970 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4971 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4972 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4973 }
4974 
4975 static void
4976 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4977 					const struct drm_display_mode *native_mode,
4978 					bool scale_enabled)
4979 {
4980 	if (scale_enabled) {
4981 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4982 	} else if (native_mode->clock == drm_mode->clock &&
4983 			native_mode->htotal == drm_mode->htotal &&
4984 			native_mode->vtotal == drm_mode->vtotal) {
4985 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4986 	} else {
4987 		/* no scaling nor amdgpu inserted, no need to patch */
4988 	}
4989 }
4990 
4991 static struct dc_sink *
4992 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4993 {
4994 	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
4997 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4998 
4999 	sink = dc_sink_create(&sink_init_data);
5000 	if (!sink) {
5001 		DRM_ERROR("Failed to create sink!\n");
5002 		return NULL;
5003 	}
5004 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5005 
5006 	return sink;
5007 }
5008 
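/*
 * Multi-display synchronization: streams flagged for triggered CRTC reset
 * latch on the VSYNC of a master stream, chosen as the enabled stream
 * with the highest refresh rate.
 */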
5009 static void set_multisync_trigger_params(
5010 		struct dc_stream_state *stream)
5011 {
5012 	if (stream->triggered_crtc_reset.enabled) {
5013 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5014 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5015 	}
5016 }
5017 
5018 static void set_master_stream(struct dc_stream_state *stream_set[],
5019 			      int stream_count)
5020 {
5021 	int j, highest_rfr = 0, master_stream = 0;
5022 
	for (j = 0; j < stream_count; j++) {
5024 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5025 			int refresh_rate = 0;
5026 
5027 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5028 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5029 			if (refresh_rate > highest_rfr) {
5030 				highest_rfr = refresh_rate;
5031 				master_stream = j;
5032 			}
5033 		}
5034 	}
	for (j = 0; j < stream_count; j++) {
5036 		if (stream_set[j])
5037 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5038 	}
5039 }
5040 
5041 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5042 {
5043 	int i = 0;
5044 
5045 	if (context->stream_count < 2)
5046 		return;
5047 	for (i = 0; i < context->stream_count ; i++) {
5048 		if (!context->streams[i])
5049 			continue;
5050 		/*
5051 		 * TODO: add a function to read AMD VSDB bits and set
5052 		 * crtc_sync_master.multi_sync_enabled flag
5053 		 * For now it's set to false
5054 		 */
5055 		set_multisync_trigger_params(context->streams[i]);
5056 	}
5057 	set_master_stream(context->streams, context->stream_count);
5058 }
5059 
5060 static struct dc_stream_state *
5061 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5062 		       const struct drm_display_mode *drm_mode,
5063 		       const struct dm_connector_state *dm_state,
5064 		       const struct dc_stream_state *old_stream,
5065 		       int requested_bpc)
5066 {
5067 	struct drm_display_mode *preferred_mode = NULL;
5068 	struct drm_connector *drm_connector;
5069 	const struct drm_connector_state *con_state =
5070 		dm_state ? &dm_state->base : NULL;
5071 	struct dc_stream_state *stream = NULL;
5072 	struct drm_display_mode mode = *drm_mode;
5073 	bool native_mode_found = false;
5074 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5075 	int mode_refresh;
5076 	int preferred_refresh = 0;
5077 #if defined(CONFIG_DRM_AMD_DC_DCN)
5078 	struct dsc_dec_dpcd_caps dsc_caps;
5079 #endif
5080 	uint32_t link_bandwidth_kbps;
5081 
	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
5084 		DRM_ERROR("aconnector is NULL!\n");
5085 		return stream;
5086 	}
5087 
5088 	drm_connector = &aconnector->base;
5089 
5090 	if (!aconnector->dc_sink) {
5091 		sink = create_fake_sink(aconnector);
5092 		if (!sink)
5093 			return stream;
5094 	} else {
5095 		sink = aconnector->dc_sink;
5096 		dc_sink_retain(sink);
5097 	}
5098 
5099 	stream = dc_create_stream_for_sink(sink);
5100 
5101 	if (stream == NULL) {
5102 		DRM_ERROR("Failed to create stream for sink!\n");
5103 		goto finish;
5104 	}
5105 
5106 	stream->dm_stream_context = aconnector;
5107 
5108 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5109 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5110 
5111 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5112 		/* Search for preferred mode */
5113 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5114 			native_mode_found = true;
5115 			break;
5116 		}
5117 	}
5118 	if (!native_mode_found)
5119 		preferred_mode = list_first_entry_or_null(
5120 				&aconnector->base.modes,
5121 				struct drm_display_mode,
5122 				head);
5123 
5124 	mode_refresh = drm_mode_vrefresh(&mode);
5125 
5126 	if (preferred_mode == NULL) {
		/*
		 * This may not be an error: the use case is hotplug with no
		 * usermode call to reset and set the mode. In that case we
		 * set the mode ourselves to restore the previous mode, and
		 * the mode list may not have been filled in yet.
		 */
5133 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5134 	} else {
5135 		decide_crtc_timing_for_drm_display_mode(
5136 				&mode, preferred_mode,
5137 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5138 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5139 	}
5140 
5141 	if (!dm_state)
5142 		drm_mode_set_crtcinfo(&mode, 0);
5143 
	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * copy the VIC and polarities from the old timings.
	 */
5148 	if (!scale || mode_refresh != preferred_refresh)
5149 		fill_stream_properties_from_drm_display_mode(stream,
5150 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
5151 	else
5152 		fill_stream_properties_from_drm_display_mode(stream,
5153 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
5154 
5155 	stream->timing.flags.DSC = 0;
5156 
5157 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5158 #if defined(CONFIG_DRM_AMD_DC_DCN)
5159 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5160 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5161 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5162 				      &dsc_caps);
5163 #endif
5164 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5165 							     dc_link_get_link_cap(aconnector->dc_link));
5166 
5167 #if defined(CONFIG_DRM_AMD_DC_DCN)
5168 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5169 			/* Set DSC policy according to dsc_clock_en */
5170 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5171 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5172 
5173 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5174 						  &dsc_caps,
5175 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5176 						  0,
5177 						  link_bandwidth_kbps,
5178 						  &stream->timing,
5179 						  &stream->timing.dsc_cfg))
5180 				stream->timing.flags.DSC = 1;
5181 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5182 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5183 				stream->timing.flags.DSC = 1;
5184 
5185 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5186 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5187 
5188 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5189 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5190 
5191 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5192 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5193 		}
5194 #endif
5195 	}
5196 
5197 	update_stream_scaling_settings(&mode, dm_state, stream);
5198 
5199 	fill_audio_info(
5200 		&stream->audio_info,
5201 		drm_connector,
5202 		sink);
5203 
5204 	update_stream_signal(stream, sink);
5205 
5206 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5207 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5208 
5209 	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
5214 		stream->use_vsc_sdp_for_colorimetry = false;
5215 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5216 			stream->use_vsc_sdp_for_colorimetry =
5217 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5218 		} else {
5219 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5220 				stream->use_vsc_sdp_for_colorimetry = true;
5221 		}
5222 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5223 	}
5224 finish:
5225 	dc_sink_release(sink);
5226 
5227 	return stream;
5228 }
5229 
5230 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5231 {
5232 	drm_crtc_cleanup(crtc);
5233 	kfree(crtc);
5234 }
5235 
5236 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5237 				  struct drm_crtc_state *state)
5238 {
5239 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5240 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
5250 }
5251 
5252 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5253 {
5254 	struct dm_crtc_state *state;
5255 
5256 	if (crtc->state)
5257 		dm_crtc_destroy_state(crtc, crtc->state);
5258 
5259 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5260 	if (WARN_ON(!state))
5261 		return;
5262 
5263 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5264 }
5265 
5266 static struct drm_crtc_state *
5267 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5268 {
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
5275 
5276 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5277 	if (!state)
5278 		return NULL;
5279 
5280 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5281 
5282 	if (cur->stream) {
5283 		state->stream = cur->stream;
5284 		dc_stream_retain(state->stream);
5285 	}
5286 
5287 	state->active_planes = cur->active_planes;
5288 	state->vrr_infopacket = cur->vrr_infopacket;
5289 	state->abm_level = cur->abm_level;
5290 	state->vrr_supported = cur->vrr_supported;
5291 	state->freesync_config = cur->freesync_config;
5292 	state->crc_src = cur->crc_src;
5293 	state->cm_has_degamma = cur->cm_has_degamma;
5294 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5295 #ifdef CONFIG_DEBUG_FS
5296 	state->crc_window = cur->crc_window;
5297 #endif
	/* TODO: Duplicate dc_stream once the stream object is flattened */
5299 
5300 	return &state->base;
5301 }
5302 
5303 #ifdef CONFIG_DEBUG_FS
5304 int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
5305 					    struct drm_crtc_state *crtc_state,
5306 					    struct drm_property *property,
5307 					    uint64_t val)
5308 {
5309 	struct drm_device *dev = crtc->dev;
5310 	struct amdgpu_device *adev = drm_to_adev(dev);
5311 	struct dm_crtc_state *dm_new_state =
5312 		to_dm_crtc_state(crtc_state);
5313 
5314 	if (property == adev->dm.crc_win_x_start_property)
5315 		dm_new_state->crc_window.x_start = val;
5316 	else if (property == adev->dm.crc_win_y_start_property)
5317 		dm_new_state->crc_window.y_start = val;
5318 	else if (property == adev->dm.crc_win_x_end_property)
5319 		dm_new_state->crc_window.x_end = val;
5320 	else if (property == adev->dm.crc_win_y_end_property)
5321 		dm_new_state->crc_window.y_end = val;
5322 	else
5323 		return -EINVAL;
5324 
5325 	return 0;
5326 }
5327 
5328 int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
5329 					    const struct drm_crtc_state *state,
5330 					    struct drm_property *property,
5331 					    uint64_t *val)
5332 {
5333 	struct drm_device *dev = crtc->dev;
5334 	struct amdgpu_device *adev = drm_to_adev(dev);
5335 	struct dm_crtc_state *dm_state =
5336 		to_dm_crtc_state(state);
5337 
5338 	if (property == adev->dm.crc_win_x_start_property)
5339 		*val = dm_state->crc_window.x_start;
5340 	else if (property == adev->dm.crc_win_y_start_property)
5341 		*val = dm_state->crc_window.y_start;
5342 	else if (property == adev->dm.crc_win_x_end_property)
5343 		*val = dm_state->crc_window.x_end;
5344 	else if (property == adev->dm.crc_win_y_end_property)
5345 		*val = dm_state->crc_window.y_end;
5346 	else
5347 		return -EINVAL;
5348 
5349 	return 0;
5350 }
5351 #endif
5352 
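/*
 * VUPDATE is, roughly, the point in the frame where double-buffered timing
 * registers take effect. It is only needed alongside vblank while VRR is
 * active, where the stretched front porch makes plain vblank timing an
 * unreliable frame boundary.
 */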
5353 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5354 {
5355 	enum dc_irq_source irq_source;
5356 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5357 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5358 	int rc;
5359 
5360 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5361 
5362 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5363 
5364 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5365 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
5366 	return rc;
5367 }
5368 
5369 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5370 {
5371 	enum dc_irq_source irq_source;
5372 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5373 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5374 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5375 	int rc = 0;
5376 
5377 	if (enable) {
5378 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5379 		if (amdgpu_dm_vrr_active(acrtc_state))
5380 			rc = dm_set_vupdate_irq(crtc, true);
5381 	} else {
5382 		/* vblank irq off -> vupdate irq off */
5383 		rc = dm_set_vupdate_irq(crtc, false);
5384 	}
5385 
5386 	if (rc)
5387 		return rc;
5388 
5389 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5390 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5391 }
5392 
5393 static int dm_enable_vblank(struct drm_crtc *crtc)
5394 {
5395 	return dm_set_vblank(crtc, true);
5396 }
5397 
5398 static void dm_disable_vblank(struct drm_crtc *crtc)
5399 {
5400 	dm_set_vblank(crtc, false);
5401 }
5402 
/* Implement only the options currently available for the driver */
5404 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5405 	.reset = dm_crtc_reset_state,
5406 	.destroy = amdgpu_dm_crtc_destroy,
5407 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
5408 	.set_config = drm_atomic_helper_set_config,
5409 	.page_flip = drm_atomic_helper_page_flip,
5410 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5411 	.atomic_destroy_state = dm_crtc_destroy_state,
5412 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5413 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5414 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5415 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5416 	.enable_vblank = dm_enable_vblank,
5417 	.disable_vblank = dm_disable_vblank,
5418 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5419 #ifdef CONFIG_DEBUG_FS
5420 	.atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
5421 	.atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
5422 #endif
5423 };
5424 
5425 static enum drm_connector_status
5426 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5427 {
5428 	bool connected;
5429 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5430 
	/*
	 * Notes:
	 * 1. This interface is NOT called in the context of the HPD irq.
	 * 2. This interface *is* called in the context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity.
	 */
5437 
5438 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5439 	    !aconnector->fake_enable)
5440 		connected = (aconnector->dc_sink != NULL);
5441 	else
5442 		connected = (aconnector->base.force == DRM_FORCE_ON);
5443 
5444 	update_subconnector_property(aconnector);
5445 
5446 	return (connected ? connector_status_connected :
5447 			connector_status_disconnected);
5448 }
5449 
5450 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5451 					    struct drm_connector_state *connector_state,
5452 					    struct drm_property *property,
5453 					    uint64_t val)
5454 {
5455 	struct drm_device *dev = connector->dev;
5456 	struct amdgpu_device *adev = drm_to_adev(dev);
5457 	struct dm_connector_state *dm_old_state =
5458 		to_dm_connector_state(connector->state);
5459 	struct dm_connector_state *dm_new_state =
5460 		to_dm_connector_state(connector_state);
5461 
5462 	int ret = -EINVAL;
5463 
5464 	if (property == dev->mode_config.scaling_mode_property) {
5465 		enum amdgpu_rmx_type rmx_type;
5466 
5467 		switch (val) {
5468 		case DRM_MODE_SCALE_CENTER:
5469 			rmx_type = RMX_CENTER;
5470 			break;
5471 		case DRM_MODE_SCALE_ASPECT:
5472 			rmx_type = RMX_ASPECT;
5473 			break;
5474 		case DRM_MODE_SCALE_FULLSCREEN:
5475 			rmx_type = RMX_FULL;
5476 			break;
5477 		case DRM_MODE_SCALE_NONE:
5478 		default:
5479 			rmx_type = RMX_OFF;
5480 			break;
5481 		}
5482 
5483 		if (dm_old_state->scaling == rmx_type)
5484 			return 0;
5485 
5486 		dm_new_state->scaling = rmx_type;
5487 		ret = 0;
5488 	} else if (property == adev->mode_info.underscan_hborder_property) {
5489 		dm_new_state->underscan_hborder = val;
5490 		ret = 0;
5491 	} else if (property == adev->mode_info.underscan_vborder_property) {
5492 		dm_new_state->underscan_vborder = val;
5493 		ret = 0;
5494 	} else if (property == adev->mode_info.underscan_property) {
5495 		dm_new_state->underscan_enable = val;
5496 		ret = 0;
5497 	} else if (property == adev->mode_info.abm_level_property) {
5498 		dm_new_state->abm_level = val;
5499 		ret = 0;
5500 	}
5501 
5502 	return ret;
5503 }
5504 
5505 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5506 					    const struct drm_connector_state *state,
5507 					    struct drm_property *property,
5508 					    uint64_t *val)
5509 {
5510 	struct drm_device *dev = connector->dev;
5511 	struct amdgpu_device *adev = drm_to_adev(dev);
5512 	struct dm_connector_state *dm_state =
5513 		to_dm_connector_state(state);
5514 	int ret = -EINVAL;
5515 
5516 	if (property == dev->mode_config.scaling_mode_property) {
5517 		switch (dm_state->scaling) {
5518 		case RMX_CENTER:
5519 			*val = DRM_MODE_SCALE_CENTER;
5520 			break;
5521 		case RMX_ASPECT:
5522 			*val = DRM_MODE_SCALE_ASPECT;
5523 			break;
5524 		case RMX_FULL:
5525 			*val = DRM_MODE_SCALE_FULLSCREEN;
5526 			break;
5527 		case RMX_OFF:
5528 		default:
5529 			*val = DRM_MODE_SCALE_NONE;
5530 			break;
5531 		}
5532 		ret = 0;
5533 	} else if (property == adev->mode_info.underscan_hborder_property) {
5534 		*val = dm_state->underscan_hborder;
5535 		ret = 0;
5536 	} else if (property == adev->mode_info.underscan_vborder_property) {
5537 		*val = dm_state->underscan_vborder;
5538 		ret = 0;
5539 	} else if (property == adev->mode_info.underscan_property) {
5540 		*val = dm_state->underscan_enable;
5541 		ret = 0;
5542 	} else if (property == adev->mode_info.abm_level_property) {
5543 		*val = dm_state->abm_level;
5544 		ret = 0;
5545 	}
5546 
5547 	return ret;
5548 }
5549 
5550 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5551 {
5552 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5553 
5554 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5555 }
5556 
5557 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5558 {
5559 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5560 	const struct dc_link *link = aconnector->dc_link;
5561 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5562 	struct amdgpu_display_manager *dm = &adev->dm;
5563 
	/*
	 * Call only if mst_mgr was initialized before, since it's not done
	 * for all connector types.
	 */
5568 	if (aconnector->mst_mgr.dev)
5569 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5570 
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5573 
5574 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5575 	    link->type != dc_connection_none &&
5576 	    dm->backlight_dev) {
5577 		backlight_device_unregister(dm->backlight_dev);
5578 		dm->backlight_dev = NULL;
5579 	}
5580 #endif
5581 
5582 	if (aconnector->dc_em_sink)
5583 		dc_sink_release(aconnector->dc_em_sink);
5584 	aconnector->dc_em_sink = NULL;
5585 	if (aconnector->dc_sink)
5586 		dc_sink_release(aconnector->dc_sink);
5587 	aconnector->dc_sink = NULL;
5588 
5589 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5590 	drm_connector_unregister(connector);
5591 	drm_connector_cleanup(connector);
5592 	if (aconnector->i2c) {
5593 		i2c_del_adapter(&aconnector->i2c->base);
5594 		kfree(aconnector->i2c);
5595 	}
5596 	kfree(aconnector->dm_dp_aux.aux.name);
5597 
5598 	kfree(connector);
5599 }
5600 
5601 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5602 {
5603 	struct dm_connector_state *state =
5604 		to_dm_connector_state(connector->state);
5605 
5606 	if (connector->state)
5607 		__drm_atomic_helper_connector_destroy_state(connector->state);
5608 
5609 	kfree(state);
5610 
5611 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5612 
5613 	if (state) {
5614 		state->scaling = RMX_OFF;
5615 		state->underscan_enable = false;
5616 		state->underscan_hborder = 0;
5617 		state->underscan_vborder = 0;
5618 		state->base.max_requested_bpc = 8;
5619 		state->vcpi_slots = 0;
5620 		state->pbn = 0;
5621 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5622 			state->abm_level = amdgpu_dm_abm_level;
5623 
5624 		__drm_atomic_helper_connector_reset(connector, &state->base);
5625 	}
5626 }
5627 
5628 struct drm_connector_state *
5629 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5630 {
5631 	struct dm_connector_state *state =
5632 		to_dm_connector_state(connector->state);
5633 
5634 	struct dm_connector_state *new_state =
5635 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5636 
5637 	if (!new_state)
5638 		return NULL;
5639 
5640 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5641 
5642 	new_state->freesync_capable = state->freesync_capable;
5643 	new_state->abm_level = state->abm_level;
5644 	new_state->scaling = state->scaling;
5645 	new_state->underscan_enable = state->underscan_enable;
5646 	new_state->underscan_hborder = state->underscan_hborder;
5647 	new_state->underscan_vborder = state->underscan_vborder;
5648 	new_state->vcpi_slots = state->vcpi_slots;
5649 	new_state->pbn = state->pbn;
5650 	return &new_state->base;
5651 }
5652 
5653 static int
5654 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5655 {
5656 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5657 		to_amdgpu_dm_connector(connector);
5658 	int r;
5659 
5660 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5661 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5662 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5663 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5664 		if (r)
5665 			return r;
5666 	}
5667 
5668 #if defined(CONFIG_DEBUG_FS)
5669 	connector_debugfs_init(amdgpu_dm_connector);
5670 #endif
5671 
5672 	return 0;
5673 }
5674 
5675 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5676 	.reset = amdgpu_dm_connector_funcs_reset,
5677 	.detect = amdgpu_dm_connector_detect,
5678 	.fill_modes = drm_helper_probe_single_connector_modes,
5679 	.destroy = amdgpu_dm_connector_destroy,
5680 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5681 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5682 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5683 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5684 	.late_register = amdgpu_dm_connector_late_register,
5685 	.early_unregister = amdgpu_dm_connector_unregister
5686 };
5687 
5688 static int get_modes(struct drm_connector *connector)
5689 {
5690 	return amdgpu_dm_connector_get_modes(connector);
5691 }
5692 
5693 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5694 {
5695 	struct dc_sink_init_data init_params = {
5696 			.link = aconnector->dc_link,
5697 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5698 	};
5699 	struct edid *edid;
5700 
5701 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);
5704 
5705 		aconnector->base.force = DRM_FORCE_OFF;
5706 		aconnector->base.override_edid = false;
5707 		return;
5708 	}
5709 
5710 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5711 
5712 	aconnector->edid = edid;
5713 
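	/*
	 * Each EDID block is EDID_LENGTH (128) bytes, and the base block
	 * reports how many extension blocks follow, so the total blob size
	 * is (extensions + 1) * EDID_LENGTH.
	 */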
5714 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5715 		aconnector->dc_link,
5716 		(uint8_t *)edid,
5717 		(edid->extensions + 1) * EDID_LENGTH,
5718 		&init_params);
5719 
5720 	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
				      aconnector->dc_link->local_sink :
				      aconnector->dc_em_sink;
5724 		dc_sink_retain(aconnector->dc_sink);
5725 	}
5726 }
5727 
5728 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5729 {
5730 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5731 
	/*
	 * In case of headless boot with force-on for a DP managed connector,
	 * these settings must be != 0 to get an initial modeset.
	 */
5736 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5737 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5738 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5739 	}
5740 
	aconnector->base.override_edid = true;
5743 	create_eml_sink(aconnector);
5744 }
5745 
5746 static struct dc_stream_state *
5747 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5748 				const struct drm_display_mode *drm_mode,
5749 				const struct dm_connector_state *dm_state,
5750 				const struct dc_stream_state *old_stream)
5751 {
5752 	struct drm_connector *connector = &aconnector->base;
5753 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5754 	struct dc_stream_state *stream;
5755 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5756 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5757 	enum dc_status dc_result = DC_OK;
5758 
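	/*
	 * Try the connector's requested bpc first (max_requested_bpc, e.g.
	 * 10), then step down by 2 on each DC validation failure
	 * (10 -> 8 -> 6) until a stream validates or we run out of depths.
	 */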
5759 	do {
5760 		stream = create_stream_for_sink(aconnector, drm_mode,
5761 						dm_state, old_stream,
5762 						requested_bpc);
5763 		if (stream == NULL) {
5764 			DRM_ERROR("Failed to create stream for sink!\n");
5765 			break;
5766 		}
5767 
5768 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5769 
5770 		if (dc_result != DC_OK) {
5771 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5772 				      drm_mode->hdisplay,
5773 				      drm_mode->vdisplay,
5774 				      drm_mode->clock,
5775 				      dc_result,
5776 				      dc_status_to_str(dc_result));
5777 
5778 			dc_stream_release(stream);
5779 			stream = NULL;
5780 			requested_bpc -= 2; /* lower bpc to retry validation */
5781 		}
5782 
5783 	} while (stream == NULL && requested_bpc >= 6);
5784 
5785 	return stream;
5786 }
5787 
5788 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5789 				   struct drm_display_mode *mode)
5790 {
5791 	int result = MODE_ERROR;
5792 	struct dc_sink *dc_sink;
5793 	/* TODO: Unhardcode stream count */
5794 	struct dc_stream_state *stream;
5795 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5796 
5797 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5798 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5799 		return result;
5800 
	/*
	 * Only run this the first time mode_valid is called, to initialize
	 * EDID mgmt.
	 */
5805 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5806 		!aconnector->dc_em_sink)
5807 		handle_edid_mgmt(aconnector);
5808 
5809 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5810 
5811 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5812 				aconnector->base.force != DRM_FORCE_ON) {
5813 		DRM_ERROR("dc_sink is NULL!\n");
5814 		goto fail;
5815 	}
5816 
5817 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5818 	if (stream) {
5819 		dc_stream_release(stream);
5820 		result = MODE_OK;
5821 	}
5822 
5823 fail:
	/* TODO: error handling */
5825 	return result;
5826 }
5827 
5828 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5829 				struct dc_info_packet *out)
5830 {
5831 	struct hdmi_drm_infoframe frame;
5832 	unsigned char buf[30]; /* 26 + 4 */
5833 	ssize_t len;
5834 	int ret, i;
5835 
5836 	memset(out, 0, sizeof(*out));
5837 
5838 	if (!state->hdr_output_metadata)
5839 		return 0;
5840 
5841 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5842 	if (ret)
5843 		return ret;
5844 
5845 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5846 	if (len < 0)
5847 		return (int)len;
5848 
5849 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5850 	if (len != 30)
5851 		return -EINVAL;
5852 
5853 	/* Prepare the infopacket for DC. */
5854 	switch (state->connector->connector_type) {
5855 	case DRM_MODE_CONNECTOR_HDMIA:
5856 		out->hb0 = 0x87; /* type */
5857 		out->hb1 = 0x01; /* version */
5858 		out->hb2 = 0x1A; /* length */
5859 		out->sb[0] = buf[3]; /* checksum */
5860 		i = 1;
5861 		break;
5862 
5863 	case DRM_MODE_CONNECTOR_DisplayPort:
5864 	case DRM_MODE_CONNECTOR_eDP:
5865 		out->hb0 = 0x00; /* sdp id, zero */
5866 		out->hb1 = 0x87; /* type */
5867 		out->hb2 = 0x1D; /* payload len - 1 */
5868 		out->hb3 = (0x13 << 2); /* sdp version */
5869 		out->sb[0] = 0x01; /* version */
5870 		out->sb[1] = 0x1A; /* length */
5871 		i = 2;
5872 		break;
5873 
5874 	default:
5875 		return -EINVAL;
5876 	}
5877 
5878 	memcpy(&out->sb[i], &buf[4], 26);
5879 	out->valid = true;
5880 
5881 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5882 		       sizeof(out->sb), false);
5883 
5884 	return 0;
5885 }
5886 
5887 static bool
5888 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5889 			  const struct drm_connector_state *new_state)
5890 {
5891 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5892 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5893 
5894 	if (old_blob != new_blob) {
5895 		if (old_blob && new_blob &&
5896 		    old_blob->length == new_blob->length)
5897 			return memcmp(old_blob->data, new_blob->data,
5898 				      old_blob->length);
5899 
5900 		return true;
5901 	}
5902 
5903 	return false;
5904 }
5905 
5906 static int
5907 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5908 				 struct drm_atomic_state *state)
5909 {
5910 	struct drm_connector_state *new_con_state =
5911 		drm_atomic_get_new_connector_state(state, conn);
5912 	struct drm_connector_state *old_con_state =
5913 		drm_atomic_get_old_connector_state(state, conn);
5914 	struct drm_crtc *crtc = new_con_state->crtc;
5915 	struct drm_crtc_state *new_crtc_state;
5916 	int ret;
5917 
5918 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
5919 
5920 	if (!crtc)
5921 		return 0;
5922 
5923 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5924 		struct dc_info_packet hdr_infopacket;
5925 
5926 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5927 		if (ret)
5928 			return ret;
5929 
5930 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5931 		if (IS_ERR(new_crtc_state))
5932 			return PTR_ERR(new_crtc_state);
5933 
5934 		/*
5935 		 * DC considers the stream backends changed if the
5936 		 * static metadata changes. Forcing the modeset also
5937 		 * gives a simple way for userspace to switch from
5938 		 * 8bpc to 10bpc when setting the metadata to enter
5939 		 * or exit HDR.
5940 		 *
5941 		 * Changing the static metadata after it's been
5942 		 * set is permissible, however. So only force a
5943 		 * modeset if we're entering or exiting HDR.
5944 		 */
5945 		new_crtc_state->mode_changed =
5946 			!old_con_state->hdr_output_metadata ||
5947 			!new_con_state->hdr_output_metadata;
5948 	}
5949 
5950 	return 0;
5951 }
5952 
5953 static const struct drm_connector_helper_funcs
5954 amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second, bigger display in FB console mode, the
	 * bigger resolution modes will be filtered by drm_mode_validate_size()
	 * and go missing after the user starts lightdm. So we need to renew
	 * the mode list in the get_modes callback, not just return the mode
	 * count.
	 */
5961 	.get_modes = get_modes,
5962 	.mode_valid = amdgpu_dm_connector_mode_valid,
5963 	.atomic_check = amdgpu_dm_connector_atomic_check,
5964 };
5965 
5966 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5967 {
5968 }
5969 
5970 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5971 {
5972 	struct drm_atomic_state *state = new_crtc_state->state;
5973 	struct drm_plane *plane;
5974 	int num_active = 0;
5975 
5976 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5977 		struct drm_plane_state *new_plane_state;
5978 
5979 		/* Cursor planes are "fake". */
5980 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5981 			continue;
5982 
5983 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5984 
5985 		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
5991 			num_active += 1;
5992 			continue;
5993 		}
5994 
5995 		/* We need a framebuffer to be considered enabled. */
5996 		num_active += (new_plane_state->fb != NULL);
5997 	}
5998 
5999 	return num_active;
6000 }
6001 
6002 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6003 					 struct drm_crtc_state *new_crtc_state)
6004 {
6005 	struct dm_crtc_state *dm_new_crtc_state =
6006 		to_dm_crtc_state(new_crtc_state);
6007 
6008 	dm_new_crtc_state->active_planes = 0;
6009 
6010 	if (!dm_new_crtc_state->stream)
6011 		return;
6012 
6013 	dm_new_crtc_state->active_planes =
6014 		count_crtc_active_planes(new_crtc_state);
6015 }
6016 
6017 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6018 				       struct drm_atomic_state *state)
6019 {
6020 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6021 									  crtc);
6022 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6023 	struct dc *dc = adev->dm.dc;
6024 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6025 	int ret = -EINVAL;
6026 
6027 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6028 
6029 	dm_update_crtc_active_planes(crtc, crtc_state);
6030 
6031 	if (unlikely(!dm_crtc_state->stream &&
6032 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6033 		WARN_ON(1);
6034 		return ret;
6035 	}
6036 
6037 	/*
6038 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6039 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6040 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6041 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6042 	 */
6043 	if (crtc_state->enable &&
6044 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary)))
6045 		return -EINVAL;
6046 
6047 	/* In some use cases, like reset, no stream is attached */
6048 	if (!dm_crtc_state->stream)
6049 		return 0;
6050 
6051 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6052 		return 0;
6053 
6054 	return ret;
6055 }
6056 
6057 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6058 				      const struct drm_display_mode *mode,
6059 				      struct drm_display_mode *adjusted_mode)
6060 {
6061 	return true;
6062 }
6063 
6064 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6065 	.disable = dm_crtc_helper_disable,
6066 	.atomic_check = dm_crtc_helper_atomic_check,
6067 	.mode_fixup = dm_crtc_helper_mode_fixup,
6068 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6069 };
6070 
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
6075 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
6096 
6097 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6098 					  struct drm_crtc_state *crtc_state,
6099 					  struct drm_connector_state *conn_state)
6100 {
6101 	struct drm_atomic_state *state = crtc_state->state;
6102 	struct drm_connector *connector = conn_state->connector;
6103 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6104 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6105 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6106 	struct drm_dp_mst_topology_mgr *mst_mgr;
6107 	struct drm_dp_mst_port *mst_port;
6108 	enum dc_color_depth color_depth;
6109 	int clock, bpp = 0;
6110 	bool is_y420 = false;
6111 
6112 	if (!aconnector->port || !aconnector->dc_sink)
6113 		return 0;
6114 
6115 	mst_port = aconnector->port;
6116 	mst_mgr = &aconnector->mst_port->mst_mgr;
6117 
6118 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6119 		return 0;
6120 
	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
6125 		color_depth = convert_color_depth_from_display_info(connector,
6126 								    is_y420,
6127 								    max_bpc);
6128 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6129 		clock = adjusted_mode->clock;
6130 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6131 	}
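	/*
	 * PBN (payload bandwidth number) is the MST bandwidth unit of
	 * 54/64 MBps; drm_dp_calc_pbn_mode() adds a small (~0.6%) margin on
	 * top of clock * bpp / 8. The VCPI slot count is the number of MST
	 * timeslots needed to carry that PBN on this link.
	 */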
6132 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6133 									   mst_mgr,
6134 									   mst_port,
6135 									   dm_new_connector_state->pbn,
6136 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6137 	if (dm_new_connector_state->vcpi_slots < 0) {
6138 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6139 		return dm_new_connector_state->vcpi_slots;
6140 	}
6141 	return 0;
6142 }
6143 
6144 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6145 	.disable = dm_encoder_helper_disable,
6146 	.atomic_check = dm_encoder_helper_atomic_check
6147 };
6148 
6149 #if defined(CONFIG_DRM_AMD_DC_DCN)
6150 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6151 					    struct dc_state *dc_state)
6152 {
6153 	struct dc_stream_state *stream = NULL;
6154 	struct drm_connector *connector;
6155 	struct drm_connector_state *new_con_state, *old_con_state;
6156 	struct amdgpu_dm_connector *aconnector;
6157 	struct dm_connector_state *dm_conn_state;
6158 	int i, j, clock, bpp;
6159 	int vcpi, pbn_div, pbn = 0;
6160 
6161 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6162 
6163 		aconnector = to_amdgpu_dm_connector(connector);
6164 
6165 		if (!aconnector->port)
6166 			continue;
6167 
6168 		if (!new_con_state || !new_con_state->crtc)
6169 			continue;
6170 
6171 		dm_conn_state = to_dm_connector_state(new_con_state);
6172 
6173 		for (j = 0; j < dc_state->stream_count; j++) {
6174 			stream = dc_state->streams[j];
6175 			if (!stream)
6176 				continue;
6177 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6179 				break;
6180 
6181 			stream = NULL;
6182 		}
6183 
6184 		if (!stream)
6185 			continue;
6186 
6187 		if (stream->timing.flags.DSC != 1) {
6188 			drm_dp_mst_atomic_enable_dsc(state,
6189 						     aconnector->port,
6190 						     dm_conn_state->pbn,
6191 						     0,
6192 						     false);
6193 			continue;
6194 		}
6195 
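		/*
		 * Recompute the PBN from the DSC-compressed rate:
		 * dsc_cfg.bits_per_pixel is a fixed-point value in 1/16 bpp
		 * units, which drm_dp_calc_pbn_mode() accounts for when its
		 * dsc argument is true.
		 */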
6196 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6197 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6198 		clock = stream->timing.pix_clk_100hz / 10;
6199 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6200 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6201 						    aconnector->port,
6202 						    pbn, pbn_div,
6203 						    true);
6204 		if (vcpi < 0)
6205 			return vcpi;
6206 
6207 		dm_conn_state->pbn = pbn;
6208 		dm_conn_state->vcpi_slots = vcpi;
6209 	}
6210 	return 0;
6211 }
6212 #endif
6213 
6214 static void dm_drm_plane_reset(struct drm_plane *plane)
6215 {
6216 	struct dm_plane_state *amdgpu_state = NULL;
6217 
6218 	if (plane->state)
6219 		plane->funcs->atomic_destroy_state(plane, plane->state);
6220 
	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	if (WARN_ON(!amdgpu_state))
		return;

	__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6226 }
6227 
6228 static struct drm_plane_state *
6229 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6230 {
6231 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6232 
6233 	old_dm_plane_state = to_dm_plane_state(plane->state);
6234 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6235 	if (!dm_plane_state)
6236 		return NULL;
6237 
6238 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6239 
6240 	if (old_dm_plane_state->dc_state) {
6241 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6242 		dc_plane_state_retain(dm_plane_state->dc_state);
6243 	}
6244 
6245 	return &dm_plane_state->base;
6246 }
6247 
6248 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6249 				struct drm_plane_state *state)
6250 {
6251 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6252 
6253 	if (dm_plane_state->dc_state)
6254 		dc_plane_state_release(dm_plane_state->dc_state);
6255 
6256 	drm_atomic_helper_plane_destroy_state(plane, state);
6257 }
6258 
6259 static const struct drm_plane_funcs dm_plane_funcs = {
6260 	.update_plane	= drm_atomic_helper_update_plane,
6261 	.disable_plane	= drm_atomic_helper_disable_plane,
6262 	.destroy	= drm_primary_helper_destroy,
6263 	.reset = dm_drm_plane_reset,
6264 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6265 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6266 	.format_mod_supported = dm_plane_format_mod_supported,
6267 };
6268 
6269 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6270 				      struct drm_plane_state *new_state)
6271 {
6272 	struct amdgpu_framebuffer *afb;
6273 	struct drm_gem_object *obj;
6274 	struct amdgpu_device *adev;
6275 	struct amdgpu_bo *rbo;
6276 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6277 	struct list_head list;
6278 	struct ttm_validate_buffer tv;
6279 	struct ww_acquire_ctx ticket;
6280 	uint32_t domain;
6281 	int r;
6282 
6283 	if (!new_state->fb) {
6284 		DRM_DEBUG_DRIVER("No FB bound\n");
6285 		return 0;
6286 	}
6287 
6288 	afb = to_amdgpu_framebuffer(new_state->fb);
6289 	obj = new_state->fb->obj[0];
6290 	rbo = gem_to_amdgpu_bo(obj);
6291 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6292 	INIT_LIST_HEAD(&list);
6293 
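	/*
	 * Reserve (lock) the BO via the TTM execbuf utils before pinning:
	 * amdgpu_bo_pin() requires the reservation to be held, and one
	 * shared fence slot is enough since display only reads the buffer.
	 */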
6294 	tv.bo = &rbo->tbo;
6295 	tv.num_shared = 1;
6296 	list_add(&tv.head, &list);
6297 
6298 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6299 	if (r) {
6300 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6301 		return r;
6302 	}
6303 
6304 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6305 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6306 	else
6307 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6308 
6309 	r = amdgpu_bo_pin(rbo, domain);
6310 	if (unlikely(r != 0)) {
6311 		if (r != -ERESTARTSYS)
6312 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6313 		ttm_eu_backoff_reservation(&ticket, &list);
6314 		return r;
6315 	}
6316 
6317 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6318 	if (unlikely(r != 0)) {
6319 		amdgpu_bo_unpin(rbo);
6320 		ttm_eu_backoff_reservation(&ticket, &list);
6321 		DRM_ERROR("%p bind failed\n", rbo);
6322 		return r;
6323 	}
6324 
6325 	ttm_eu_backoff_reservation(&ticket, &list);
6326 
6327 	afb->address = amdgpu_bo_gpu_offset(rbo);
6328 
6329 	amdgpu_bo_ref(rbo);
6330 
	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes, since they're not being used by DC yet and
	 * this won't modify global state.
	 */
6339 	dm_plane_state_old = to_dm_plane_state(plane->state);
6340 	dm_plane_state_new = to_dm_plane_state(new_state);
6341 
6342 	if (dm_plane_state_new->dc_state &&
6343 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6344 		struct dc_plane_state *plane_state =
6345 			dm_plane_state_new->dc_state;
6346 		bool force_disable_dcc = !plane_state->dcc.enable;
6347 
6348 		fill_plane_buffer_attributes(
6349 			adev, afb, plane_state->format, plane_state->rotation,
6350 			afb->tiling_flags,
6351 			&plane_state->tiling_info, &plane_state->plane_size,
6352 			&plane_state->dcc, &plane_state->address,
6353 			afb->tmz_surface, force_disable_dcc);
6354 	}
6355 
6356 	return 0;
6357 }
6358 
6359 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6360 				       struct drm_plane_state *old_state)
6361 {
6362 	struct amdgpu_bo *rbo;
6363 	int r;
6364 
6365 	if (!old_state->fb)
6366 		return;
6367 
6368 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6369 	r = amdgpu_bo_reserve(rbo, false);
6370 	if (unlikely(r)) {
6371 		DRM_ERROR("failed to reserve rbo before unpin\n");
6372 		return;
6373 	}
6374 
6375 	amdgpu_bo_unpin(rbo);
6376 	amdgpu_bo_unreserve(rbo);
6377 	amdgpu_bo_unref(&rbo);
6378 }
6379 
6380 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6381 				       struct drm_crtc_state *new_crtc_state)
6382 {
6383 	int max_downscale = 0;
6384 	int max_upscale = INT_MAX;
6385 
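	/*
	 * The scale limits are 16.16 fixed-point factors, so 0 and INT_MAX
	 * effectively disable the downscale and upscale checks for now.
	 */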
6386 	/* TODO: These should be checked against DC plane caps */
6387 	return drm_atomic_helper_check_plane_state(
6388 		state, new_crtc_state, max_downscale, max_upscale, true, true);
6389 }
6390 
6391 static int dm_plane_atomic_check(struct drm_plane *plane,
6392 				 struct drm_plane_state *state)
6393 {
6394 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6395 	struct dc *dc = adev->dm.dc;
6396 	struct dm_plane_state *dm_plane_state;
6397 	struct dc_scaling_info scaling_info;
6398 	struct drm_crtc_state *new_crtc_state;
6399 	int ret;
6400 
6401 	trace_amdgpu_dm_plane_atomic_check(state);
6402 
6403 	dm_plane_state = to_dm_plane_state(state);
6404 
6405 	if (!dm_plane_state->dc_state)
6406 		return 0;
6407 
6408 	new_crtc_state =
6409 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6410 	if (!new_crtc_state)
6411 		return -EINVAL;
6412 
6413 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6414 	if (ret)
6415 		return ret;
6416 
6417 	ret = fill_dc_scaling_info(state, &scaling_info);
6418 	if (ret)
6419 		return ret;
6420 
6421 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6422 		return 0;
6423 
6424 	return -EINVAL;
6425 }
6426 
6427 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6428 				       struct drm_plane_state *new_plane_state)
6429 {
6430 	/* Only support async updates on cursor planes. */
6431 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6432 		return -EINVAL;
6433 
6434 	return 0;
6435 }
6436 
6437 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6438 					 struct drm_plane_state *new_state)
6439 {
6440 	struct drm_plane_state *old_state =
6441 		drm_atomic_get_old_plane_state(new_state->state, plane);
6442 
6443 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6444 
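	/*
	 * Async updates bypass the full atomic commit: swap the new fb into
	 * the current plane state, copy the new position and size over, and
	 * program the cursor hardware directly.
	 */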
6445 	swap(plane->state->fb, new_state->fb);
6446 
6447 	plane->state->src_x = new_state->src_x;
6448 	plane->state->src_y = new_state->src_y;
6449 	plane->state->src_w = new_state->src_w;
6450 	plane->state->src_h = new_state->src_h;
6451 	plane->state->crtc_x = new_state->crtc_x;
6452 	plane->state->crtc_y = new_state->crtc_y;
6453 	plane->state->crtc_w = new_state->crtc_w;
6454 	plane->state->crtc_h = new_state->crtc_h;
6455 
6456 	handle_cursor_update(plane, old_state);
6457 }
6458 
6459 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6460 	.prepare_fb = dm_plane_helper_prepare_fb,
6461 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6462 	.atomic_check = dm_plane_atomic_check,
6463 	.atomic_async_check = dm_plane_atomic_async_check,
6464 	.atomic_async_update = dm_plane_atomic_async_update
6465 };
6466 
/*
 * TODO: These are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the
 * internal drm check will succeed, and let DC implement the proper check.
 */
6473 static const uint32_t rgb_formats[] = {
6474 	DRM_FORMAT_XRGB8888,
6475 	DRM_FORMAT_ARGB8888,
6476 	DRM_FORMAT_RGBA8888,
6477 	DRM_FORMAT_XRGB2101010,
6478 	DRM_FORMAT_XBGR2101010,
6479 	DRM_FORMAT_ARGB2101010,
6480 	DRM_FORMAT_ABGR2101010,
6481 	DRM_FORMAT_XBGR8888,
6482 	DRM_FORMAT_ABGR8888,
6483 	DRM_FORMAT_RGB565,
6484 };
6485 
6486 static const uint32_t overlay_formats[] = {
6487 	DRM_FORMAT_XRGB8888,
6488 	DRM_FORMAT_ARGB8888,
6489 	DRM_FORMAT_RGBA8888,
6490 	DRM_FORMAT_XBGR8888,
6491 	DRM_FORMAT_ABGR8888,
6492 	DRM_FORMAT_RGB565
6493 };
6494 
6495 static const u32 cursor_formats[] = {
6496 	DRM_FORMAT_ARGB8888
6497 };
6498 
6499 static int get_plane_formats(const struct drm_plane *plane,
6500 			     const struct dc_plane_cap *plane_cap,
6501 			     uint32_t *formats, int max_formats)
6502 {
6503 	int i, num_formats = 0;
6504 
6505 	/*
6506 	 * TODO: Query support for each group of formats directly from
6507 	 * DC plane caps. This will require adding more formats to the
6508 	 * caps list.
6509 	 */
6510 
6511 	switch (plane->type) {
6512 	case DRM_PLANE_TYPE_PRIMARY:
6513 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6514 			if (num_formats >= max_formats)
6515 				break;
6516 
6517 			formats[num_formats++] = rgb_formats[i];
6518 		}
6519 
6520 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6521 			formats[num_formats++] = DRM_FORMAT_NV12;
6522 		if (plane_cap && plane_cap->pixel_format_support.p010)
6523 			formats[num_formats++] = DRM_FORMAT_P010;
6524 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6525 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6526 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6527 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6528 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6529 		}
6530 		break;
6531 
6532 	case DRM_PLANE_TYPE_OVERLAY:
6533 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6534 			if (num_formats >= max_formats)
6535 				break;
6536 
6537 			formats[num_formats++] = overlay_formats[i];
6538 		}
6539 		break;
6540 
6541 	case DRM_PLANE_TYPE_CURSOR:
6542 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6543 			if (num_formats >= max_formats)
6544 				break;
6545 
6546 			formats[num_formats++] = cursor_formats[i];
6547 		}
6548 		break;
6549 	}
6550 
6551 	return num_formats;
6552 }
6553 
6554 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6555 				struct drm_plane *plane,
6556 				unsigned long possible_crtcs,
6557 				const struct dc_plane_cap *plane_cap)
6558 {
6559 	uint32_t formats[32];
6560 	int num_formats;
6561 	int res = -EPERM;
6562 	unsigned int supported_rotations;
6563 	uint64_t *modifiers = NULL;
6564 
6565 	num_formats = get_plane_formats(plane, plane_cap, formats,
6566 					ARRAY_SIZE(formats));
6567 
6568 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6569 	if (res)
6570 		return res;
6571 
6572 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6573 				       &dm_plane_funcs, formats, num_formats,
6574 				       modifiers, plane->type, NULL);
6575 	kfree(modifiers);
6576 	if (res)
6577 		return res;
6578 
6579 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6580 	    plane_cap && plane_cap->per_pixel_alpha) {
6581 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6582 					  BIT(DRM_MODE_BLEND_PREMULTI);
6583 
6584 		drm_plane_create_alpha_property(plane);
6585 		drm_plane_create_blend_mode_property(plane, blend_caps);
6586 	}
6587 
6588 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6589 	    plane_cap &&
6590 	    (plane_cap->pixel_format_support.nv12 ||
6591 	     plane_cap->pixel_format_support.p010)) {
6592 		/* This only affects YUV formats. */
6593 		drm_plane_create_color_properties(
6594 			plane,
6595 			BIT(DRM_COLOR_YCBCR_BT601) |
6596 			BIT(DRM_COLOR_YCBCR_BT709) |
6597 			BIT(DRM_COLOR_YCBCR_BT2020),
6598 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6599 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6600 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6601 	}
6602 
6603 	supported_rotations =
6604 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6605 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6606 
6607 	if (dm->adev->asic_type >= CHIP_BONAIRE)
6608 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6609 						   supported_rotations);
6610 
6611 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6612 
6613 	/* Create (reset) the plane state */
6614 	if (plane->funcs->reset)
6615 		plane->funcs->reset(plane);
6616 
6617 	return 0;
6618 }
6619 
6620 #ifdef CONFIG_DEBUG_FS
6621 static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
6622 				struct amdgpu_crtc *acrtc)
6623 {
6624 	drm_object_attach_property(&acrtc->base.base,
6625 				   dm->crc_win_x_start_property,
6626 				   0);
6627 	drm_object_attach_property(&acrtc->base.base,
6628 				   dm->crc_win_y_start_property,
6629 				   0);
6630 	drm_object_attach_property(&acrtc->base.base,
6631 				   dm->crc_win_x_end_property,
6632 				   0);
6633 	drm_object_attach_property(&acrtc->base.base,
6634 				   dm->crc_win_y_end_property,
6635 				   0);
6636 }
6637 #endif
6638 
6639 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6640 			       struct drm_plane *plane,
6641 			       uint32_t crtc_index)
6642 {
6643 	struct amdgpu_crtc *acrtc = NULL;
6644 	struct drm_plane *cursor_plane;
6645 
6646 	int res = -ENOMEM;
6647 
6648 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6649 	if (!cursor_plane)
6650 		goto fail;
6651 
6652 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;

6655 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6656 	if (!acrtc)
6657 		goto fail;
6658 
6659 	res = drm_crtc_init_with_planes(
6660 			dm->ddev,
6661 			&acrtc->base,
6662 			plane,
6663 			cursor_plane,
6664 			&amdgpu_dm_crtc_funcs, NULL);
6665 
6666 	if (res)
6667 		goto fail;
6668 
6669 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6670 
	/* Create (reset) the CRTC state */
6672 	if (acrtc->base.funcs->reset)
6673 		acrtc->base.funcs->reset(&acrtc->base);
6674 
6675 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6676 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6677 
6678 	acrtc->crtc_id = crtc_index;
6679 	acrtc->base.enabled = false;
6680 	acrtc->otg_inst = -1;
6681 
6682 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6683 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6684 				   true, MAX_COLOR_LUT_ENTRIES);
6685 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6686 #ifdef CONFIG_DEBUG_FS
6687 	attach_crtc_crc_properties(dm, acrtc);
6688 #endif
6689 	return 0;
6690 
6691 fail:
6692 	kfree(acrtc);
6693 	kfree(cursor_plane);
6694 	return res;
6695 }
6696 
6697 
6698 static int to_drm_connector_type(enum signal_type st)
6699 {
6700 	switch (st) {
6701 	case SIGNAL_TYPE_HDMI_TYPE_A:
6702 		return DRM_MODE_CONNECTOR_HDMIA;
6703 	case SIGNAL_TYPE_EDP:
6704 		return DRM_MODE_CONNECTOR_eDP;
6705 	case SIGNAL_TYPE_LVDS:
6706 		return DRM_MODE_CONNECTOR_LVDS;
6707 	case SIGNAL_TYPE_RGB:
6708 		return DRM_MODE_CONNECTOR_VGA;
6709 	case SIGNAL_TYPE_DISPLAY_PORT:
6710 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6711 		return DRM_MODE_CONNECTOR_DisplayPort;
6712 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6713 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6714 		return DRM_MODE_CONNECTOR_DVID;
6715 	case SIGNAL_TYPE_VIRTUAL:
6716 		return DRM_MODE_CONNECTOR_VIRTUAL;
6717 
6718 	default:
6719 		return DRM_MODE_CONNECTOR_Unknown;
6720 	}
6721 }
6722 
6723 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6724 {
6725 	struct drm_encoder *encoder;
6726 
6727 	/* There is only one encoder per connector */
6728 	drm_connector_for_each_possible_encoder(connector, encoder)
6729 		return encoder;
6730 
6731 	return NULL;
6732 }
6733 
6734 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6735 {
6736 	struct drm_encoder *encoder;
6737 	struct amdgpu_encoder *amdgpu_encoder;
6738 
6739 	encoder = amdgpu_dm_connector_to_encoder(connector);
6740 
6741 	if (encoder == NULL)
6742 		return;
6743 
6744 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6745 
6746 	amdgpu_encoder->native_mode.clock = 0;
6747 
6748 	if (!list_empty(&connector->probed_modes)) {
6749 		struct drm_display_mode *preferred_mode = NULL;
6750 
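		/*
		 * probed_modes was sorted just before this is called (see
		 * amdgpu_dm_connector_ddc_get_modes()), with preferred modes
		 * first, so checking only the head entry is enough; the loop
		 * breaks unconditionally after the first iteration.
		 */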
6751 		list_for_each_entry(preferred_mode,
6752 				    &connector->probed_modes,
6753 				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
6761 }
6762 
6763 static struct drm_display_mode *
6764 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6765 			     char *name,
6766 			     int hdisplay, int vdisplay)
6767 {
6768 	struct drm_device *dev = encoder->dev;
6769 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6770 	struct drm_display_mode *mode = NULL;
6771 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6772 
6773 	mode = drm_mode_duplicate(dev, native_mode);
6774 
6775 	if (mode == NULL)
6776 		return NULL;
6777 
6778 	mode->hdisplay = hdisplay;
6779 	mode->vdisplay = vdisplay;
6780 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6781 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6782 
	return mode;
}
6786 
6787 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6788 						 struct drm_connector *connector)
6789 {
6790 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6791 	struct drm_display_mode *mode = NULL;
6792 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6793 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6794 				to_amdgpu_dm_connector(connector);
6795 	int i;
6796 	int n;
6797 	struct mode_size {
6798 		char name[DRM_DISPLAY_MODE_LEN];
6799 		int w;
6800 		int h;
6801 	} common_modes[] = {
6802 		{  "640x480",  640,  480},
6803 		{  "800x600",  800,  600},
6804 		{ "1024x768", 1024,  768},
6805 		{ "1280x720", 1280,  720},
6806 		{ "1280x800", 1280,  800},
6807 		{"1280x1024", 1280, 1024},
6808 		{ "1440x900", 1440,  900},
6809 		{"1680x1050", 1680, 1050},
6810 		{"1600x1200", 1600, 1200},
6811 		{"1920x1080", 1920, 1080},
6812 		{"1920x1200", 1920, 1200}
6813 	};
6814 
6815 	n = ARRAY_SIZE(common_modes);
6816 
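	/*
	 * Only add modes strictly smaller than the native mode: skip entries
	 * that exceed it in either dimension or duplicate it exactly, and
	 * entries already reported in the probed (EDID) mode list.
	 */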
6817 	for (i = 0; i < n; i++) {
6818 		struct drm_display_mode *curmode = NULL;
6819 		bool mode_existed = false;
6820 
6821 		if (common_modes[i].w > native_mode->hdisplay ||
6822 		    common_modes[i].h > native_mode->vdisplay ||
6823 		   (common_modes[i].w == native_mode->hdisplay &&
6824 		    common_modes[i].h == native_mode->vdisplay))
6825 			continue;
6826 
6827 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6828 			if (common_modes[i].w == curmode->hdisplay &&
6829 			    common_modes[i].h == curmode->vdisplay) {
6830 				mode_existed = true;
6831 				break;
6832 			}
6833 		}
6834 
6835 		if (mode_existed)
6836 			continue;
6837 
6838 		mode = amdgpu_dm_create_common_mode(encoder,
6839 				common_modes[i].name, common_modes[i].w,
6840 				common_modes[i].h);
6841 		if (!mode) /* drm_mode_duplicate() can fail on allocation */
			continue;
		drm_mode_probed_add(connector, mode);
6842 		amdgpu_dm_connector->num_modes++;
6843 	}
6844 }
6845 
6846 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6847 					      struct edid *edid)
6848 {
6849 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6850 			to_amdgpu_dm_connector(connector);
6851 
6852 	if (edid) {
6853 		/* empty probed_modes */
6854 		INIT_LIST_HEAD(&connector->probed_modes);
6855 		amdgpu_dm_connector->num_modes =
6856 				drm_add_edid_modes(connector, edid);
6857 
6858 		/* Sort the probed modes before calling
6859 		 * amdgpu_dm_get_native_mode(), since an EDID can have
6860 		 * more than one preferred mode. Modes later in the
6861 		 * probed list may have a higher, still preferred,
6862 		 * resolution. For example: a 3840x2160 preferred timing
6863 		 * in the base EDID and a 4096x2160 preferred resolution
6864 		 * in a DisplayID extension block that follows.
6865 		 */
6866 		drm_mode_sort(&connector->probed_modes);
6867 		amdgpu_dm_get_native_mode(connector);
6868 	} else {
6869 		amdgpu_dm_connector->num_modes = 0;
6870 	}
6871 }
6872 
6873 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6874 {
6875 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6876 			to_amdgpu_dm_connector(connector);
6877 	struct drm_encoder *encoder;
6878 	struct edid *edid = amdgpu_dm_connector->edid;
6879 
6880 	encoder = amdgpu_dm_connector_to_encoder(connector);
6881 
6882 	if (!drm_edid_is_valid(edid)) {
6883 		amdgpu_dm_connector->num_modes =
6884 				drm_add_modes_noedid(connector, 640, 480);
6885 	} else {
6886 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6887 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6888 	}
6889 	amdgpu_dm_fbc_init(connector);
6890 
6891 	return amdgpu_dm_connector->num_modes;
6892 }
6893 
6894 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6895 				     struct amdgpu_dm_connector *aconnector,
6896 				     int connector_type,
6897 				     struct dc_link *link,
6898 				     int link_index)
6899 {
6900 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6901 
6902 	/*
6903 	 * Some of the properties below require access to state, like bpc.
6904 	 * Allocate some default initial connector state with our reset helper.
6905 	 */
6906 	if (aconnector->base.funcs->reset)
6907 		aconnector->base.funcs->reset(&aconnector->base);
6908 
6909 	aconnector->connector_id = link_index;
6910 	aconnector->dc_link = link;
6911 	aconnector->base.interlace_allowed = false;
6912 	aconnector->base.doublescan_allowed = false;
6913 	aconnector->base.stereo_allowed = false;
6914 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6915 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6916 	aconnector->audio_inst = -1;
6917 	mutex_init(&aconnector->hpd_lock);
6918 
6919 	/*
6920 	 * Configure HPD hot-plug support. connector->polled defaults to 0,
6921 	 * which means HPD hot plug is not supported.
6922 	 */
6923 	switch (connector_type) {
6924 	case DRM_MODE_CONNECTOR_HDMIA:
6925 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6926 		aconnector->base.ycbcr_420_allowed =
6927 			link->link_enc->features.hdmi_ycbcr420_supported;
6928 		break;
6929 	case DRM_MODE_CONNECTOR_DisplayPort:
6930 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6931 		aconnector->base.ycbcr_420_allowed =
6932 			link->link_enc->features.dp_ycbcr420_supported;
6933 		break;
6934 	case DRM_MODE_CONNECTOR_DVID:
6935 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6936 		break;
6937 	default:
6938 		break;
6939 	}
6940 
6941 	drm_object_attach_property(&aconnector->base.base,
6942 				dm->ddev->mode_config.scaling_mode_property,
6943 				DRM_MODE_SCALE_NONE);
6944 
6945 	drm_object_attach_property(&aconnector->base.base,
6946 				adev->mode_info.underscan_property,
6947 				UNDERSCAN_OFF);
6948 	drm_object_attach_property(&aconnector->base.base,
6949 				adev->mode_info.underscan_hborder_property,
6950 				0);
6951 	drm_object_attach_property(&aconnector->base.base,
6952 				adev->mode_info.underscan_vborder_property,
6953 				0);
6954 
6955 	if (!aconnector->mst_port)
6956 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6957 
6958 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6959 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6960 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6961 
6962 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6963 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6964 		drm_object_attach_property(&aconnector->base.base,
6965 				adev->mode_info.abm_level_property, 0);
6966 	}
6967 
6968 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6969 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6970 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6971 		drm_object_attach_property(
6972 			&aconnector->base.base,
6973 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6974 
6975 		if (!aconnector->mst_port)
6976 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6977 
6978 #ifdef CONFIG_DRM_AMD_DC_HDCP
6979 		if (adev->dm.hdcp_workqueue)
6980 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6981 #endif
6982 	}
6983 }
6984 
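/*
 * i2c master_xfer hook: translate each struct i2c_msg into one DC i2c_payload
 * and submit the whole transaction via dc_submit_i2c(). Per the i2c algorithm
 * contract, return the number of messages transferred on success, or a
 * negative errno (-EIO here) on failure.
 */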
6985 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6986 			      struct i2c_msg *msgs, int num)
6987 {
6988 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6989 	struct ddc_service *ddc_service = i2c->ddc_service;
6990 	struct i2c_command cmd;
6991 	int i;
6992 	int result = -EIO;
6993 
6994 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6995 
6996 	if (!cmd.payloads)
6997 		return result;
6998 
6999 	cmd.number_of_payloads = num;
7000 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7001 	cmd.speed = 100;
7002 
7003 	for (i = 0; i < num; i++) {
7004 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7005 		cmd.payloads[i].address = msgs[i].addr;
7006 		cmd.payloads[i].length = msgs[i].len;
7007 		cmd.payloads[i].data = msgs[i].buf;
7008 	}
7009 
7010 	if (dc_submit_i2c(
7011 			ddc_service->ctx->dc,
7012 			ddc_service->ddc_pin->hw_info.ddc_channel,
7013 			&cmd))
7014 		result = num;
7015 
7016 	kfree(cmd.payloads);
7017 	return result;
7018 }
7019 
7020 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7021 {
7022 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7023 }
7024 
7025 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7026 	.master_xfer = amdgpu_dm_i2c_xfer,
7027 	.functionality = amdgpu_dm_i2c_func,
7028 };
7029 
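/*
 * Allocate and initialize an i2c adapter wrapping the given DDC service. The
 * caller owns the result: it must register it with i2c_add_adapter() and free
 * it on failure, as amdgpu_dm_connector_init() does below. Note that *res is
 * left untouched here.
 */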
7030 static struct amdgpu_i2c_adapter *
7031 create_i2c(struct ddc_service *ddc_service,
7032 	   int link_index,
7033 	   int *res)
7034 {
7035 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7036 	struct amdgpu_i2c_adapter *i2c;
7037 
7038 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7039 	if (!i2c)
7040 		return NULL;
7041 	i2c->base.owner = THIS_MODULE;
7042 	i2c->base.class = I2C_CLASS_DDC;
7043 	i2c->base.dev.parent = &adev->pdev->dev;
7044 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7045 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7046 	i2c_set_adapdata(&i2c->base, i2c);
7047 	i2c->ddc_service = ddc_service;
7048 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7049 
7050 	return i2c;
7051 }
7052 
7053 
7054 /*
7055  * Note: this function assumes that dc_link_detect() was called for the
7056  * dc_link which will be represented by this aconnector.
7057  */
7058 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7059 				    struct amdgpu_dm_connector *aconnector,
7060 				    uint32_t link_index,
7061 				    struct amdgpu_encoder *aencoder)
7062 {
7063 	int res = 0;
7064 	int connector_type;
7065 	struct dc *dc = dm->dc;
7066 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7067 	struct amdgpu_i2c_adapter *i2c;
7068 
7069 	link->priv = aconnector;
7070 
7071 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7072 
7073 	i2c = create_i2c(link->ddc, link->link_index, &res);
7074 	if (!i2c) {
7075 		DRM_ERROR("Failed to create i2c adapter data\n");
7076 		return -ENOMEM;
7077 	}
7078 
7079 	aconnector->i2c = i2c;
7080 	res = i2c_add_adapter(&i2c->base);
7081 
7082 	if (res) {
7083 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7084 		goto out_free;
7085 	}
7086 
7087 	connector_type = to_drm_connector_type(link->connector_signal);
7088 
7089 	res = drm_connector_init_with_ddc(
7090 			dm->ddev,
7091 			&aconnector->base,
7092 			&amdgpu_dm_connector_funcs,
7093 			connector_type,
7094 			&i2c->base);
7095 
7096 	if (res) {
7097 		DRM_ERROR("connector_init failed\n");
7098 		aconnector->connector_id = -1;
7099 		goto out_free;
7100 	}
7101 
7102 	drm_connector_helper_add(
7103 			&aconnector->base,
7104 			&amdgpu_dm_connector_helper_funcs);
7105 
7106 	amdgpu_dm_connector_init_helper(
7107 		dm,
7108 		aconnector,
7109 		connector_type,
7110 		link,
7111 		link_index);
7112 
7113 	drm_connector_attach_encoder(
7114 		&aconnector->base, &aencoder->base);
7115 
7116 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7117 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7118 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7119 
7120 out_free:
7121 	if (res) {
7122 		kfree(i2c);
7123 		aconnector->i2c = NULL;
7124 	}
7125 	return res;
7126 }
7127 
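/*
 * Return a bitmask with one bit set per enabled CRTC, capped at six CRTCs;
 * effectively GENMASK(min(num_crtc, 6) - 1, 0). For example, num_crtc == 4
 * yields 0xf, i.e. the encoder may be routed to any of CRTCs 0-3.
 */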
7128 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7129 {
7130 	switch (adev->mode_info.num_crtc) {
7131 	case 1:
7132 		return 0x1;
7133 	case 2:
7134 		return 0x3;
7135 	case 3:
7136 		return 0x7;
7137 	case 4:
7138 		return 0xf;
7139 	case 5:
7140 		return 0x1f;
7141 	case 6:
7142 	default:
7143 		return 0x3f;
7144 	}
7145 }
7146 
7147 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7148 				  struct amdgpu_encoder *aencoder,
7149 				  uint32_t link_index)
7150 {
7151 	struct amdgpu_device *adev = drm_to_adev(dev);
7152 
7153 	int res = drm_encoder_init(dev,
7154 				   &aencoder->base,
7155 				   &amdgpu_dm_encoder_funcs,
7156 				   DRM_MODE_ENCODER_TMDS,
7157 				   NULL);
7158 
7159 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7160 
7161 	if (!res)
7162 		aencoder->encoder_id = link_index;
7163 	else
7164 		aencoder->encoder_id = -1;
7165 
7166 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7167 
7168 	return res;
7169 }
7170 
7171 static void manage_dm_interrupts(struct amdgpu_device *adev,
7172 				 struct amdgpu_crtc *acrtc,
7173 				 bool enable)
7174 {
7175 	/*
7176 	 * We have no guarantee that the frontend index maps to the same
7177 	 * backend index - some even map to more than one.
7178 	 *
7179 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7180 	 */
7181 	int irq_type =
7182 		amdgpu_display_crtc_idx_to_irq_type(
7183 			adev,
7184 			acrtc->crtc_id);
7185 
7186 	if (enable) {
7187 		drm_crtc_vblank_on(&acrtc->base);
7188 		amdgpu_irq_get(
7189 			adev,
7190 			&adev->pageflip_irq,
7191 			irq_type);
7192 	} else {
7194 		amdgpu_irq_put(
7195 			adev,
7196 			&adev->pageflip_irq,
7197 			irq_type);
7198 		drm_crtc_vblank_off(&acrtc->base);
7199 	}
7200 }
7201 
7202 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7203 				      struct amdgpu_crtc *acrtc)
7204 {
7205 	int irq_type =
7206 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7207 
7208 	/*
7209 	 * This reads the current state of the IRQ and forcibly reapplies
7210 	 * the setting to hardware.
7211 	 */
7212 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7213 }
7214 
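/*
 * Return true when the connector's scaling mode or underscan borders changed
 * in a way that requires the stream's src/dst rectangles to be recomputed
 * and re-committed (see the scaling_changed handling in commit_tail).
 */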
7215 static bool
7216 is_scaling_state_different(const struct dm_connector_state *dm_state,
7217 			   const struct dm_connector_state *old_dm_state)
7218 {
7219 	if (dm_state->scaling != old_dm_state->scaling)
7220 		return true;
7221 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7222 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7223 			return true;
7224 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7225 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7226 			return true;
7227 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7228 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7229 		return true;
7230 	return false;
7231 }
7232 
7233 #ifdef CONFIG_DRM_AMD_DC_HDCP
7234 static bool is_content_protection_different(struct drm_connector_state *state,
7235 					    const struct drm_connector_state *old_state,
7236 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7237 {
7238 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7239 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7240 
7241 	/* Handle: Type0/1 change */
7242 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7243 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7244 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7245 		return true;
7246 	}
7247 
7248 	/* CP is being re-enabled; ignore this.
7249 	 *
7250 	 * Handles:	ENABLED -> DESIRED
7251 	 */
7252 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7253 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7254 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7255 		return false;
7256 	}
7257 
7258 	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7259 	 *
7260 	 * Handles:	UNDESIRED -> ENABLED
7261 	 */
7262 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7263 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7264 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7265 
7266 	/* Check that something is connected/enabled; otherwise we would start HDCP
7267 	 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
7268 	 *
7269 	 * Handles:	DESIRED -> DESIRED (Special case)
7270 	 */
7271 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7272 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7273 		dm_con_state->update_hdcp = false;
7274 		return true;
7275 	}
7276 
7277 	/*
7278 	 * Handles:	UNDESIRED -> UNDESIRED
7279 	 *		DESIRED -> DESIRED
7280 	 *		ENABLED -> ENABLED
7281 	 */
7282 	if (old_state->content_protection == state->content_protection)
7283 		return false;
7284 
7285 	/*
7286 	 * Handles:	UNDESIRED -> DESIRED
7287 	 *		DESIRED -> UNDESIRED
7288 	 *		ENABLED -> UNDESIRED
7289 	 */
7290 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7291 		return true;
7292 
7293 	/*
7294 	 * Handles:	DESIRED -> ENABLED
7295 	 */
7296 	return false;
7297 }
7298 
7299 #endif
7300 static void remove_stream(struct amdgpu_device *adev,
7301 			  struct amdgpu_crtc *acrtc,
7302 			  struct dc_stream_state *stream)
7303 {
7304 	/* this is the update mode case */
7305 
7306 	acrtc->otg_inst = -1;
7307 	acrtc->enabled = false;
7308 }
7309 
7310 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7311 			       struct dc_cursor_position *position)
7312 {
7313 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7314 	int x, y;
7315 	int xorigin = 0, yorigin = 0;
7316 
7317 	position->enable = false;
7318 	position->x = 0;
7319 	position->y = 0;
7320 
7321 	if (!crtc || !plane->state->fb)
7322 		return 0;
7323 
7324 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7325 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7326 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7327 			  __func__,
7328 			  plane->state->crtc_w,
7329 			  plane->state->crtc_h);
7330 		return -EINVAL;
7331 	}
7332 
7333 	x = plane->state->crtc_x;
7334 	y = plane->state->crtc_y;
7335 
7336 	if (x <= -amdgpu_crtc->max_cursor_width ||
7337 	    y <= -amdgpu_crtc->max_cursor_height)
7338 		return 0;
7339 
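	/*
	 * A negative position is clamped to 0 and compensated by moving the
	 * hotspot into the cursor surface: e.g. crtc_x == -10 yields x == 0
	 * with x_hotspot == 10, so the visible cursor stays aligned with the
	 * pointer while it crosses the screen edge.
	 */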
7340 	if (x < 0) {
7341 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7342 		x = 0;
7343 	}
7344 	if (y < 0) {
7345 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7346 		y = 0;
7347 	}
7348 	position->enable = true;
7349 	position->translate_by_source = true;
7350 	position->x = x;
7351 	position->y = y;
7352 	position->x_hotspot = xorigin;
7353 	position->y_hotspot = yorigin;
7354 
7355 	return 0;
7356 }
7357 
7358 static void handle_cursor_update(struct drm_plane *plane,
7359 				 struct drm_plane_state *old_plane_state)
7360 {
7361 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7362 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7363 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7364 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7365 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7366 	uint64_t address = afb ? afb->address : 0;
7367 	struct dc_cursor_position position;
7368 	struct dc_cursor_attributes attributes;
7369 	int ret;
7370 
7371 	if (!plane->state->fb && !old_plane_state->fb)
7372 		return;
7373 
7374 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7375 			 __func__,
7376 			 amdgpu_crtc->crtc_id,
7377 			 plane->state->crtc_w,
7378 			 plane->state->crtc_h);
7379 
7380 	ret = get_cursor_position(plane, crtc, &position);
7381 	if (ret)
7382 		return;
7383 
7384 	if (!position.enable) {
7385 		/* turn off cursor */
7386 		if (crtc_state && crtc_state->stream) {
7387 			mutex_lock(&adev->dm.dc_lock);
7388 			dc_stream_set_cursor_position(crtc_state->stream,
7389 						      &position);
7390 			mutex_unlock(&adev->dm.dc_lock);
7391 		}
7392 		return;
7393 	}
7394 
7395 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7396 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7397 
7398 	memset(&attributes, 0, sizeof(attributes));
7399 	attributes.address.high_part = upper_32_bits(address);
7400 	attributes.address.low_part  = lower_32_bits(address);
7401 	attributes.width             = plane->state->crtc_w;
7402 	attributes.height            = plane->state->crtc_h;
7403 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7404 	attributes.rotation_angle    = 0;
7405 	attributes.attribute_flags.value = 0;
7406 
7407 	attributes.pitch = attributes.width;
7408 
7409 	if (crtc_state->stream) {
7410 		mutex_lock(&adev->dm.dc_lock);
7411 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7412 							 &attributes))
7413 			DRM_ERROR("DC failed to set cursor attributes\n");
7414 
7415 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7416 						   &position))
7417 			DRM_ERROR("DC failed to set cursor position\n");
7418 		mutex_unlock(&adev->dm.dc_lock);
7419 	}
7420 }
7421 
7422 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7423 {
7425 	assert_spin_locked(&acrtc->base.dev->event_lock);
7426 	WARN_ON(acrtc->event);
7427 
7428 	acrtc->event = acrtc->base.state->event;
7429 
7430 	/* Set the flip status */
7431 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7432 
7433 	/* Mark this event as consumed */
7434 	acrtc->base.state->event = NULL;
7435 
7436 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7437 						 acrtc->crtc_id);
7438 }
7439 
7440 static void update_freesync_state_on_stream(
7441 	struct amdgpu_display_manager *dm,
7442 	struct dm_crtc_state *new_crtc_state,
7443 	struct dc_stream_state *new_stream,
7444 	struct dc_plane_state *surface,
7445 	u32 flip_timestamp_in_us)
7446 {
7447 	struct mod_vrr_params vrr_params;
7448 	struct dc_info_packet vrr_infopacket = {0};
7449 	struct amdgpu_device *adev = dm->adev;
7450 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7451 	unsigned long flags;
7452 
7453 	if (!new_stream)
7454 		return;
7455 
7456 	/*
7457 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7458 	 * For now it's sufficient to just guard against these conditions.
7459 	 */
7460 
7461 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7462 		return;
7463 
7464 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7465 	vrr_params = acrtc->dm_irq_params.vrr_params;
7466 
7467 	if (surface) {
7468 		mod_freesync_handle_preflip(
7469 			dm->freesync_module,
7470 			surface,
7471 			new_stream,
7472 			flip_timestamp_in_us,
7473 			&vrr_params);
7474 
7475 		if (adev->family < AMDGPU_FAMILY_AI &&
7476 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7477 			mod_freesync_handle_v_update(dm->freesync_module,
7478 						     new_stream, &vrr_params);
7479 
7480 			/* Need to call this before the frame ends. */
7481 			dc_stream_adjust_vmin_vmax(dm->dc,
7482 						   new_crtc_state->stream,
7483 						   &vrr_params.adjust);
7484 		}
7485 	}
7486 
7487 	mod_freesync_build_vrr_infopacket(
7488 		dm->freesync_module,
7489 		new_stream,
7490 		&vrr_params,
7491 		PACKET_TYPE_VRR,
7492 		TRANSFER_FUNC_UNKNOWN,
7493 		&vrr_infopacket);
7494 
7495 	new_crtc_state->freesync_timing_changed |=
7496 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7497 			&vrr_params.adjust,
7498 			sizeof(vrr_params.adjust)) != 0);
7499 
7500 	new_crtc_state->freesync_vrr_info_changed |=
7501 		(memcmp(&new_crtc_state->vrr_infopacket,
7502 			&vrr_infopacket,
7503 			sizeof(vrr_infopacket)) != 0);
7504 
7505 	acrtc->dm_irq_params.vrr_params = vrr_params;
7506 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7507 
7508 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7509 	new_stream->vrr_infopacket = vrr_infopacket;
7510 
7511 	if (new_crtc_state->freesync_vrr_info_changed)
7512 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7513 			      new_crtc_state->base.crtc->base.id,
7514 			      (int)new_crtc_state->base.vrr_enabled,
7515 			      (int)vrr_params.state);
7516 
7517 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7518 }
7519 
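/*
 * Recompute the VRR parameters from the new CRTC state and copy them into
 * acrtc->dm_irq_params under the event_lock, so the vblank/vupdate IRQ
 * handlers can read a consistent snapshot without touching atomic state.
 */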
7520 static void update_stream_irq_parameters(
7521 	struct amdgpu_display_manager *dm,
7522 	struct dm_crtc_state *new_crtc_state)
7523 {
7524 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7525 	struct mod_vrr_params vrr_params;
7526 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7527 	struct amdgpu_device *adev = dm->adev;
7528 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7529 	unsigned long flags;
7530 
7531 	if (!new_stream)
7532 		return;
7533 
7534 	/*
7535 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7536 	 * For now it's sufficient to just guard against these conditions.
7537 	 */
7538 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7539 		return;
7540 
7541 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7542 	vrr_params = acrtc->dm_irq_params.vrr_params;
7543 
7544 	if (new_crtc_state->vrr_supported &&
7545 	    config.min_refresh_in_uhz &&
7546 	    config.max_refresh_in_uhz) {
7547 		config.state = new_crtc_state->base.vrr_enabled ?
7548 			VRR_STATE_ACTIVE_VARIABLE :
7549 			VRR_STATE_INACTIVE;
7550 	} else {
7551 		config.state = VRR_STATE_UNSUPPORTED;
7552 	}
7553 
7554 	mod_freesync_build_vrr_params(dm->freesync_module,
7555 				      new_stream,
7556 				      &config, &vrr_params);
7557 
7558 	new_crtc_state->freesync_timing_changed |=
7559 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7560 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7561 
7562 	new_crtc_state->freesync_config = config;
7563 	/* Copy state for access from DM IRQ handler */
7564 	acrtc->dm_irq_params.freesync_config = config;
7565 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7566 	acrtc->dm_irq_params.vrr_params = vrr_params;
7567 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7568 }
7569 
7570 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7571 					    struct dm_crtc_state *new_state)
7572 {
7573 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7574 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7575 
7576 	if (!old_vrr_active && new_vrr_active) {
7577 		/* Transition VRR inactive -> active:
7578 		 * While VRR is active, we must not disable vblank irq, as a
7579 		 * reenable after disable would compute bogus vblank/pflip
7580 		 * timestamps if it likely happened inside display front-porch.
7581 		 *
7582 		 * We also need vupdate irq for the actual core vblank handling
7583 		 * at end of vblank.
7584 		 */
7585 		dm_set_vupdate_irq(new_state->base.crtc, true);
7586 		drm_crtc_vblank_get(new_state->base.crtc);
7587 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7588 				 __func__, new_state->base.crtc->base.id);
7589 	} else if (old_vrr_active && !new_vrr_active) {
7590 		/* Transition VRR active -> inactive:
7591 		 * Allow vblank irq disable again for fixed refresh rate.
7592 		 */
7593 		dm_set_vupdate_irq(new_state->base.crtc, false);
7594 		drm_crtc_vblank_put(new_state->base.crtc);
7595 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7596 				 __func__, new_state->base.crtc->base.id);
7597 	}
7598 }
7599 
7600 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7601 {
7602 	struct drm_plane *plane;
7603 	struct drm_plane_state *old_plane_state, *new_plane_state;
7604 	int i;
7605 
7606 	/*
7607 	 * TODO: Make this per-stream so we don't issue redundant updates for
7608 	 * commits with multiple streams.
7609 	 */
7610 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7611 				       new_plane_state, i)
7612 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7613 			handle_cursor_update(plane, old_plane_state);
7614 }
7615 
7616 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7617 				    struct dc_state *dc_state,
7618 				    struct drm_device *dev,
7619 				    struct amdgpu_display_manager *dm,
7620 				    struct drm_crtc *pcrtc,
7621 				    bool wait_for_vblank)
7622 {
7623 	uint32_t i;
7624 	uint64_t timestamp_ns;
7625 	struct drm_plane *plane;
7626 	struct drm_plane_state *old_plane_state, *new_plane_state;
7627 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7628 	struct drm_crtc_state *new_pcrtc_state =
7629 			drm_atomic_get_new_crtc_state(state, pcrtc);
7630 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7631 	struct dm_crtc_state *dm_old_crtc_state =
7632 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7633 	int planes_count = 0, vpos, hpos;
7634 	long r;
7635 	unsigned long flags;
7636 	struct amdgpu_bo *abo;
7637 	uint32_t target_vblank, last_flip_vblank;
7638 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7639 	bool pflip_present = false;
7640 	struct {
7641 		struct dc_surface_update surface_updates[MAX_SURFACES];
7642 		struct dc_plane_info plane_infos[MAX_SURFACES];
7643 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7644 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7645 		struct dc_stream_update stream_update;
7646 	} *bundle;
7647 
7648 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7649 
7650 	if (!bundle) {
7651 		dm_error("Failed to allocate update bundle\n");
7652 		goto cleanup;
7653 	}
7654 
7655 	/*
7656 	 * Disable the cursor first if we're disabling all the planes.
7657 	 * It'll remain on the screen after the planes are re-enabled
7658 	 * if we don't.
7659 	 */
7660 	if (acrtc_state->active_planes == 0)
7661 		amdgpu_dm_commit_cursors(state);
7662 
7663 	/* update planes when needed */
7664 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7665 		struct drm_crtc *crtc = new_plane_state->crtc;
7666 		struct drm_crtc_state *new_crtc_state;
7667 		struct drm_framebuffer *fb = new_plane_state->fb;
7668 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7669 		bool plane_needs_flip;
7670 		struct dc_plane_state *dc_plane;
7671 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7672 
7673 		/* Cursor plane is handled after stream updates */
7674 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7675 			continue;
7676 
7677 		if (!fb || !crtc || pcrtc != crtc)
7678 			continue;
7679 
7680 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7681 		if (!new_crtc_state->active)
7682 			continue;
7683 
7684 		dc_plane = dm_new_plane_state->dc_state;
7685 
7686 		bundle->surface_updates[planes_count].surface = dc_plane;
7687 		if (new_pcrtc_state->color_mgmt_changed) {
7688 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7689 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7690 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7691 		}
7692 
7693 		fill_dc_scaling_info(new_plane_state,
7694 				     &bundle->scaling_infos[planes_count]);
7695 
7696 		bundle->surface_updates[planes_count].scaling_info =
7697 			&bundle->scaling_infos[planes_count];
7698 
7699 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7700 
7701 		pflip_present = pflip_present || plane_needs_flip;
7702 
7703 		if (!plane_needs_flip) {
7704 			planes_count += 1;
7705 			continue;
7706 		}
7707 
7708 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7709 
7710 		/*
7711 		 * Wait for all fences on this FB. Do limited wait to avoid
7712 		 * deadlock during GPU reset when this fence will not signal
7713 		 * but we hold reservation lock for the BO.
7714 		 */
7715 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7716 							false,
7717 							msecs_to_jiffies(5000));
7718 		if (unlikely(r <= 0))
7719 			DRM_ERROR("Waiting for fences timed out!");
7720 
7721 		fill_dc_plane_info_and_addr(
7722 			dm->adev, new_plane_state,
7723 			afb->tiling_flags,
7724 			&bundle->plane_infos[planes_count],
7725 			&bundle->flip_addrs[planes_count].address,
7726 			afb->tmz_surface, false);
7727 
7728 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7729 				 new_plane_state->plane->index,
7730 				 bundle->plane_infos[planes_count].dcc.enable);
7731 
7732 		bundle->surface_updates[planes_count].plane_info =
7733 			&bundle->plane_infos[planes_count];
7734 
7735 		/*
7736 		 * Only allow immediate flips for fast updates that don't
7737 		 * change FB pitch, DCC state, rotation or mirroring.
7738 		 */
7739 		bundle->flip_addrs[planes_count].flip_immediate =
7740 			crtc->state->async_flip &&
7741 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7742 
7743 		timestamp_ns = ktime_get_ns();
7744 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7745 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7746 		bundle->surface_updates[planes_count].surface = dc_plane;
7747 
7748 		if (!bundle->surface_updates[planes_count].surface) {
7749 			DRM_ERROR("No surface for CRTC: id=%d\n",
7750 					acrtc_attach->crtc_id);
7751 			continue;
7752 		}
7753 
7754 		if (plane == pcrtc->primary)
7755 			update_freesync_state_on_stream(
7756 				dm,
7757 				acrtc_state,
7758 				acrtc_state->stream,
7759 				dc_plane,
7760 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7761 
7762 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7763 				 __func__,
7764 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7765 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7766 
7767 		planes_count += 1;
7768 
7769 	}
7770 
7771 	if (pflip_present) {
7772 		if (!vrr_active) {
7773 			/* Use old throttling in non-vrr fixed refresh rate mode
7774 			 * to keep flip scheduling based on target vblank counts
7775 			 * working in a backwards compatible way, e.g., for
7776 			 * clients using the GLX_OML_sync_control extension or
7777 			 * DRI3/Present extension with defined target_msc.
7778 			 */
7779 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7780 		} else {
7782 			/* For variable refresh rate mode only:
7783 			 * Get vblank of last completed flip to avoid > 1 vrr
7784 			 * flips per video frame by use of throttling, but allow
7785 			 * flip programming anywhere in the possibly large
7786 			 * variable vrr vblank interval for fine-grained flip
7787 			 * timing control and more opportunity to avoid stutter
7788 			 * on late submission of flips.
7789 			 */
7790 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7791 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7792 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7793 		}
7794 
7795 		target_vblank = last_flip_vblank + wait_for_vblank;
7796 
7797 		/*
7798 		 * Wait until we're out of the vertical blank period before the one
7799 		 * targeted by the flip
7800 		 */
7801 		while ((acrtc_attach->enabled &&
7802 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7803 							    0, &vpos, &hpos, NULL,
7804 							    NULL, &pcrtc->hwmode)
7805 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7806 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7807 			(int)(target_vblank -
7808 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7809 			usleep_range(1000, 1100);
7810 		}
7811 
7812 		/*
7813 		 * Prepare the flip event for the pageflip interrupt to handle.
7814 		 *
7815 		 * This only works in the case where we've already turned on the
7816 		 * appropriate hardware blocks (e.g. HUBP), so in the transition
7817 		 * from 0 -> n planes we have to skip a hardware generated event
7818 		 * and rely on sending it from software.
7819 		 */
7820 		if (acrtc_attach->base.state->event &&
7821 		    acrtc_state->active_planes > 0) {
7822 			drm_crtc_vblank_get(pcrtc);
7823 
7824 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7825 
7826 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7827 			prepare_flip_isr(acrtc_attach);
7828 
7829 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7830 		}
7831 
7832 		if (acrtc_state->stream) {
7833 			if (acrtc_state->freesync_vrr_info_changed)
7834 				bundle->stream_update.vrr_infopacket =
7835 					&acrtc_state->stream->vrr_infopacket;
7836 		}
7837 	}
7838 
7839 	/* Update the planes if changed or disable if we don't have any. */
7840 	if ((planes_count || acrtc_state->active_planes == 0) &&
7841 		acrtc_state->stream) {
7842 		bundle->stream_update.stream = acrtc_state->stream;
7843 		if (new_pcrtc_state->mode_changed) {
7844 			bundle->stream_update.src = acrtc_state->stream->src;
7845 			bundle->stream_update.dst = acrtc_state->stream->dst;
7846 		}
7847 
7848 		if (new_pcrtc_state->color_mgmt_changed) {
7849 			/*
7850 			 * TODO: This isn't fully correct since we've actually
7851 			 * already modified the stream in place.
7852 			 */
7853 			bundle->stream_update.gamut_remap =
7854 				&acrtc_state->stream->gamut_remap_matrix;
7855 			bundle->stream_update.output_csc_transform =
7856 				&acrtc_state->stream->csc_color_matrix;
7857 			bundle->stream_update.out_transfer_func =
7858 				acrtc_state->stream->out_transfer_func;
7859 		}
7860 
7861 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7862 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7863 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7864 
7865 		/*
7866 		 * If FreeSync state on the stream has changed then we need to
7867 		 * re-adjust the min/max bounds now that DC doesn't handle this
7868 		 * as part of commit.
7869 		 */
7870 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7871 		    amdgpu_dm_vrr_active(acrtc_state)) {
7872 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7873 			dc_stream_adjust_vmin_vmax(
7874 				dm->dc, acrtc_state->stream,
7875 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7876 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7877 		}
7878 		mutex_lock(&dm->dc_lock);
7879 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7880 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7881 			amdgpu_dm_psr_disable(acrtc_state->stream);
7882 
7883 		dc_commit_updates_for_stream(dm->dc,
7884 						     bundle->surface_updates,
7885 						     planes_count,
7886 						     acrtc_state->stream,
7887 						     &bundle->stream_update,
7888 						     dc_state);
7889 
7890 		/*
7891 		 * Enable or disable the interrupts on the backend.
7892 		 *
7893 		 * Most pipes are put into power gating when unused.
7894 		 *
7895 		 * When power gating is enabled on a pipe we lose the
7896 		 * interrupt enablement state when power gating is disabled.
7897 		 *
7898 		 * So we need to update the IRQ control state in hardware
7899 		 * whenever the pipe turns on (since it could be previously
7900 		 * power gated) or off (since some pipes can't be power gated
7901 		 * on some ASICs).
7902 		 */
7903 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7904 			dm_update_pflip_irq_state(drm_to_adev(dev),
7905 						  acrtc_attach);
7906 
7907 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7908 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7909 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7910 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7911 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7912 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7913 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7914 			amdgpu_dm_psr_enable(acrtc_state->stream);
7915 		}
7916 
7917 		mutex_unlock(&dm->dc_lock);
7918 	}
7919 
7920 	/*
7921 	 * Update cursor state *after* programming all the planes.
7922 	 * This avoids redundant programming in the case where we're going
7923 	 * to be disabling a single plane - those pipes are being disabled.
7924 	 */
7925 	if (acrtc_state->active_planes)
7926 		amdgpu_dm_commit_cursors(state);
7927 
7928 cleanup:
7929 	kfree(bundle);
7930 }
7931 
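/*
 * Walk the connector states and forward ELD notifications to the audio
 * driver: removals first (audio_inst cleared to -1), then additions using
 * the instance reported by the committed DC stream status.
 */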
7932 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7933 				   struct drm_atomic_state *state)
7934 {
7935 	struct amdgpu_device *adev = drm_to_adev(dev);
7936 	struct amdgpu_dm_connector *aconnector;
7937 	struct drm_connector *connector;
7938 	struct drm_connector_state *old_con_state, *new_con_state;
7939 	struct drm_crtc_state *new_crtc_state;
7940 	struct dm_crtc_state *new_dm_crtc_state;
7941 	const struct dc_stream_status *status;
7942 	int i, inst;
7943 
7944 	/* Notify audio device removals. */
7945 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7946 		if (old_con_state->crtc != new_con_state->crtc) {
7947 			/* CRTC changes require notification. */
7948 			goto notify;
7949 		}
7950 
7951 		if (!new_con_state->crtc)
7952 			continue;
7953 
7954 		new_crtc_state = drm_atomic_get_new_crtc_state(
7955 			state, new_con_state->crtc);
7956 
7957 		if (!new_crtc_state)
7958 			continue;
7959 
7960 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7961 			continue;
7962 
7963 	notify:
7964 		aconnector = to_amdgpu_dm_connector(connector);
7965 
7966 		mutex_lock(&adev->dm.audio_lock);
7967 		inst = aconnector->audio_inst;
7968 		aconnector->audio_inst = -1;
7969 		mutex_unlock(&adev->dm.audio_lock);
7970 
7971 		amdgpu_dm_audio_eld_notify(adev, inst);
7972 	}
7973 
7974 	/* Notify audio device additions. */
7975 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7976 		if (!new_con_state->crtc)
7977 			continue;
7978 
7979 		new_crtc_state = drm_atomic_get_new_crtc_state(
7980 			state, new_con_state->crtc);
7981 
7982 		if (!new_crtc_state)
7983 			continue;
7984 
7985 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7986 			continue;
7987 
7988 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7989 		if (!new_dm_crtc_state->stream)
7990 			continue;
7991 
7992 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7993 		if (!status)
7994 			continue;
7995 
7996 		aconnector = to_amdgpu_dm_connector(connector);
7997 
7998 		mutex_lock(&adev->dm.audio_lock);
7999 		inst = status->audio_inst;
8000 		aconnector->audio_inst = inst;
8001 		mutex_unlock(&adev->dm.audio_lock);
8002 
8003 		amdgpu_dm_audio_eld_notify(adev, inst);
8004 	}
8005 }
8006 
8007 /*
8008  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8009  * @crtc_state: the DRM CRTC state
8010  * @stream_state: the DC stream state.
8011  *
8012  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8013  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8014  */
8015 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8016 						struct dc_stream_state *stream_state)
8017 {
8018 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8019 }
8020 
8021 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
8022 				   struct drm_atomic_state *state,
8023 				   bool nonblock)
8024 {
8025 	/*
8026 	 * Add check here for SoC's that support hardware cursor plane, to
8027 	 * unset legacy_cursor_update
8028 	 */
8029 
8030 	/* TODO: handle EINTR, re-enable IRQ */
8031 
8032 	return drm_atomic_helper_commit(dev, state, nonblock);
8033 }
8034 
8035 /**
8036  * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
8037  * @state: The atomic state to commit
8038  *
8039  * This will tell DC to commit the constructed DC state from atomic_check,
8040  * programming the hardware. Any failure here implies a hardware failure, since
8041  * atomic check should have filtered anything non-kosher.
8042  */
8043 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8044 {
8045 	struct drm_device *dev = state->dev;
8046 	struct amdgpu_device *adev = drm_to_adev(dev);
8047 	struct amdgpu_display_manager *dm = &adev->dm;
8048 	struct dm_atomic_state *dm_state;
8049 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8050 	uint32_t i, j;
8051 	struct drm_crtc *crtc;
8052 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8053 	unsigned long flags;
8054 	bool wait_for_vblank = true;
8055 	struct drm_connector *connector;
8056 	struct drm_connector_state *old_con_state, *new_con_state;
8057 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8058 	int crtc_disable_count = 0;
8059 	bool mode_set_reset_required = false;
8060 
8061 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8062 
8063 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8064 
8065 	dm_state = dm_atomic_get_new_state(state);
8066 	if (dm_state && dm_state->context) {
8067 		dc_state = dm_state->context;
8068 	} else {
8069 		/* No state changes, retain current state. */
8070 		dc_state_temp = dc_create_state(dm->dc);
8071 		ASSERT(dc_state_temp);
8072 		dc_state = dc_state_temp;
8073 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8074 	}
8075 
8076 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8077 				       new_crtc_state, i) {
8078 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8079 
8080 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8081 
8082 		if (old_crtc_state->active &&
8083 		    (!new_crtc_state->active ||
8084 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8085 			manage_dm_interrupts(adev, acrtc, false);
8086 			dc_stream_release(dm_old_crtc_state->stream);
8087 		}
8088 	}
8089 
8090 	drm_atomic_helper_calc_timestamping_constants(state);
8091 
8092 	/* update changed items */
8093 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8094 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8095 
8096 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8097 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8098 
8099 		DRM_DEBUG_DRIVER(
8100 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8101 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8102 			"connectors_changed:%d\n",
8103 			acrtc->crtc_id,
8104 			new_crtc_state->enable,
8105 			new_crtc_state->active,
8106 			new_crtc_state->planes_changed,
8107 			new_crtc_state->mode_changed,
8108 			new_crtc_state->active_changed,
8109 			new_crtc_state->connectors_changed);
8110 
8111 		/* Disable cursor if disabling crtc */
8112 		if (old_crtc_state->active && !new_crtc_state->active) {
8113 			struct dc_cursor_position position;
8114 
8115 			memset(&position, 0, sizeof(position));
8116 			mutex_lock(&dm->dc_lock);
8117 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8118 			mutex_unlock(&dm->dc_lock);
8119 		}
8120 
8121 		/* Copy all transient state flags into dc state */
8122 		if (dm_new_crtc_state->stream) {
8123 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8124 							    dm_new_crtc_state->stream);
8125 		}
8126 
8127 		/* Handle the headless hotplug case, updating new_state and
8128 		 * aconnector as needed.
8129 		 */
8130 
8131 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8132 
8133 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8134 
8135 			if (!dm_new_crtc_state->stream) {
8136 				/*
8137 				 * This could happen because of issues with
8138 				 * userspace notification delivery.
8139 				 * In this case userspace tries to set a mode
8140 				 * on a display that is actually disconnected.
8141 				 * dc_sink is NULL in this case on aconnector.
8142 				 * We expect a mode reset to come soon.
8143 				 *
8144 				 * This can also happen when an unplug occurs
8145 				 * during the resume sequence.
8146 				 *
8147 				 * In either case, we want to pretend we still
8148 				 * have a sink to keep the pipe running so that
8149 				 * hw state is consistent with the sw state.
8150 				 */
8151 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8152 						__func__, acrtc->base.base.id);
8153 				continue;
8154 			}
8155 
8156 			if (dm_old_crtc_state->stream)
8157 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8158 
8159 			pm_runtime_get_noresume(dev->dev);
8160 
8161 			acrtc->enabled = true;
8162 			acrtc->hw_mode = new_crtc_state->mode;
8163 			crtc->hwmode = new_crtc_state->mode;
8164 			mode_set_reset_required = true;
8165 		} else if (modereset_required(new_crtc_state)) {
8166 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8167 			/* i.e. reset mode */
8168 			if (dm_old_crtc_state->stream)
8169 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8170 			mode_set_reset_required = true;
8171 		}
8172 	} /* for_each_crtc_in_state() */
8173 
8174 	if (dc_state) {
8175 		/* if there is a mode set or reset, disable eDP PSR */
8176 		if (mode_set_reset_required)
8177 			amdgpu_dm_psr_disable_all(dm);
8178 
8179 		dm_enable_per_frame_crtc_master_sync(dc_state);
8180 		mutex_lock(&dm->dc_lock);
8181 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8182 		mutex_unlock(&dm->dc_lock);
8183 	}
8184 
8185 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8186 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8187 
8188 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8189 
8190 		if (dm_new_crtc_state->stream != NULL) {
8191 			const struct dc_stream_status *status =
8192 					dc_stream_get_status(dm_new_crtc_state->stream);
8193 
8194 			if (!status)
8195 				status = dc_stream_get_status_from_state(dc_state,
8196 									 dm_new_crtc_state->stream);
8197 			if (!status)
8198 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8199 			else
8200 				acrtc->otg_inst = status->primary_otg_inst;
8201 		}
8202 	}
8203 #ifdef CONFIG_DRM_AMD_DC_HDCP
8204 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8205 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8206 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8207 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8208 
8209 		new_crtc_state = NULL;
8210 
8211 		if (acrtc)
8212 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8213 
8214 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8215 
8216 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8217 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8218 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8219 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8220 			dm_new_con_state->update_hdcp = true;
8221 			continue;
8222 		}
8223 
8224 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8225 			hdcp_update_display(
8226 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8227 				new_con_state->hdcp_content_type,
8228 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8229 													 : false);
8230 	}
8231 #endif
8232 
8233 	/* Handle connector state changes */
8234 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8235 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8236 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8237 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8238 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8239 		struct dc_stream_update stream_update;
8240 		struct dc_info_packet hdr_packet;
8241 		struct dc_stream_status *status = NULL;
8242 		bool abm_changed, hdr_changed, scaling_changed;
8243 
8244 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8245 		memset(&stream_update, 0, sizeof(stream_update));
8246 
8247 		if (acrtc) {
8248 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8249 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8250 		}
8251 
8252 		/* Skip any modesets/resets */
8253 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8254 			continue;
8255 
8256 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8257 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8258 
8259 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8260 							     dm_old_con_state);
8261 
8262 		abm_changed = dm_new_crtc_state->abm_level !=
8263 			      dm_old_crtc_state->abm_level;
8264 
8265 		hdr_changed =
8266 			is_hdr_metadata_different(old_con_state, new_con_state);
8267 
8268 		if (!scaling_changed && !abm_changed && !hdr_changed)
8269 			continue;
8270 
8271 		stream_update.stream = dm_new_crtc_state->stream;
8272 		if (scaling_changed) {
8273 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8274 					dm_new_con_state, dm_new_crtc_state->stream);
8275 
8276 			stream_update.src = dm_new_crtc_state->stream->src;
8277 			stream_update.dst = dm_new_crtc_state->stream->dst;
8278 		}
8279 
8280 		if (abm_changed) {
8281 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8282 
8283 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8284 		}
8285 
8286 		if (hdr_changed) {
8287 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8288 			stream_update.hdr_static_metadata = &hdr_packet;
8289 		}
8290 
8291 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8292 		WARN_ON(!status);
8293 		WARN_ON(!status->plane_count);
8294 
8295 		/*
8296 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8297 		 * Here we create an empty update on each plane.
8298 		 * To fix this, DC should permit updating only stream properties.
8299 		 */
8300 		for (j = 0; j < status->plane_count; j++)
8301 			dummy_updates[j].surface = status->plane_states[0];
8302 
8303 
8304 		mutex_lock(&dm->dc_lock);
8305 		dc_commit_updates_for_stream(dm->dc,
8306 						     dummy_updates,
8307 						     status->plane_count,
8308 						     dm_new_crtc_state->stream,
8309 						     &stream_update,
8310 						     dc_state);
8311 		mutex_unlock(&dm->dc_lock);
8312 	}
8313 
8314 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8315 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8316 				      new_crtc_state, i) {
8317 		if (old_crtc_state->active && !new_crtc_state->active)
8318 			crtc_disable_count++;
8319 
8320 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8321 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8322 
8323 		/* For freesync config update on crtc state and params for irq */
8324 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8325 
8326 		/* Handle vrr on->off / off->on transitions */
8327 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8328 						dm_new_crtc_state);
8329 	}
8330 
8331 	/*
8332 	 * Enable interrupts for CRTCs that are newly enabled or went through
8333 	 * a modeset. It was intentionally deferred until after the front end
8334 	 * state was modified to wait until the OTG was on and so the IRQ
8335 	 * handlers didn't access stale or invalid state.
8336 	 */
8337 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8338 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8339 		bool configure_crc = false;
8340 
8341 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8342 
8343 		if (new_crtc_state->active &&
8344 		    (!old_crtc_state->active ||
8345 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8346 			dc_stream_retain(dm_new_crtc_state->stream);
8347 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8348 			manage_dm_interrupts(adev, acrtc, true);
8349 		}
8350 #ifdef CONFIG_DEBUG_FS
8351 		if (new_crtc_state->active &&
8352 			amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8353 			/*
8354 			 * Frontend may have changed so reapply the CRC capture
8355 			 * settings for the stream.
8356 			 */
8357 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8358 			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8359 
8360 			if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
8361 				if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
8362 					configure_crc = true;
8363 			} else {
8364 				if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
8365 					configure_crc = true;
8366 			}
8367 
8368 			if (configure_crc)
8369 				amdgpu_dm_crtc_configure_crc_source(
8370 					crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
8371 		}
8372 #endif
8373 	}
8374 
8375 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8376 		if (new_crtc_state->async_flip)
8377 			wait_for_vblank = false;
8378 
8379 	/* update planes when needed per crtc*/
8380 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8381 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8382 
8383 		if (dm_new_crtc_state->stream)
8384 			amdgpu_dm_commit_planes(state, dc_state, dev,
8385 						dm, crtc, wait_for_vblank);
8386 	}
8387 
8388 	/* Update audio instances for each connector. */
8389 	amdgpu_dm_commit_audio(dev, state);
8390 
8391 	/*
8392 	 * send vblank event on all events not handled in flip and
8393 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8394 	 */
8395 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->event)
8399 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8400 
8401 		new_crtc_state->event = NULL;
8402 	}
8403 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8404 
8405 	/* Signal HW programming completion */
8406 	drm_atomic_helper_commit_hw_done(state);
8407 
8408 	if (wait_for_vblank)
8409 		drm_atomic_helper_wait_for_flip_done(dev, state);
8410 
8411 	drm_atomic_helper_cleanup_planes(dev, state);
8412 
8413 	/*
8414 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8415 	 * so we can put the GPU into runtime suspend if we're not driving any
8416 	 * displays anymore
8417 	 */
8418 	for (i = 0; i < crtc_disable_count; i++)
8419 		pm_runtime_put_autosuspend(dev->dev);
8420 	pm_runtime_mark_last_busy(dev->dev);
8421 
8422 	if (dc_state_temp)
8423 		dc_release_state(dc_state_temp);
8424 }
8425 
8426 
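/*
 * Construct a minimal atomic state covering the connector, its CRTC and the
 * CRTC's primary plane, then commit it to restore the previous display
 * configuration without a userspace modeset.
 */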
8427 static int dm_force_atomic_commit(struct drm_connector *connector)
8428 {
8429 	int ret = 0;
8430 	struct drm_device *ddev = connector->dev;
8431 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8432 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8433 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8434 	struct drm_connector_state *conn_state;
8435 	struct drm_crtc_state *crtc_state;
8436 	struct drm_plane_state *plane_state;
8437 
8438 	if (!state)
8439 		return -ENOMEM;
8440 
8441 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8442 
	/* Construct an atomic state to restore the previous display setting */

	/* Attach the connector to the drm_atomic_state */
8448 	conn_state = drm_atomic_get_connector_state(state, connector);
8449 
8450 	ret = PTR_ERR_OR_ZERO(conn_state);
8451 	if (ret)
8452 		goto err;
8453 
	/* Attach the CRTC to the drm_atomic_state */
8455 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8456 
8457 	ret = PTR_ERR_OR_ZERO(crtc_state);
8458 	if (ret)
8459 		goto err;
8460 
8461 	/* force a restore */
8462 	crtc_state->mode_changed = true;
8463 
8464 	/* Attach plane to drm_atomic_state */
8465 	plane_state = drm_atomic_get_plane_state(state, plane);
8466 
8467 	ret = PTR_ERR_OR_ZERO(plane_state);
8468 	if (ret)
8469 		goto err;
8470 
8471 
8472 	/* Call commit internally with the state we just constructed */
8473 	ret = drm_atomic_commit(state);
8474 	if (!ret)
8475 		return 0;
8476 
8477 err:
8478 	DRM_ERROR("Restoring old state failed with %i\n", ret);
8479 	drm_atomic_state_put(state);
8480 
8481 	return ret;
8482 }
8483 
8484 /*
8485  * This function handles all cases when set mode does not come upon hotplug.
8486  * This includes when a display is unplugged then plugged back into the
8487  * same port and when running without usermode desktop manager supprot
8488  */
8489 void dm_restore_drm_connector_state(struct drm_device *dev,
8490 				    struct drm_connector *connector)
8491 {
8492 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8493 	struct amdgpu_crtc *disconnected_acrtc;
8494 	struct dm_crtc_state *acrtc_state;
8495 
8496 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8497 		return;
8498 
8499 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8500 	if (!disconnected_acrtc)
8501 		return;
8502 
8503 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8504 	if (!acrtc_state->stream)
8505 		return;
8506 
8507 	/*
8508 	 * If the previous sink is not released and different from the current,
8509 	 * we deduce we are in a state where we can not rely on usermode call
8510 	 * to turn on the display, so we do it here
8511 	 */
8512 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8513 		dm_force_atomic_commit(&aconnector->base);
8514 }
8515 
8516 /*
8517  * Grabs all modesetting locks to serialize against any blocking commits,
8518  * Waits for completion of all non blocking commits.
8519  */
8520 static int do_aquire_global_lock(struct drm_device *dev,
8521 				 struct drm_atomic_state *state)
8522 {
8523 	struct drm_crtc *crtc;
8524 	struct drm_crtc_commit *commit;
8525 	long ret;
8526 
8527 	/*
8528 	 * Adding all modeset locks to aquire_ctx will
8529 	 * ensure that when the framework release it the
8530 	 * extra locks we are locking here will get released to
8531 	 */
8532 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8533 	if (ret)
8534 		return ret;
8535 
8536 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8537 		spin_lock(&crtc->commit_lock);
8538 		commit = list_first_entry_or_null(&crtc->commit_list,
8539 				struct drm_crtc_commit, commit_entry);
8540 		if (commit)
8541 			drm_crtc_commit_get(commit);
8542 		spin_unlock(&crtc->commit_lock);
8543 
8544 		if (!commit)
8545 			continue;
8546 
8547 		/*
8548 		 * Make sure all pending HW programming completed and
8549 		 * page flips done
8550 		 */
8551 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8552 
8553 		if (ret > 0)
8554 			ret = wait_for_completion_interruptible_timeout(
8555 					&commit->flip_done, 10*HZ);
8556 
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
8560 
8561 		drm_crtc_commit_put(commit);
8562 	}
8563 
8564 	return ret < 0 ? ret : 0;
8565 }
8566 
8567 static void get_freesync_config_for_crtc(
8568 	struct dm_crtc_state *new_crtc_state,
8569 	struct dm_connector_state *new_con_state)
8570 {
8571 	struct mod_freesync_config config = {0};
8572 	struct amdgpu_dm_connector *aconnector =
8573 			to_amdgpu_dm_connector(new_con_state->base.connector);
8574 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8575 	int vrefresh = drm_mode_vrefresh(mode);
8576 
8577 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8578 					vrefresh >= aconnector->min_vfreq &&
8579 					vrefresh <= aconnector->max_vfreq;
8580 
8581 	if (new_crtc_state->vrr_supported) {
8582 		new_crtc_state->stream->ignore_msa_timing_param = true;
8583 		config.state = new_crtc_state->base.vrr_enabled ?
8584 				VRR_STATE_ACTIVE_VARIABLE :
8585 				VRR_STATE_INACTIVE;
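		/* mod_freesync expects the refresh range in micro-Hz */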
8586 		config.min_refresh_in_uhz =
8587 				aconnector->min_vfreq * 1000000;
8588 		config.max_refresh_in_uhz =
8589 				aconnector->max_vfreq * 1000000;
8590 		config.vsif_supported = true;
8591 		config.btr = true;
8592 	}
8593 
8594 	new_crtc_state->freesync_config = config;
8595 }
8596 
8597 static void reset_freesync_config_for_crtc(
8598 	struct dm_crtc_state *new_crtc_state)
8599 {
8600 	new_crtc_state->vrr_supported = false;
8601 
8602 	memset(&new_crtc_state->vrr_infopacket, 0,
8603 	       sizeof(new_crtc_state->vrr_infopacket));
8604 }
8605 
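/*
 * Called from atomic check, once with enable == false to remove the stream
 * of any disabled/changed CRTC from the DC context, and once with
 * enable == true to (re)create and add the new stream. Sets
 * *lock_and_validation_needed whenever the DC context is modified.
 */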
8606 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8607 				struct drm_atomic_state *state,
8608 				struct drm_crtc *crtc,
8609 				struct drm_crtc_state *old_crtc_state,
8610 				struct drm_crtc_state *new_crtc_state,
8611 				bool enable,
8612 				bool *lock_and_validation_needed)
8613 {
8614 	struct dm_atomic_state *dm_state = NULL;
8615 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8616 	struct dc_stream_state *new_stream;
8617 	int ret = 0;
8618 
8619 	/*
8620 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8621 	 * update changed items
8622 	 */
8623 	struct amdgpu_crtc *acrtc = NULL;
8624 	struct amdgpu_dm_connector *aconnector = NULL;
8625 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8626 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8627 
8628 	new_stream = NULL;
8629 
8630 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8631 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8632 	acrtc = to_amdgpu_crtc(crtc);
8633 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8634 
8635 	/* TODO This hack should go away */
8636 	if (aconnector && enable) {
8637 		/* Make sure fake sink is created in plug-in scenario */
8638 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8639 							    &aconnector->base);
8640 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8641 							    &aconnector->base);
8642 
8643 		if (IS_ERR(drm_new_conn_state)) {
8644 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8645 			goto fail;
8646 		}
8647 
8648 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8649 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8650 
8651 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8652 			goto skip_modeset;
8653 
8654 		new_stream = create_validate_stream_for_sink(aconnector,
8655 							     &new_crtc_state->mode,
8656 							     dm_new_conn_state,
8657 							     dm_old_crtc_state->stream);
8658 
8659 		/*
8660 		 * we can have no stream on ACTION_SET if a display
8661 		 * was disconnected during S3, in this case it is not an
8662 		 * error, the OS will be updated after detection, and
8663 		 * will do the right thing on next atomic commit
8664 		 */
8665 
8666 		if (!new_stream) {
8667 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8668 					__func__, acrtc->base.base.id);
8669 			ret = -ENOMEM;
8670 			goto fail;
8671 		}
8672 
8673 		/*
8674 		 * TODO: Check VSDB bits to decide whether this should
8675 		 * be enabled or not.
8676 		 */
8677 		new_stream->triggered_crtc_reset.enabled =
8678 			dm->force_timing_sync;
8679 
8680 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8681 
8682 		ret = fill_hdr_info_packet(drm_new_conn_state,
8683 					   &new_stream->hdr_static_metadata);
8684 		if (ret)
8685 			goto fail;
8686 
8687 		/*
8688 		 * If we already removed the old stream from the context
8689 		 * (and set the new stream to NULL) then we can't reuse
8690 		 * the old stream even if the stream and scaling are unchanged.
8691 		 * We'll hit the BUG_ON and black screen.
8692 		 *
8693 		 * TODO: Refactor this function to allow this check to work
8694 		 * in all conditions.
8695 		 */
8696 		if (dm_new_crtc_state->stream &&
8697 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8698 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8699 			new_crtc_state->mode_changed = false;
8700 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8701 					 new_crtc_state->mode_changed);
8702 		}
8703 	}
8704 
8705 	/* mode_changed flag may get updated above, need to check again */
8706 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8707 		goto skip_modeset;
8708 
8709 	DRM_DEBUG_DRIVER(
8710 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8711 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8712 		"connectors_changed:%d\n",
8713 		acrtc->crtc_id,
8714 		new_crtc_state->enable,
8715 		new_crtc_state->active,
8716 		new_crtc_state->planes_changed,
8717 		new_crtc_state->mode_changed,
8718 		new_crtc_state->active_changed,
8719 		new_crtc_state->connectors_changed);
8720 
8721 	/* Remove stream for any changed/disabled CRTC */
8722 	if (!enable) {
8723 
8724 		if (!dm_old_crtc_state->stream)
8725 			goto skip_modeset;
8726 
8727 		ret = dm_atomic_get_state(state, &dm_state);
8728 		if (ret)
8729 			goto fail;
8730 
8731 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8732 				crtc->base.id);
8733 
8734 		/* i.e. reset mode */
8735 		if (dc_remove_stream_from_ctx(
8736 				dm->dc,
8737 				dm_state->context,
8738 				dm_old_crtc_state->stream) != DC_OK) {
8739 			ret = -EINVAL;
8740 			goto fail;
8741 		}
8742 
8743 		dc_stream_release(dm_old_crtc_state->stream);
8744 		dm_new_crtc_state->stream = NULL;
8745 
8746 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8747 
8748 		*lock_and_validation_needed = true;
8749 
	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when
		 * added MST connectors are not found in the existing
		 * crtc_state in chained mode.
		 * TODO: need to dig out the root cause of that.
		 */
8756 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8757 			goto skip_modeset;
8758 
8759 		if (modereset_required(new_crtc_state))
8760 			goto skip_modeset;
8761 
8762 		if (modeset_required(new_crtc_state, new_stream,
8763 				     dm_old_crtc_state->stream)) {
8764 
8765 			WARN_ON(dm_new_crtc_state->stream);
8766 
8767 			ret = dm_atomic_get_state(state, &dm_state);
8768 			if (ret)
8769 				goto fail;
8770 
8771 			dm_new_crtc_state->stream = new_stream;
8772 
8773 			dc_stream_retain(new_stream);
8774 
8775 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8776 						crtc->base.id);
8777 
8778 			if (dc_add_stream_to_ctx(
8779 					dm->dc,
8780 					dm_state->context,
8781 					dm_new_crtc_state->stream) != DC_OK) {
8782 				ret = -EINVAL;
8783 				goto fail;
8784 			}
8785 
8786 			*lock_and_validation_needed = true;
8787 		}
8788 	}
8789 
8790 skip_modeset:
8791 	/* Release extra reference */
8792 	if (new_stream)
		dc_stream_release(new_stream);
8794 
8795 	/*
8796 	 * We want to do dc stream updates that do not require a
8797 	 * full modeset below.
8798 	 */
8799 	if (!(enable && aconnector && new_crtc_state->active))
8800 		return 0;
8801 	/*
8802 	 * Given above conditions, the dc state cannot be NULL because:
8803 	 * 1. We're in the process of enabling CRTCs (just been added
8804 	 *    to the dc context, or already is on the context)
8805 	 * 2. Has a valid connector attached, and
8806 	 * 3. Is currently active and enabled.
8807 	 * => The dc stream state currently exists.
8808 	 */
8809 	BUG_ON(dm_new_crtc_state->stream == NULL);
8810 
8811 	/* Scaling or underscan settings */
8812 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8813 		update_stream_scaling_settings(
8814 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8815 
8816 	/* ABM settings */
8817 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8818 
8819 	/*
8820 	 * Color management settings. We also update color properties
8821 	 * when a modeset is needed, to ensure it gets reprogrammed.
8822 	 */
8823 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8824 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8825 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8826 		if (ret)
8827 			goto fail;
8828 	}
8829 
8830 	/* Update Freesync settings. */
8831 	get_freesync_config_for_crtc(dm_new_crtc_state,
8832 				     dm_new_conn_state);
8833 
8834 	return ret;
8835 
8836 fail:
8837 	if (new_stream)
8838 		dc_stream_release(new_stream);
8839 	return ret;
8840 }
8841 
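/*
 * Decide whether a plane update requires resetting (removing and recreating)
 * all DC planes on the stream instead of a fast update.
 */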
8842 static bool should_reset_plane(struct drm_atomic_state *state,
8843 			       struct drm_plane *plane,
8844 			       struct drm_plane_state *old_plane_state,
8845 			       struct drm_plane_state *new_plane_state)
8846 {
8847 	struct drm_plane *other;
8848 	struct drm_plane_state *old_other_state, *new_other_state;
8849 	struct drm_crtc_state *new_crtc_state;
8850 	int i;
8851 
8852 	/*
8853 	 * TODO: Remove this hack once the checks below are sufficient
8854 	 * enough to determine when we need to reset all the planes on
8855 	 * the stream.
8856 	 */
8857 	if (state->allow_modeset)
8858 		return true;
8859 
8860 	/* Exit early if we know that we're adding or removing the plane. */
8861 	if (old_plane_state->crtc != new_plane_state->crtc)
8862 		return true;
8863 
8864 	/* old crtc == new_crtc == NULL, plane not in context. */
8865 	if (!new_plane_state->crtc)
8866 		return false;
8867 
8868 	new_crtc_state =
8869 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8870 
8871 	if (!new_crtc_state)
8872 		return true;
8873 
8874 	/* CRTC Degamma changes currently require us to recreate planes. */
8875 	if (new_crtc_state->color_mgmt_changed)
8876 		return true;
8877 
8878 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8879 		return true;
8880 
8881 	/*
8882 	 * If there are any new primary or overlay planes being added or
8883 	 * removed then the z-order can potentially change. To ensure
8884 	 * correct z-order and pipe acquisition the current DC architecture
8885 	 * requires us to remove and recreate all existing planes.
8886 	 *
8887 	 * TODO: Come up with a more elegant solution for this.
8888 	 */
8889 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
8892 			continue;
8893 
8894 		if (old_other_state->crtc != new_plane_state->crtc &&
8895 		    new_other_state->crtc != new_plane_state->crtc)
8896 			continue;
8897 
8898 		if (old_other_state->crtc != new_other_state->crtc)
8899 			return true;
8900 
8901 		/* Src/dst size and scaling updates. */
8902 		if (old_other_state->src_w != new_other_state->src_w ||
8903 		    old_other_state->src_h != new_other_state->src_h ||
8904 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8905 		    old_other_state->crtc_h != new_other_state->crtc_h)
8906 			return true;
8907 
8908 		/* Rotation / mirroring updates. */
8909 		if (old_other_state->rotation != new_other_state->rotation)
8910 			return true;
8911 
8912 		/* Blending updates. */
8913 		if (old_other_state->pixel_blend_mode !=
8914 		    new_other_state->pixel_blend_mode)
8915 			return true;
8916 
8917 		/* Alpha updates. */
8918 		if (old_other_state->alpha != new_other_state->alpha)
8919 			return true;
8920 
8921 		/* Colorspace changes. */
8922 		if (old_other_state->color_range != new_other_state->color_range ||
8923 		    old_other_state->color_encoding != new_other_state->color_encoding)
8924 			return true;
8925 
8926 		/* Framebuffer checks fall at the end. */
8927 		if (!old_other_state->fb || !new_other_state->fb)
8928 			continue;
8929 
8930 		/* Pixel format changes can require bandwidth updates. */
8931 		if (old_other_state->fb->format != new_other_state->fb->format)
8932 			return true;
8933 
8934 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8935 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8936 
8937 		/* Tiling and DCC changes also require bandwidth updates. */
8938 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
8939 		    old_afb->base.modifier != new_afb->base.modifier)
8940 			return true;
8941 	}
8942 
8943 	return false;
8944 }
8945 
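/*
 * Plane counterpart of dm_update_crtc_state(): with enable == false it
 * removes changed/disabled planes from the DC context, with enable == true
 * it creates the new dc_plane_state and attaches it to the stream. Sets
 * *lock_and_validation_needed whenever the DC context is modified.
 */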
8946 static int dm_update_plane_state(struct dc *dc,
8947 				 struct drm_atomic_state *state,
8948 				 struct drm_plane *plane,
8949 				 struct drm_plane_state *old_plane_state,
8950 				 struct drm_plane_state *new_plane_state,
8951 				 bool enable,
8952 				 bool *lock_and_validation_needed)
8953 {
8954 
8955 	struct dm_atomic_state *dm_state = NULL;
8956 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8957 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8958 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8959 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8960 	struct amdgpu_crtc *new_acrtc;
8961 	bool needs_reset;
	int ret = 0;

8965 	new_plane_crtc = new_plane_state->crtc;
8966 	old_plane_crtc = old_plane_state->crtc;
8967 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8968 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8969 
	/* TODO: Implement a better atomic check for the cursor plane */
8971 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8972 		if (!enable || !new_plane_crtc ||
8973 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8974 			return 0;
8975 
8976 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8977 
8978 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8979 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8980 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8981 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8982 			return -EINVAL;
8983 		}
8984 
8985 		return 0;
8986 	}
8987 
8988 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8989 					 new_plane_state);
8990 
8991 	/* Remove any changed/removed planes */
8992 	if (!enable) {
8993 		if (!needs_reset)
8994 			return 0;
8995 
8996 		if (!old_plane_crtc)
8997 			return 0;
8998 
8999 		old_crtc_state = drm_atomic_get_old_crtc_state(
9000 				state, old_plane_crtc);
9001 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9002 
9003 		if (!dm_old_crtc_state->stream)
9004 			return 0;
9005 
9006 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9007 				plane->base.id, old_plane_crtc->base.id);
9008 
9009 		ret = dm_atomic_get_state(state, &dm_state);
9010 		if (ret)
9011 			return ret;
9012 
		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
9024 		dm_new_plane_state->dc_state = NULL;
9025 
9026 		*lock_and_validation_needed = true;
9027 
9028 	} else { /* Add new planes */
9029 		struct dc_plane_state *dc_new_plane_state;
9030 
9031 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9032 			return 0;
9033 
9034 		if (!new_plane_crtc)
9035 			return 0;
9036 
9037 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9038 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9039 
9040 		if (!dm_new_crtc_state->stream)
9041 			return 0;
9042 
9043 		if (!needs_reset)
9044 			return 0;
9045 
9046 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9047 		if (ret)
9048 			return ret;
9049 
9050 		WARN_ON(dm_new_plane_state->dc_state);
9051 
9052 		dc_new_plane_state = dc_create_plane_state(dc);
9053 		if (!dc_new_plane_state)
9054 			return -ENOMEM;
9055 
9056 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9057 				plane->base.id, new_plane_crtc->base.id);
9058 
9059 		ret = fill_dc_plane_attributes(
9060 			drm_to_adev(new_plane_crtc->dev),
9061 			dc_new_plane_state,
9062 			new_plane_state,
9063 			new_crtc_state);
9064 		if (ret) {
9065 			dc_plane_state_release(dc_new_plane_state);
9066 			return ret;
9067 		}
9068 
9069 		ret = dm_atomic_get_state(state, &dm_state);
9070 		if (ret) {
9071 			dc_plane_state_release(dc_new_plane_state);
9072 			return ret;
9073 		}
9074 
9075 		/*
9076 		 * Any atomic check errors that occur after this will
9077 		 * not need a release. The plane state will be attached
9078 		 * to the stream, and therefore part of the atomic
9079 		 * state. It'll be released when the atomic state is
9080 		 * cleaned.
9081 		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}
9091 
9092 		dm_new_plane_state->dc_state = dc_new_plane_state;
9093 
		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
9097 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9098 
9099 		*lock_and_validation_needed = true;
9100 	}
9101 
9102 
9103 	return ret;
9104 }
9105 
9106 #if defined(CONFIG_DRM_AMD_DC_DCN)
9107 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9108 {
9109 	struct drm_connector *connector;
9110 	struct drm_connector_state *conn_state;
9111 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
9114 		if (conn_state->crtc != crtc)
9115 			continue;
9116 
9117 		aconnector = to_amdgpu_dm_connector(connector);
9118 		if (!aconnector->port || !aconnector->mst_port)
9119 			aconnector = NULL;
9120 		else
9121 			break;
9122 	}
9123 
9124 	if (!aconnector)
9125 		return 0;
9126 
9127 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9128 }
9129 #endif
9130 
9131 /**
9132  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9133  * @dev: The DRM device
9134  * @state: The atomic state to commit
9135  *
9136  * Validate that the given atomic state is programmable by DC into hardware.
9137  * This involves constructing a &struct dc_state reflecting the new hardware
9138  * state we wish to commit, then querying DC to see if it is programmable. It's
9139  * important not to modify the existing DC state. Otherwise, atomic_check
9140  * may unexpectedly commit hardware changes.
9141  *
9142  * When validating the DC state, it's important that the right locks are
9143  * acquired. For full updates case which removes/adds/updates streams on one
9144  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9145  * that any such full update commit will wait for completion of any outstanding
9146  * flip using DRMs synchronization events.
9147  *
9148  * Note that DM adds the affected connectors for all CRTCs in state, when that
9149  * might not seem necessary. This is because DC stream creation requires the
9150  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9151  * be possible but non-trivial - a possible TODO item.
9152  *
 * Return: 0 on success, negative error code on failure.
9154  */
9155 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9156 				  struct drm_atomic_state *state)
9157 {
9158 	struct amdgpu_device *adev = drm_to_adev(dev);
9159 	struct dm_atomic_state *dm_state = NULL;
9160 	struct dc *dc = adev->dm.dc;
9161 	struct drm_connector *connector;
9162 	struct drm_connector_state *old_con_state, *new_con_state;
9163 	struct drm_crtc *crtc;
9164 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9165 	struct drm_plane *plane;
9166 	struct drm_plane_state *old_plane_state, *new_plane_state;
9167 	enum dc_status status;
9168 	int ret, i;
9169 	bool lock_and_validation_needed = false;
9170 	struct dm_crtc_state *dm_old_crtc_state;
9171 
9172 	trace_amdgpu_dm_atomic_check_begin(state);
9173 
9174 	ret = drm_atomic_helper_check_modeset(dev, state);
9175 	if (ret)
9176 		goto fail;
9177 
9178 	/* Check connector changes */
9179 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9180 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9181 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9182 
9183 		/* Skip connectors that are disabled or part of modeset already. */
9184 		if (!old_con_state->crtc && !new_con_state->crtc)
9185 			continue;
9186 
9187 		if (!new_con_state->crtc)
9188 			continue;
9189 
9190 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9191 		if (IS_ERR(new_crtc_state)) {
9192 			ret = PTR_ERR(new_crtc_state);
9193 			goto fail;
9194 		}
9195 
9196 		if (dm_old_con_state->abm_level !=
9197 		    dm_new_con_state->abm_level)
9198 			new_crtc_state->connectors_changed = true;
9199 	}
9200 
9201 #if defined(CONFIG_DRM_AMD_DC_DCN)
9202 	if (adev->asic_type >= CHIP_NAVI10) {
9203 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9204 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9205 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9206 				if (ret)
9207 					goto fail;
9208 			}
9209 		}
9210 	}
9211 #endif
9212 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9213 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9214 
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
			continue;
9220 
9221 		if (!new_crtc_state->enable)
9222 			continue;
9223 
9224 		ret = drm_atomic_add_affected_connectors(state, crtc);
9225 		if (ret)
			goto fail;
9227 
9228 		ret = drm_atomic_add_affected_planes(state, crtc);
9229 		if (ret)
9230 			goto fail;
9231 	}
9232 
9233 	/*
9234 	 * Add all primary and overlay planes on the CRTC to the state
9235 	 * whenever a plane is enabled to maintain correct z-ordering
9236 	 * and to enable fast surface updates.
9237 	 */
9238 	drm_for_each_crtc(crtc, dev) {
9239 		bool modified = false;
9240 
9241 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9242 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9243 				continue;
9244 
9245 			if (new_plane_state->crtc == crtc ||
9246 			    old_plane_state->crtc == crtc) {
9247 				modified = true;
9248 				break;
9249 			}
9250 		}
9251 
9252 		if (!modified)
9253 			continue;
9254 
9255 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9256 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9257 				continue;
9258 
9259 			new_plane_state =
9260 				drm_atomic_get_plane_state(state, plane);
9261 
9262 			if (IS_ERR(new_plane_state)) {
9263 				ret = PTR_ERR(new_plane_state);
9264 				goto fail;
9265 			}
9266 		}
9267 	}
9268 
	/* Remove existing planes if they are modified */
9270 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9271 		ret = dm_update_plane_state(dc, state, plane,
9272 					    old_plane_state,
9273 					    new_plane_state,
9274 					    false,
9275 					    &lock_and_validation_needed);
9276 		if (ret)
9277 			goto fail;
9278 	}
9279 
9280 	/* Disable all crtcs which require disable */
9281 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9282 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9283 					   old_crtc_state,
9284 					   new_crtc_state,
9285 					   false,
9286 					   &lock_and_validation_needed);
9287 		if (ret)
9288 			goto fail;
9289 	}
9290 
9291 	/* Enable all crtcs which require enable */
9292 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9293 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9294 					   old_crtc_state,
9295 					   new_crtc_state,
9296 					   true,
9297 					   &lock_and_validation_needed);
9298 		if (ret)
9299 			goto fail;
9300 	}
9301 
9302 	/* Add new/modified planes */
9303 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9304 		ret = dm_update_plane_state(dc, state, plane,
9305 					    old_plane_state,
9306 					    new_plane_state,
9307 					    true,
9308 					    &lock_and_validation_needed);
9309 		if (ret)
9310 			goto fail;
9311 	}
9312 
9313 	/* Run this here since we want to validate the streams we created */
9314 	ret = drm_atomic_helper_check_planes(dev, state);
9315 	if (ret)
9316 		goto fail;
9317 
9318 	if (state->legacy_cursor_update) {
9319 		/*
9320 		 * This is a fast cursor update coming from the plane update
9321 		 * helper, check if it can be done asynchronously for better
9322 		 * performance.
9323 		 */
9324 		state->async_update =
9325 			!drm_atomic_helper_async_check(dev, state);
9326 
9327 		/*
9328 		 * Skip the remaining global validation if this is an async
9329 		 * update. Cursor updates can be done without affecting
9330 		 * state or bandwidth calcs and this avoids the performance
9331 		 * penalty of locking the private state object and
9332 		 * allocating a new dc_state.
9333 		 */
9334 		if (state->async_update)
9335 			return 0;
9336 	}
9337 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into the context w/o causing a full reset. Need to
	 * decide how to handle.
	 */
9343 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9344 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9345 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9346 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9347 
9348 		/* Skip any modesets/resets */
9349 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9350 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9351 			continue;
9352 
		/* Skip anything that is not a scaling or underscan change */
9354 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9355 			continue;
9356 
9357 		lock_and_validation_needed = true;
9358 	}
9359 
9360 	/**
9361 	 * Streams and planes are reset when there are changes that affect
9362 	 * bandwidth. Anything that affects bandwidth needs to go through
9363 	 * DC global validation to ensure that the configuration can be applied
9364 	 * to hardware.
9365 	 *
9366 	 * We have to currently stall out here in atomic_check for outstanding
9367 	 * commits to finish in this case because our IRQ handlers reference
9368 	 * DRM state directly - we can end up disabling interrupts too early
9369 	 * if we don't.
9370 	 *
9371 	 * TODO: Remove this stall and drop DM state private objects.
9372 	 */
9373 	if (lock_and_validation_needed) {
9374 		ret = dm_atomic_get_state(state, &dm_state);
9375 		if (ret)
9376 			goto fail;
9377 
9378 		ret = do_aquire_global_lock(dev, state);
9379 		if (ret)
9380 			goto fail;
9381 
9382 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
9385 
9386 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9387 		if (ret)
9388 			goto fail;
9389 #endif
9390 
9391 		/*
9392 		 * Perform validation of MST topology in the state:
9393 		 * We need to perform MST atomic check before calling
9394 		 * dc_validate_global_state(), or there is a chance
9395 		 * to get stuck in an infinite loop and hang eventually.
9396 		 */
9397 		ret = drm_dp_mst_atomic_check(state);
9398 		if (ret)
9399 			goto fail;
9400 		status = dc_validate_global_state(dc, dm_state->context, false);
9401 		if (status != DC_OK) {
9402 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
9403 				       dc_status_to_str(status), status);
9404 			ret = -EINVAL;
9405 			goto fail;
9406 		}
9407 	} else {
9408 		/*
9409 		 * The commit is a fast update. Fast updates shouldn't change
9410 		 * the DC context, affect global validation, and can have their
9411 		 * commit work done in parallel with other commits not touching
9412 		 * the same resource. If we have a new DC context as part of
9413 		 * the DM atomic state from validation we need to free it and
9414 		 * retain the existing one instead.
9415 		 *
9416 		 * Furthermore, since the DM atomic state only contains the DC
9417 		 * context and can safely be annulled, we can free the state
9418 		 * and clear the associated private object now to free
9419 		 * some memory and avoid a possible use-after-free later.
9420 		 */
9421 
9422 		for (i = 0; i < state->num_private_objs; i++) {
9423 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9424 
9425 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
9427 
9428 				dm_atomic_destroy_state(obj,
9429 						state->private_objs[i].state);
9430 
9431 				/* If i is not at the end of the array then the
9432 				 * last element needs to be moved to where i was
9433 				 * before the array can safely be truncated.
9434 				 */
9435 				if (i != j)
9436 					state->private_objs[i] =
9437 						state->private_objs[j];
9438 
9439 				state->private_objs[j].ptr = NULL;
9440 				state->private_objs[j].state = NULL;
9441 				state->private_objs[j].old_state = NULL;
9442 				state->private_objs[j].new_state = NULL;
9443 
9444 				state->num_private_objs = j;
9445 				break;
9446 			}
9447 		}
9448 	}
9449 
9450 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9452 		struct dm_crtc_state *dm_new_crtc_state =
9453 			to_dm_crtc_state(new_crtc_state);
9454 
9455 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9456 							 UPDATE_TYPE_FULL :
9457 							 UPDATE_TYPE_FAST;
9458 	}
9459 
9460 	/* Must be success */
9461 	WARN_ON(ret);
9462 
9463 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9464 
9465 	return ret;
9466 
9467 fail:
9468 	if (ret == -EDEADLK)
9469 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9470 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9471 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9472 	else
9473 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9474 
9475 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9476 
9477 	return ret;
9478 }
9479 
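/*
 * Check the DPCD to see whether the sink can ignore the MSA timing
 * parameters, a prerequisite for driving it with a variable refresh rate.
 */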
9480 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9481 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9482 {
9483 	uint8_t dpcd_data;
9484 	bool capable = false;
9485 
9486 	if (amdgpu_dm_connector->dc_link &&
9487 		dm_helpers_dp_read_dpcd(
9488 				NULL,
9489 				amdgpu_dm_connector->dc_link,
9490 				DP_DOWN_STREAM_PORT_COUNT,
9491 				&dpcd_data,
9492 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9494 	}
9495 
9496 	return capable;
9497 }
9498 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9499 					struct edid *edid)
9500 {
9501 	int i;
9502 	bool edid_check_required;
9503 	struct detailed_timing *timing;
9504 	struct detailed_non_pixel *data;
9505 	struct detailed_data_monitor_range *range;
9506 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9507 			to_amdgpu_dm_connector(connector);
9508 	struct dm_connector_state *dm_con_state = NULL;
9509 
9510 	struct drm_device *dev = connector->dev;
9511 	struct amdgpu_device *adev = drm_to_adev(dev);
9512 	bool freesync_capable = false;
9513 
9514 	if (!connector->state) {
9515 		DRM_ERROR("%s - Connector has no state", __func__);
9516 		goto update;
9517 	}
9518 
9519 	if (!edid) {
9520 		dm_con_state = to_dm_connector_state(connector->state);
9521 
9522 		amdgpu_dm_connector->min_vfreq = 0;
9523 		amdgpu_dm_connector->max_vfreq = 0;
9524 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9525 
9526 		goto update;
9527 	}
9528 
9529 	dm_con_state = to_dm_connector_state(connector->state);
9530 
9531 	edid_check_required = false;
9532 	if (!amdgpu_dm_connector->dc_sink) {
9533 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9534 		goto update;
9535 	}
9536 	if (!adev->dm.freesync_module)
9537 		goto update;
9538 	/*
9539 	 * if edid non zero restrict freesync only for dp and edp
9540 	 */
9541 	if (edid) {
9542 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9543 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9544 			edid_check_required = is_dp_capable_without_timing_msa(
9545 						adev->dm.dc,
9546 						amdgpu_dm_connector);
9547 		}
9548 	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
9553 			timing	= &edid->detailed_timings[i];
9554 			data	= &timing->data.other_data;
9555 			range	= &data->data.range;
9556 			/*
9557 			 * Check if monitor has continuous frequency mode
9558 			 */
9559 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9560 				continue;
9561 			/*
9562 			 * Check for flag range limits only. If flag == 1 then
9563 			 * no additional timing information provided.
9564 			 * Default GTF, GTF Secondary curve and CVT are not
9565 			 * supported
9566 			 */
9567 			if (range->flags != 1)
9568 				continue;
9569 
9570 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9571 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9572 			amdgpu_dm_connector->pixel_clock_mhz =
9573 				range->pixel_clock_mhz * 10;
9574 			break;
9575 		}
9576 
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
9582 	}
9583 
9584 update:
9585 	if (dm_con_state)
9586 		dm_con_state->freesync_capable = freesync_capable;
9587 
9588 	if (connector->vrr_capable_property)
9589 		drm_connector_set_vrr_capable_property(connector,
9590 						       freesync_capable);
9591 }
9592 
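/*
 * Read the sink's PSR capability from the DP_PSR_SUPPORT DPCD register and
 * cache the supported PSR version on the dc_link.
 */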
9593 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9594 {
9595 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9596 
9597 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9598 		return;
9599 	if (link->type == dc_connection_none)
9600 		return;
9601 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9602 					dpcd_data, sizeof(dpcd_data))) {
9603 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9604 
9605 		if (dpcd_data[0] == 0) {
9606 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9607 			link->psr_settings.psr_feature_enabled = false;
9608 		} else {
9609 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9610 			link->psr_settings.psr_feature_enabled = true;
9611 		}
9612 
9613 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9614 	}
9615 }
9616 
9617 /*
9618  * amdgpu_dm_link_setup_psr() - configure psr link
9619  * @stream: stream state
9620  *
9621  * Return: true if success
9622  */
9623 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9624 {
9625 	struct dc_link *link = NULL;
9626 	struct psr_config psr_config = {0};
9627 	struct psr_context psr_context = {0};
9628 	bool ret = false;
9629 
9630 	if (stream == NULL)
9631 		return false;
9632 
9633 	link = stream->link;
9634 
9635 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9636 
9637 	if (psr_config.psr_version > 0) {
9638 		psr_config.psr_exit_link_training_required = 0x1;
9639 		psr_config.psr_frame_capture_indication_req = 0;
9640 		psr_config.psr_rfb_setup_time = 0x37;
9641 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9642 		psr_config.allow_smu_optimizations = 0x0;
9643 
		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9648 
9649 	return ret;
9650 }
9651 
9652 /*
9653  * amdgpu_dm_psr_enable() - enable psr f/w
9654  * @stream: stream state
9655  *
9656  * Return: true if success
9657  */
9658 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9659 {
9660 	struct dc_link *link = stream->link;
9661 	unsigned int vsync_rate_hz = 0;
9662 	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR. Initialize with a fail-safe of 2 static
	 * frames.
	 */
	unsigned int num_frames_static = 2;
9668 
9669 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9670 
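	/* vsync rate in Hz = pixel clock / (h_total * v_total) */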
	vsync_rate_hz = div64_u64(div64_u64(stream->timing.pix_clk_100hz * 100,
					    stream->timing.v_total),
				  stream->timing.h_total);
9675 
	/*
	 * Round up: calculate the number of frames such that at least 30 ms
	 * of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
9684 
9685 	params.triggers.cursor_update = true;
9686 	params.triggers.overlay_update = true;
9687 	params.triggers.surface_update = true;
9688 	params.num_frames = num_frames_static;
9689 
9690 	dc_stream_set_static_screen_params(link->ctx->dc,
9691 					   &stream, 1,
9692 					   &params);
9693 
9694 	return dc_link_set_psr_allow_active(link, true, false, false);
9695 }
9696 
9697 /*
9698  * amdgpu_dm_psr_disable() - disable psr f/w
9699  * @stream:  stream state
9700  *
9701  * Return: true if success
9702  */
9703 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9704 {
9705 
9706 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9707 
9708 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
9709 }
9710 
9711 /*
9712  * amdgpu_dm_psr_disable() - disable psr f/w
9713  * if psr is enabled on any stream
9714  *
9715  * Return: true if success
9716  */
9717 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9718 {
9719 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9720 	return dc_set_psr_allow_active(dm->dc, false);
9721 }
9722 
9723 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9724 {
9725 	struct amdgpu_device *adev = drm_to_adev(dev);
9726 	struct dc *dc = adev->dm.dc;
9727 	int i;
9728 
9729 	mutex_lock(&adev->dm.dc_lock);
9730 	if (dc->current_state) {
9731 		for (i = 0; i < dc->current_state->stream_count; ++i)
9732 			dc->current_state->streams[i]
9733 				->triggered_crtc_reset.enabled =
9734 				adev->dm.force_timing_sync;
9735 
9736 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9737 		dc_trigger_sync(dc, dc->current_state);
9738 	}
9739 	mutex_unlock(&adev->dm.dc_lock);
9740 }
9741 
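/*
 * Register access helpers used by DC: they go through CGS and emit
 * tracepoints so register traffic can be inspected when debugging.
 */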
9742 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9743 		       uint32_t value, const char *func_name)
9744 {
9745 #ifdef DM_CHECK_ADDR_0
9746 	if (address == 0) {
9747 		DC_ERR("invalid register write. address = 0");
9748 		return;
9749 	}
9750 #endif
9751 	cgs_write_register(ctx->cgs_device, address, value);
9752 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9753 }
9754 
9755 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9756 			  const char *func_name)
9757 {
9758 	uint32_t value;
9759 #ifdef DM_CHECK_ADDR_0
9760 	if (address == 0) {
9761 		DC_ERR("invalid register read; address = 0\n");
9762 		return 0;
9763 	}
9764 #endif
9765 
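	/*
	 * Reads cannot be serviced while a DMUB register-access gather is in
	 * progress (burst writes excepted); flag it and return 0.
	 */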
9766 	if (ctx->dmub_srv &&
9767 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9768 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9769 		ASSERT(false);
9770 		return 0;
9771 	}
9772 
9773 	value = cgs_read_register(ctx->cgs_device, address);
9774 
9775 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9776 
9777 	return value;
9778 }
9779