/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
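
/*
 * Note: the DMUB inst_const payload inside the firmware image is wrapped by
 * these PSP header/footer regions; both are subtracted when sizing the
 * inst_const region (see dm_dmub_sw_init() and dm_dmub_hw_init()).
 */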

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO: rework the base driver to use values directly.
		 * For now parse it back into reg-format.
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, carrying the device and the IRQ
 *                    source that fired
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

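	/* The pageflip IRQ sources are laid out consecutively, so the OTG
	 * instance can be recovered as (irq_src - IRQ_TYPE_PFLIP).
	 */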
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/*
	 * An IRQ can fire during the initial bring-up stage.
	 * TODO: work and BO cleanup.
	 */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of the
	 * vblank in which the pageflip completed, so last_flip_vblank is the
	 * forbidden count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after the end of front-porch
		 * in VRR mode, as vblank timestamping only gives valid results
		 * once scanout has passed the front-porch. This also delivers
		 * any page-flip completion events that were queued to us if a
		 * pageflip happened inside the front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at the start of front-porch is only possible
	 * in non-VRR mode, as only then does vblank timestamping give valid
	 * results while inside the front-porch. Otherwise defer it to
	 * dm_vupdate_high_irq() after the end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * The following must happen at the start of vblank, for CRC
	 * computation and below-the-range (BTR) support in VRR mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

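	/* Size the buffer for the largest listed mode at 4 bytes per pixel
	 * (assuming 32bpp scanout surfaces).
	 */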
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the dmub firmware's
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init().
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that prevents it from using VRAM above
		 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, raise the
		 * system aperture high address by one so the VM fault and
		 * hardware hang are avoided.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
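
/*
 * Note on the shifts above: the system aperture addresses are expressed in
 * 256 KiB units (>> 18), the AGP window in 16 MiB units (>> 24), and the
 * GART page table addresses at 4 KiB granularity (>> 12).
 */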
#endif

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type == CHIP_RENOIR) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		/* Call the DC init_memory func */
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual number of CRTCs in use */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Read the firmware version before it is logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

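	/* Build a 16-entry linear backlight LUT spanning 0..0xFFFF. */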
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* When ABM is implemented in DMCUB (ABM 2.4 and up), the dmcu
	 * object will be NULL.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed and
	 * should be passed to smu during boot up and on resume from s3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create (dcn20_resource_construct), then calls the pplib
	 * functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1704 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1705 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1706 			if (rc)
1707 				DRM_WARN("Failed to %s pflip interrupts\n",
1708 					 enable ? "enable" : "disable");
1709 
1710 			if (enable) {
1711 				rc = dm_enable_vblank(&acrtc->base);
1712 				if (rc)
1713 					DRM_WARN("Failed to enable vblank interrupts\n");
1714 			} else {
1715 				dm_disable_vblank(&acrtc->base);
1716 			}
1717 
1718 		}
1719 	}
1720 
1721 }
1722 
1723 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1724 {
1725 	struct dc_state *context = NULL;
1726 	enum dc_status res = DC_ERROR_UNEXPECTED;
1727 	int i;
1728 	struct dc_stream_state *del_streams[MAX_PIPES];
1729 	int del_streams_count = 0;
1730 
1731 	memset(del_streams, 0, sizeof(del_streams));
1732 
1733 	context = dc_create_state(dc);
1734 	if (context == NULL)
1735 		goto context_alloc_fail;
1736 
1737 	dc_resource_state_copy_construct_current(dc, context);
1738 
	/* First, collect all streams currently in the context for removal */
1740 	for (i = 0; i < context->stream_count; i++) {
1741 		struct dc_stream_state *stream = context->streams[i];
1742 
1743 		del_streams[del_streams_count++] = stream;
1744 	}
1745 
1746 	/* Remove all planes for removed streams and then remove the streams */
1747 	for (i = 0; i < del_streams_count; i++) {
1748 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1749 			res = DC_FAIL_DETACH_SURFACES;
1750 			goto fail;
1751 		}
1752 
1753 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1754 		if (res != DC_OK)
1755 			goto fail;
1756 	}
1757 
1758 
1759 	res = dc_validate_global_state(dc, context, false);
1760 
1761 	if (res != DC_OK) {
		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1763 		goto fail;
1764 	}
1765 
1766 	res = dc_commit_state(dc, context);
1767 
1768 fail:
1769 	dc_release_state(context);
1770 
1771 context_alloc_fail:
1772 	return res;
1773 }
1774 
1775 static int dm_suspend(void *handle)
1776 {
1777 	struct amdgpu_device *adev = handle;
1778 	struct amdgpu_display_manager *dm = &adev->dm;
1779 	int ret = 0;
1780 
1781 	if (amdgpu_in_reset(adev)) {
1782 		mutex_lock(&dm->dc_lock);
1783 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1784 
1785 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1786 
1787 		amdgpu_dm_commit_zero_streams(dm->dc);
1788 
1789 		amdgpu_dm_irq_suspend(adev);
1790 
1791 		return ret;
1792 	}
1793 
1794 	WARN_ON(adev->dm.cached_state);
1795 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1796 
1797 	s3_handle_mst(adev_to_drm(adev), true);
1798 
1799 	amdgpu_dm_irq_suspend(adev);
1800 
1801 
1802 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1803 
1804 	return 0;
1805 }
1806 
1807 static struct amdgpu_dm_connector *
1808 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1809 					     struct drm_crtc *crtc)
1810 {
1811 	uint32_t i;
1812 	struct drm_connector_state *new_con_state;
1813 	struct drm_connector *connector;
1814 	struct drm_crtc *crtc_from_state;
1815 
1816 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1817 		crtc_from_state = new_con_state->crtc;
1818 
1819 		if (crtc_from_state == crtc)
1820 			return to_amdgpu_dm_connector(connector);
1821 	}
1822 
1823 	return NULL;
1824 }
1825 
1826 static void emulated_link_detect(struct dc_link *link)
1827 {
1828 	struct dc_sink_init_data sink_init_data = { 0 };
1829 	struct display_sink_capability sink_caps = { 0 };
1830 	enum dc_edid_status edid_status;
1831 	struct dc_context *dc_ctx = link->ctx;
1832 	struct dc_sink *sink = NULL;
1833 	struct dc_sink *prev_sink = NULL;
1834 
1835 	link->type = dc_connection_none;
1836 	prev_sink = link->local_sink;
1837 
1838 	if (prev_sink != NULL)
1839 		dc_sink_retain(prev_sink);
1840 
1841 	switch (link->connector_signal) {
1842 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1843 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1844 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1845 		break;
1846 	}
1847 
1848 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1849 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1850 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1851 		break;
1852 	}
1853 
1854 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1855 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1856 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1857 		break;
1858 	}
1859 
1860 	case SIGNAL_TYPE_LVDS: {
1861 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1862 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1863 		break;
1864 	}
1865 
1866 	case SIGNAL_TYPE_EDP: {
1867 		sink_caps.transaction_type =
1868 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1869 		sink_caps.signal = SIGNAL_TYPE_EDP;
1870 		break;
1871 	}
1872 
1873 	case SIGNAL_TYPE_DISPLAY_PORT: {
1874 		sink_caps.transaction_type =
1875 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1876 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1877 		break;
1878 	}
1879 
1880 	default:
1881 		DC_ERROR("Invalid connector type! signal:%d\n",
1882 			link->connector_signal);
1883 		return;
1884 	}
1885 
1886 	sink_init_data.link = link;
1887 	sink_init_data.sink_signal = sink_caps.signal;
1888 
1889 	sink = dc_sink_create(&sink_init_data);
1890 	if (!sink) {
1891 		DC_ERROR("Failed to create sink!\n");
1892 		return;
1893 	}
1894 
1895 	/* dc_sink_create returns a new reference */
1896 	link->local_sink = sink;
1897 
1898 	edid_status = dm_helpers_read_local_edid(
1899 			link->ctx,
1900 			link,
1901 			sink);
1902 
1903 	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
1905 
1906 }
1907 
1908 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1909 				     struct amdgpu_display_manager *dm)
1910 {
1911 	struct {
1912 		struct dc_surface_update surface_updates[MAX_SURFACES];
1913 		struct dc_plane_info plane_infos[MAX_SURFACES];
1914 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1915 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1916 		struct dc_stream_update stream_update;
	} *bundle;
1918 	int k, m;
1919 
1920 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1921 
1922 	if (!bundle) {
1923 		dm_error("Failed to allocate update bundle\n");
1924 		goto cleanup;
1925 	}
1926 
1927 	for (k = 0; k < dc_state->stream_count; k++) {
1928 		bundle->stream_update.stream = dc_state->streams[k];
1929 
1930 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1931 			bundle->surface_updates[m].surface =
1932 				dc_state->stream_status->plane_states[m];
1933 			bundle->surface_updates[m].surface->force_full_update =
1934 				true;
1935 		}
1936 		dc_commit_updates_for_stream(
1937 			dm->dc, bundle->surface_updates,
1938 			dc_state->stream_status->plane_count,
1939 			dc_state->streams[k], &bundle->stream_update, dc_state);
1940 	}
1941 
1942 cleanup:
1943 	kfree(bundle);
1944 
1945 	return;
1946 }
1947 
1948 static int dm_resume(void *handle)
1949 {
1950 	struct amdgpu_device *adev = handle;
1951 	struct drm_device *ddev = adev_to_drm(adev);
1952 	struct amdgpu_display_manager *dm = &adev->dm;
1953 	struct amdgpu_dm_connector *aconnector;
1954 	struct drm_connector *connector;
1955 	struct drm_connector_list_iter iter;
1956 	struct drm_crtc *crtc;
1957 	struct drm_crtc_state *new_crtc_state;
1958 	struct dm_crtc_state *dm_new_crtc_state;
1959 	struct drm_plane *plane;
1960 	struct drm_plane_state *new_plane_state;
1961 	struct dm_plane_state *dm_new_plane_state;
1962 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1963 	enum dc_connection_type new_connection_type = dc_connection_none;
1964 	struct dc_state *dc_state;
1965 	int i, r, j;
1966 
1967 	if (amdgpu_in_reset(adev)) {
1968 		dc_state = dm->cached_dc_state;
1969 
1970 		r = dm_dmub_hw_init(adev);
1971 		if (r)
1972 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1973 
1974 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1975 		dc_resume(dm->dc);
1976 
1977 		amdgpu_dm_irq_resume_early(adev);
1978 
1979 		for (i = 0; i < dc_state->stream_count; i++) {
1980 			dc_state->streams[i]->mode_changed = true;
1981 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1982 				dc_state->stream_status->plane_states[j]->update_flags.raw
1983 					= 0xffffffff;
1984 			}
1985 		}
1986 
1987 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1988 
1989 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1990 
1991 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1992 
1993 		dc_release_state(dm->cached_dc_state);
1994 		dm->cached_dc_state = NULL;
1995 
1996 		amdgpu_dm_irq_resume_late(adev);
1997 
1998 		mutex_unlock(&dm->dc_lock);
1999 
2000 		return 0;
2001 	}
2002 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2003 	dc_release_state(dm_state->context);
2004 	dm_state->context = dc_create_state(dm->dc);
2005 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2006 	dc_resource_state_construct(dm->dc, dm_state->context);
2007 
2008 	/* Before powering on DC we need to re-initialize DMUB. */
2009 	r = dm_dmub_hw_init(adev);
2010 	if (r)
2011 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2012 
2013 	/* power on hardware */
2014 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2015 
2016 	/* program HPD filter */
2017 	dc_resume(dm->dc);
2018 
2019 	/*
2020 	 * early enable HPD Rx IRQ, should be done before set mode as short
2021 	 * pulse interrupts are used for MST
2022 	 */
2023 	amdgpu_dm_irq_resume_early(adev);
2024 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2026 	s3_handle_mst(ddev, false);
2027 
	/* Do detection */
2029 	drm_connector_list_iter_begin(ddev, &iter);
2030 	drm_for_each_connector_iter(connector, &iter) {
2031 		aconnector = to_amdgpu_dm_connector(connector);
2032 
		/*
		 * This is the case when traversing through already created
		 * MST connectors; they should be skipped.
		 */
2037 		if (aconnector->mst_port)
2038 			continue;
2039 
2040 		mutex_lock(&aconnector->hpd_lock);
2041 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2042 			DRM_ERROR("KMS: Failed to detect connector\n");
2043 
2044 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2045 			emulated_link_detect(aconnector->dc_link);
2046 		else
2047 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2048 
2049 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2050 			aconnector->fake_enable = false;
2051 
2052 		if (aconnector->dc_sink)
2053 			dc_sink_release(aconnector->dc_sink);
2054 		aconnector->dc_sink = NULL;
2055 		amdgpu_dm_update_connector_after_detect(aconnector);
2056 		mutex_unlock(&aconnector->hpd_lock);
2057 	}
2058 	drm_connector_list_iter_end(&iter);
2059 
2060 	/* Force mode set in atomic commit */
2061 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2062 		new_crtc_state->active_changed = true;
2063 
2064 	/*
2065 	 * atomic_check is expected to create the dc states. We need to release
2066 	 * them here, since they were duplicated as part of the suspend
2067 	 * procedure.
2068 	 */
2069 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2070 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2071 		if (dm_new_crtc_state->stream) {
2072 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2073 			dc_stream_release(dm_new_crtc_state->stream);
2074 			dm_new_crtc_state->stream = NULL;
2075 		}
2076 	}
2077 
2078 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2079 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2080 		if (dm_new_plane_state->dc_state) {
2081 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2082 			dc_plane_state_release(dm_new_plane_state->dc_state);
2083 			dm_new_plane_state->dc_state = NULL;
2084 		}
2085 	}
2086 
2087 	drm_atomic_helper_resume(ddev, dm->cached_state);
2088 
2089 	dm->cached_state = NULL;
2090 
2091 	amdgpu_dm_irq_resume_late(adev);
2092 
2093 	amdgpu_dm_smu_write_watermarks_table(adev);
2094 
2095 	return 0;
2096 }
2097 
2098 /**
2099  * DOC: DM Lifecycle
2100  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2102  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2103  * the base driver's device list to be initialized and torn down accordingly.
2104  *
2105  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2106  */
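
/*
 * A minimal illustrative sketch of how the base driver picks up this IP
 * block; the exact call site lives in the SoC-specific init code and is an
 * assumption here, not part of this file:
 *
 *	amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * The base driver then invokes the amd_ip_funcs hooks below at the matching
 * stages of the device life cycle.
 */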
2107 
2108 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2109 	.name = "dm",
2110 	.early_init = dm_early_init,
2111 	.late_init = dm_late_init,
2112 	.sw_init = dm_sw_init,
2113 	.sw_fini = dm_sw_fini,
2114 	.hw_init = dm_hw_init,
2115 	.hw_fini = dm_hw_fini,
2116 	.suspend = dm_suspend,
2117 	.resume = dm_resume,
2118 	.is_idle = dm_is_idle,
2119 	.wait_for_idle = dm_wait_for_idle,
2120 	.check_soft_reset = dm_check_soft_reset,
2121 	.soft_reset = dm_soft_reset,
2122 	.set_clockgating_state = dm_set_clockgating_state,
2123 	.set_powergating_state = dm_set_powergating_state,
2124 };
2125 
2126 const struct amdgpu_ip_block_version dm_ip_block =
2127 {
2128 	.type = AMD_IP_BLOCK_TYPE_DCE,
2129 	.major = 1,
2130 	.minor = 0,
2131 	.rev = 0,
2132 	.funcs = &amdgpu_dm_funcs,
2133 };
2134 
2135 
2136 /**
2137  * DOC: atomic
2138  *
2139  * *WIP*
2140  */
2141 
2142 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2143 	.fb_create = amdgpu_display_user_framebuffer_create,
2144 	.get_format_info = amd_get_format_info,
2145 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2146 	.atomic_check = amdgpu_dm_atomic_check,
2147 	.atomic_commit = amdgpu_dm_atomic_commit,
2148 };
2149 
2150 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2151 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2152 };
2153 
2154 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2155 {
2156 	u32 max_cll, min_cll, max, min, q, r;
2157 	struct amdgpu_dm_backlight_caps *caps;
2158 	struct amdgpu_display_manager *dm;
2159 	struct drm_connector *conn_base;
2160 	struct amdgpu_device *adev;
2161 	struct dc_link *link = NULL;
2162 	static const u8 pre_computed_values[] = {
2163 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2164 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2165 
2166 	if (!aconnector || !aconnector->dc_link)
2167 		return;
2168 
2169 	link = aconnector->dc_link;
2170 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2171 		return;
2172 
2173 	conn_base = &aconnector->base;
2174 	adev = drm_to_adev(conn_base->dev);
2175 	dm = &adev->dm;
2176 	caps = &dm->backlight_caps;
2177 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2178 	caps->aux_support = false;
2179 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2180 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2181 
2182 	if (caps->ext_caps->bits.oled == 1 ||
2183 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2184 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2185 		caps->aux_support = true;
2186 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * Evaluating this expression directly would require floating point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * we know that CV can be written as CV = 32*q + r. Substituting this
	 * into the Luminance expression gives 50*(2**q)*(2**(r/32)), so we
	 * only need to pre-compute the values of 2**(r/32). The values were
	 * generated with the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and can be verified against pre_computed_values above.
	 */
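	/*
	 * Worked example (numbers for illustration only): max_cll = 100
	 * gives q = 100 >> 5 = 3 and r = 100 % 32 = 4, so
	 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440, close to
	 * the exact 50 * 2**(100/32) ~= 436 nits.
	 */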
2202 	q = max_cll >> 5;
2203 	r = max_cll % 32;
2204 	max = (1 << q) * pre_computed_values[r];
2205 
	/*
	 * min luminance: maxLum * (CV/255)^2 / 100. Do the multiplication
	 * before dividing so the integer division does not round the squared
	 * ratio down to zero.
	 */
	min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2209 
2210 	caps->aux_max_input_signal = max;
2211 	caps->aux_min_input_signal = min;
2212 }
2213 
2214 void amdgpu_dm_update_connector_after_detect(
2215 		struct amdgpu_dm_connector *aconnector)
2216 {
2217 	struct drm_connector *connector = &aconnector->base;
2218 	struct drm_device *dev = connector->dev;
2219 	struct dc_sink *sink;
2220 
2221 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2223 		return;
2224 
2225 	sink = aconnector->dc_link->local_sink;
2226 	if (sink)
2227 		dc_sink_retain(sink);
2228 
	/*
	 * EDID-managed connectors get their first update only in the
	 * mode_valid hook; after that, the connector sink is set to either a
	 * fake or a physical sink, depending on link status.
	 * Skip if already done during boot.
	 */
2234 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2235 			&& aconnector->dc_em_sink) {
2236 
		/*
		 * For headless S3 resume, use the emulated sink (dc_em_sink)
		 * to fake a stream, because on resume connector->sink is set
		 * to NULL.
		 */
2241 		mutex_lock(&dev->mode_config.mutex);
2242 
2243 		if (sink) {
2244 			if (aconnector->dc_sink) {
2245 				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump up the
				 * sink refcount because the link no longer
				 * points to it after disconnect; otherwise,
				 * the next crtc-to-connector reshuffle by UMD
				 * would trigger an unwanted dc_sink release.
				 */
2252 				dc_sink_release(aconnector->dc_sink);
2253 			}
2254 			aconnector->dc_sink = sink;
2255 			dc_sink_retain(aconnector->dc_sink);
2256 			amdgpu_dm_update_freesync_caps(connector,
2257 					aconnector->edid);
2258 		} else {
2259 			amdgpu_dm_update_freesync_caps(connector, NULL);
2260 			if (!aconnector->dc_sink) {
2261 				aconnector->dc_sink = aconnector->dc_em_sink;
2262 				dc_sink_retain(aconnector->dc_sink);
2263 			}
2264 		}
2265 
2266 		mutex_unlock(&dev->mode_config.mutex);
2267 
2268 		if (sink)
2269 			dc_sink_release(sink);
2270 		return;
2271 	}
2272 
	/*
	 * TODO: temporary guard until a proper fix is found.
	 * If this sink is an MST sink, we should not do anything.
	 */
2277 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2278 		dc_sink_release(sink);
2279 		return;
2280 	}
2281 
2282 	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc.).
		 * Do nothing.
		 */
2287 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2288 				aconnector->connector_id);
2289 		if (sink)
2290 			dc_sink_release(sink);
2291 		return;
2292 	}
2293 
2294 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2295 		aconnector->connector_id, aconnector->dc_sink, sink);
2296 
2297 	mutex_lock(&dev->mode_config.mutex);
2298 
2299 	/*
2300 	 * 1. Update status of the drm connector
2301 	 * 2. Send an event and let userspace tell us what to do
2302 	 */
2303 	if (sink) {
2304 		/*
2305 		 * TODO: check if we still need the S3 mode update workaround.
2306 		 * If yes, put it here.
2307 		 */
2308 		if (aconnector->dc_sink)
2309 			amdgpu_dm_update_freesync_caps(connector, NULL);
2310 
2311 		aconnector->dc_sink = sink;
2312 		dc_sink_retain(aconnector->dc_sink);
2313 		if (sink->dc_edid.length == 0) {
2314 			aconnector->edid = NULL;
2315 			if (aconnector->dc_link->aux_mode) {
2316 				drm_dp_cec_unset_edid(
2317 					&aconnector->dm_dp_aux.aux);
2318 			}
2319 		} else {
2320 			aconnector->edid =
2321 				(struct edid *)sink->dc_edid.raw_edid;
2322 
2323 			drm_connector_update_edid_property(connector,
2324 							   aconnector->edid);
2325 			drm_add_edid_modes(connector, aconnector->edid);
2326 
2327 			if (aconnector->dc_link->aux_mode)
2328 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2329 						    aconnector->edid);
2330 		}
2331 
2332 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2333 		update_connector_ext_caps(aconnector);
2334 	} else {
2335 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2336 		amdgpu_dm_update_freesync_caps(connector, NULL);
2337 		drm_connector_update_edid_property(connector, NULL);
2338 		aconnector->num_modes = 0;
2339 		dc_sink_release(aconnector->dc_sink);
2340 		aconnector->dc_sink = NULL;
2341 		aconnector->edid = NULL;
2342 #ifdef CONFIG_DRM_AMD_DC_HDCP
2343 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2344 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2345 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2346 #endif
2347 	}
2348 
2349 	mutex_unlock(&dev->mode_config.mutex);
2350 
2351 	update_subconnector_property(aconnector);
2352 
2353 	if (sink)
2354 		dc_sink_release(sink);
2355 }
2356 
2357 static void handle_hpd_irq(void *param)
2358 {
2359 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2360 	struct drm_connector *connector = &aconnector->base;
2361 	struct drm_device *dev = connector->dev;
2362 	enum dc_connection_type new_connection_type = dc_connection_none;
2363 #ifdef CONFIG_DRM_AMD_DC_HDCP
2364 	struct amdgpu_device *adev = drm_to_adev(dev);
2365 #endif
2366 
	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST handles this
	 * in its own context.
	 */
2371 	mutex_lock(&aconnector->hpd_lock);
2372 
2373 #ifdef CONFIG_DRM_AMD_DC_HDCP
2374 	if (adev->dm.hdcp_workqueue)
2375 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2376 #endif
2377 	if (aconnector->fake_enable)
2378 		aconnector->fake_enable = false;
2379 
2380 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2381 		DRM_ERROR("KMS: Failed to detect connector\n");
2382 
2383 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2384 		emulated_link_detect(aconnector->dc_link);
2385 
2386 
2387 		drm_modeset_lock_all(dev);
2388 		dm_restore_drm_connector_state(dev, connector);
2389 		drm_modeset_unlock_all(dev);
2390 
2391 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2392 			drm_kms_helper_hotplug_event(dev);
2393 
2394 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2395 		amdgpu_dm_update_connector_after_detect(aconnector);
2396 
2397 
2398 		drm_modeset_lock_all(dev);
2399 		dm_restore_drm_connector_state(dev, connector);
2400 		drm_modeset_unlock_all(dev);
2401 
2402 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2403 			drm_kms_helper_hotplug_event(dev);
2404 	}
2405 	mutex_unlock(&aconnector->hpd_lock);
2406 
2407 }
2408 
2409 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2410 {
2411 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2412 	uint8_t dret;
2413 	bool new_irq_handled = false;
2414 	int dpcd_addr;
2415 	int dpcd_bytes_to_read;
2416 
2417 	const int max_process_count = 30;
2418 	int process_count = 0;
2419 
2420 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2421 
2422 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2423 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2424 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2425 		dpcd_addr = DP_SINK_COUNT;
2426 	} else {
2427 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2428 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2429 		dpcd_addr = DP_SINK_COUNT_ESI;
2430 	}
2431 
2432 	dret = drm_dp_dpcd_read(
2433 		&aconnector->dm_dp_aux.aux,
2434 		dpcd_addr,
2435 		esi,
2436 		dpcd_bytes_to_read);
2437 
2438 	while (dret == dpcd_bytes_to_read &&
2439 		process_count < max_process_count) {
2440 		uint8_t retry;
2441 		dret = 0;
2442 
2443 		process_count++;
2444 
2445 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2446 		/* handle HPD short pulse irq */
2447 		if (aconnector->mst_mgr.mst_state)
2448 			drm_dp_mst_hpd_irq(
2449 				&aconnector->mst_mgr,
2450 				esi,
2451 				&new_irq_handled);
2452 
2453 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
2455 			const int ack_dpcd_bytes_to_write =
2456 				dpcd_bytes_to_read - 1;
2457 
2458 			for (retry = 0; retry < 3; retry++) {
2459 				uint8_t wret;
2460 
2461 				wret = drm_dp_dpcd_write(
2462 					&aconnector->dm_dp_aux.aux,
2463 					dpcd_addr + 1,
2464 					&esi[1],
2465 					ack_dpcd_bytes_to_write);
2466 				if (wret == ack_dpcd_bytes_to_write)
2467 					break;
2468 			}
2469 
2470 			/* check if there is new irq to be handled */
2471 			dret = drm_dp_dpcd_read(
2472 				&aconnector->dm_dp_aux.aux,
2473 				dpcd_addr,
2474 				esi,
2475 				dpcd_bytes_to_read);
2476 
2477 			new_irq_handled = false;
2478 		} else {
2479 			break;
2480 		}
2481 	}
2482 
2483 	if (process_count == max_process_count)
2484 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2485 }
2486 
2487 static void handle_hpd_rx_irq(void *param)
2488 {
2489 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2490 	struct drm_connector *connector = &aconnector->base;
2491 	struct drm_device *dev = connector->dev;
2492 	struct dc_link *dc_link = aconnector->dc_link;
2493 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2494 	enum dc_connection_type new_connection_type = dc_connection_none;
2495 #ifdef CONFIG_DRM_AMD_DC_HDCP
2496 	union hpd_irq_data hpd_irq_data;
2497 	struct amdgpu_device *adev = drm_to_adev(dev);
2498 
2499 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2500 #endif
2501 
	/*
	 * TODO: This mutex temporarily protects the HPD interrupt from GPIO
	 * conflicts; once an i2c helper is implemented, it should be
	 * retired.
	 */
2507 	if (dc_link->type != dc_connection_mst_branch)
2508 		mutex_lock(&aconnector->hpd_lock);
2509 
2510 
2511 #ifdef CONFIG_DRM_AMD_DC_HDCP
2512 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2513 #else
2514 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2515 #endif
2516 			!is_mst_root_connector) {
2517 		/* Downstream Port status changed. */
2518 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2519 			DRM_ERROR("KMS: Failed to detect connector\n");
2520 
2521 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2522 			emulated_link_detect(dc_link);
2523 
2524 			if (aconnector->fake_enable)
2525 				aconnector->fake_enable = false;
2526 
2527 			amdgpu_dm_update_connector_after_detect(aconnector);
2528 
2529 
2530 			drm_modeset_lock_all(dev);
2531 			dm_restore_drm_connector_state(dev, connector);
2532 			drm_modeset_unlock_all(dev);
2533 
2534 			drm_kms_helper_hotplug_event(dev);
2535 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2536 
2537 			if (aconnector->fake_enable)
2538 				aconnector->fake_enable = false;
2539 
2540 			amdgpu_dm_update_connector_after_detect(aconnector);
2541 
2542 
2543 			drm_modeset_lock_all(dev);
2544 			dm_restore_drm_connector_state(dev, connector);
2545 			drm_modeset_unlock_all(dev);
2546 
2547 			drm_kms_helper_hotplug_event(dev);
2548 		}
2549 	}
2550 #ifdef CONFIG_DRM_AMD_DC_HDCP
2551 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2552 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2554 	}
2555 #endif
2556 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2557 	    (dc_link->type == dc_connection_mst_branch))
2558 		dm_handle_hpd_rx_irq(aconnector);
2559 
2560 	if (dc_link->type != dc_connection_mst_branch) {
2561 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2562 		mutex_unlock(&aconnector->hpd_lock);
2563 	}
2564 }
2565 
2566 static void register_hpd_handlers(struct amdgpu_device *adev)
2567 {
2568 	struct drm_device *dev = adev_to_drm(adev);
2569 	struct drm_connector *connector;
2570 	struct amdgpu_dm_connector *aconnector;
2571 	const struct dc_link *dc_link;
2572 	struct dc_interrupt_params int_params = {0};
2573 
2574 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2575 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2576 
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
2579 
2580 		aconnector = to_amdgpu_dm_connector(connector);
2581 		dc_link = aconnector->dc_link;
2582 
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2584 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2585 			int_params.irq_source = dc_link->irq_source_hpd;
2586 
2587 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2588 					handle_hpd_irq,
2589 					(void *) aconnector);
2590 		}
2591 
		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2593 
2594 			/* Also register for DP short pulse (hpd_rx). */
2595 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
2597 
2598 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2599 					handle_hpd_rx_irq,
2600 					(void *) aconnector);
2601 		}
2602 	}
2603 }
2604 
2605 #if defined(CONFIG_DRM_AMD_DC_SI)
2606 /* Register IRQ sources and initialize IRQ callbacks */
2607 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2608 {
2609 	struct dc *dc = adev->dm.dc;
2610 	struct common_irq_params *c_irq_params;
2611 	struct dc_interrupt_params int_params = {0};
2612 	int r;
2613 	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2615 
2616 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2617 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2618 
2619 	/*
2620 	 * Actions of amdgpu_irq_add_id():
2621 	 * 1. Register a set() function with base driver.
2622 	 *    Base driver will call set() function to enable/disable an
2623 	 *    interrupt in DC hardware.
2624 	 * 2. Register amdgpu_dm_irq_handler().
2625 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2626 	 *    coming from DC hardware.
2627 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2629 
2630 	/* Use VBLANK interrupt */
2631 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2633 		if (r) {
2634 			DRM_ERROR("Failed to add crtc irq id!\n");
2635 			return r;
2636 		}
2637 
2638 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2639 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
2641 
2642 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2643 
2644 		c_irq_params->adev = adev;
2645 		c_irq_params->irq_src = int_params.irq_source;
2646 
2647 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2648 				dm_crtc_high_irq, c_irq_params);
2649 	}
2650 
2651 	/* Use GRPH_PFLIP interrupt */
2652 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2653 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2654 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2655 		if (r) {
2656 			DRM_ERROR("Failed to add page flip irq id!\n");
2657 			return r;
2658 		}
2659 
2660 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2661 		int_params.irq_source =
2662 			dc_interrupt_to_irq_source(dc, i, 0);
2663 
2664 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2665 
2666 		c_irq_params->adev = adev;
2667 		c_irq_params->irq_src = int_params.irq_source;
2668 
2669 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2670 				dm_pflip_high_irq, c_irq_params);
2671 
2672 	}
2673 
2674 	/* HPD */
2675 	r = amdgpu_irq_add_id(adev, client_id,
2676 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2677 	if (r) {
2678 		DRM_ERROR("Failed to add hpd irq id!\n");
2679 		return r;
2680 	}
2681 
2682 	register_hpd_handlers(adev);
2683 
2684 	return 0;
2685 }
2686 #endif
2687 
2688 /* Register IRQ sources and initialize IRQ callbacks */
2689 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2690 {
2691 	struct dc *dc = adev->dm.dc;
2692 	struct common_irq_params *c_irq_params;
2693 	struct dc_interrupt_params int_params = {0};
2694 	int r;
2695 	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2697 
2698 	if (adev->asic_type >= CHIP_VEGA10)
2699 		client_id = SOC15_IH_CLIENTID_DCE;
2700 
2701 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2702 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2703 
2704 	/*
2705 	 * Actions of amdgpu_irq_add_id():
2706 	 * 1. Register a set() function with base driver.
2707 	 *    Base driver will call set() function to enable/disable an
2708 	 *    interrupt in DC hardware.
2709 	 * 2. Register amdgpu_dm_irq_handler().
2710 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2711 	 *    coming from DC hardware.
2712 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2714 
2715 	/* Use VBLANK interrupt */
2716 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2717 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2718 		if (r) {
2719 			DRM_ERROR("Failed to add crtc irq id!\n");
2720 			return r;
2721 		}
2722 
2723 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2724 		int_params.irq_source =
2725 			dc_interrupt_to_irq_source(dc, i, 0);
2726 
2727 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2728 
2729 		c_irq_params->adev = adev;
2730 		c_irq_params->irq_src = int_params.irq_source;
2731 
2732 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2733 				dm_crtc_high_irq, c_irq_params);
2734 	}
2735 
2736 	/* Use VUPDATE interrupt */
2737 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2738 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2739 		if (r) {
2740 			DRM_ERROR("Failed to add vupdate irq id!\n");
2741 			return r;
2742 		}
2743 
2744 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2745 		int_params.irq_source =
2746 			dc_interrupt_to_irq_source(dc, i, 0);
2747 
2748 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2749 
2750 		c_irq_params->adev = adev;
2751 		c_irq_params->irq_src = int_params.irq_source;
2752 
2753 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2754 				dm_vupdate_high_irq, c_irq_params);
2755 	}
2756 
2757 	/* Use GRPH_PFLIP interrupt */
2758 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2759 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2760 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2761 		if (r) {
2762 			DRM_ERROR("Failed to add page flip irq id!\n");
2763 			return r;
2764 		}
2765 
2766 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2767 		int_params.irq_source =
2768 			dc_interrupt_to_irq_source(dc, i, 0);
2769 
2770 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2771 
2772 		c_irq_params->adev = adev;
2773 		c_irq_params->irq_src = int_params.irq_source;
2774 
2775 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2776 				dm_pflip_high_irq, c_irq_params);
2777 
2778 	}
2779 
2780 	/* HPD */
2781 	r = amdgpu_irq_add_id(adev, client_id,
2782 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2783 	if (r) {
2784 		DRM_ERROR("Failed to add hpd irq id!\n");
2785 		return r;
2786 	}
2787 
2788 	register_hpd_handlers(adev);
2789 
2790 	return 0;
2791 }
2792 
2793 #if defined(CONFIG_DRM_AMD_DC_DCN)
2794 /* Register IRQ sources and initialize IRQ callbacks */
2795 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2796 {
2797 	struct dc *dc = adev->dm.dc;
2798 	struct common_irq_params *c_irq_params;
2799 	struct dc_interrupt_params int_params = {0};
2800 	int r;
2801 	int i;
2802 
2803 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2804 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2805 
2806 	/*
2807 	 * Actions of amdgpu_irq_add_id():
2808 	 * 1. Register a set() function with base driver.
2809 	 *    Base driver will call set() function to enable/disable an
2810 	 *    interrupt in DC hardware.
2811 	 * 2. Register amdgpu_dm_irq_handler().
2812 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2813 	 *    coming from DC hardware.
2814 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2815 	 *    for acknowledging and handling.
2816 	 */
2817 
2818 	/* Use VSTARTUP interrupt */
2819 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2820 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2821 			i++) {
2822 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2823 
2824 		if (r) {
2825 			DRM_ERROR("Failed to add crtc irq id!\n");
2826 			return r;
2827 		}
2828 
2829 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2830 		int_params.irq_source =
2831 			dc_interrupt_to_irq_source(dc, i, 0);
2832 
2833 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2834 
2835 		c_irq_params->adev = adev;
2836 		c_irq_params->irq_src = int_params.irq_source;
2837 
2838 		amdgpu_dm_irq_register_interrupt(
2839 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2840 	}
2841 
2842 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2843 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2844 	 * to trigger at end of each vblank, regardless of state of the lock,
2845 	 * matching DCE behaviour.
2846 	 */
2847 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2848 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2849 	     i++) {
2850 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2851 
2852 		if (r) {
2853 			DRM_ERROR("Failed to add vupdate irq id!\n");
2854 			return r;
2855 		}
2856 
2857 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2858 		int_params.irq_source =
2859 			dc_interrupt_to_irq_source(dc, i, 0);
2860 
2861 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2862 
2863 		c_irq_params->adev = adev;
2864 		c_irq_params->irq_src = int_params.irq_source;
2865 
2866 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2867 				dm_vupdate_high_irq, c_irq_params);
2868 	}
2869 
2870 	/* Use GRPH_PFLIP interrupt */
2871 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2872 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2873 			i++) {
2874 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2875 		if (r) {
2876 			DRM_ERROR("Failed to add page flip irq id!\n");
2877 			return r;
2878 		}
2879 
2880 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2881 		int_params.irq_source =
2882 			dc_interrupt_to_irq_source(dc, i, 0);
2883 
2884 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2885 
2886 		c_irq_params->adev = adev;
2887 		c_irq_params->irq_src = int_params.irq_source;
2888 
2889 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2890 				dm_pflip_high_irq, c_irq_params);
2891 
2892 	}
2893 
2894 	/* HPD */
2895 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2896 			&adev->hpd_irq);
2897 	if (r) {
2898 		DRM_ERROR("Failed to add hpd irq id!\n");
2899 		return r;
2900 	}
2901 
2902 	register_hpd_handlers(adev);
2903 
2904 	return 0;
2905 }
2906 #endif
2907 
2908 /*
2909  * Acquires the lock for the atomic state object and returns
2910  * the new atomic state.
2911  *
2912  * This should only be called during atomic check.
2913  */
2914 static int dm_atomic_get_state(struct drm_atomic_state *state,
2915 			       struct dm_atomic_state **dm_state)
2916 {
2917 	struct drm_device *dev = state->dev;
2918 	struct amdgpu_device *adev = drm_to_adev(dev);
2919 	struct amdgpu_display_manager *dm = &adev->dm;
2920 	struct drm_private_state *priv_state;
2921 
2922 	if (*dm_state)
2923 		return 0;
2924 
2925 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2926 	if (IS_ERR(priv_state))
2927 		return PTR_ERR(priv_state);
2928 
2929 	*dm_state = to_dm_atomic_state(priv_state);
2930 
2931 	return 0;
2932 }
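
/*
 * A hypothetical usage sketch from an atomic check handler (names are
 * illustrative, not taken from this file):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context is now safe to inspect and modify
 */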
2933 
2934 static struct dm_atomic_state *
2935 dm_atomic_get_new_state(struct drm_atomic_state *state)
2936 {
2937 	struct drm_device *dev = state->dev;
2938 	struct amdgpu_device *adev = drm_to_adev(dev);
2939 	struct amdgpu_display_manager *dm = &adev->dm;
2940 	struct drm_private_obj *obj;
2941 	struct drm_private_state *new_obj_state;
2942 	int i;
2943 
2944 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2945 		if (obj->funcs == dm->atomic_obj.funcs)
2946 			return to_dm_atomic_state(new_obj_state);
2947 	}
2948 
2949 	return NULL;
2950 }
2951 
2952 static struct drm_private_state *
2953 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2954 {
2955 	struct dm_atomic_state *old_state, *new_state;
2956 
2957 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2958 	if (!new_state)
2959 		return NULL;
2960 
2961 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2962 
2963 	old_state = to_dm_atomic_state(obj->state);
2964 
2965 	if (old_state && old_state->context)
2966 		new_state->context = dc_copy_state(old_state->context);
2967 
2968 	if (!new_state->context) {
2969 		kfree(new_state);
2970 		return NULL;
2971 	}
2972 
2973 	return &new_state->base;
2974 }
2975 
2976 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2977 				    struct drm_private_state *state)
2978 {
2979 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2980 
2981 	if (dm_state && dm_state->context)
2982 		dc_release_state(dm_state->context);
2983 
2984 	kfree(dm_state);
2985 }
2986 
2987 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2988 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2989 	.atomic_destroy_state = dm_atomic_destroy_state,
2990 };
2991 
2992 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2993 {
2994 	struct dm_atomic_state *state;
2995 	int r;
2996 
2997 	adev->mode_info.mode_config_initialized = true;
2998 
2999 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3000 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3001 
3002 	adev_to_drm(adev)->mode_config.max_width = 16384;
3003 	adev_to_drm(adev)->mode_config.max_height = 16384;
3004 
3005 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3006 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3007 	/* indicates support for immediate flip */
3008 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3009 
3010 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3011 
3012 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3013 	if (!state)
3014 		return -ENOMEM;
3015 
3016 	state->context = dc_create_state(adev->dm.dc);
3017 	if (!state->context) {
3018 		kfree(state);
3019 		return -ENOMEM;
3020 	}
3021 
3022 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3023 
3024 	drm_atomic_private_obj_init(adev_to_drm(adev),
3025 				    &adev->dm.atomic_obj,
3026 				    &state->base,
3027 				    &dm_atomic_state_funcs);
3028 
3029 	r = amdgpu_display_modeset_create_props(adev);
3030 	if (r) {
3031 		dc_release_state(state->context);
3032 		kfree(state);
3033 		return r;
3034 	}
3035 
3036 	r = amdgpu_dm_audio_init(adev);
3037 	if (r) {
3038 		dc_release_state(state->context);
3039 		kfree(state);
3040 		return r;
3041 	}
3042 
3043 	return 0;
3044 }
3045 
3046 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3047 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3048 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3049 
3050 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3051 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3052 
3053 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3054 {
3055 #if defined(CONFIG_ACPI)
3056 	struct amdgpu_dm_backlight_caps caps;
3057 
3058 	memset(&caps, 0, sizeof(caps));
3059 
3060 	if (dm->backlight_caps.caps_valid)
3061 		return;
3062 
3063 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3064 	if (caps.caps_valid) {
3065 		dm->backlight_caps.caps_valid = true;
3066 		if (caps.aux_support)
3067 			return;
3068 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3069 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3070 	} else {
3071 		dm->backlight_caps.min_input_signal =
3072 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3073 		dm->backlight_caps.max_input_signal =
3074 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3075 	}
3076 #else
3077 	if (dm->backlight_caps.aux_support)
3078 		return;
3079 
3080 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3081 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3082 #endif
3083 }
3084 
3085 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3086 {
3087 	bool rc;
3088 
3089 	if (!link)
3090 		return 1;
3091 
3092 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3093 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3094 
3095 	return rc ? 0 : 1;
3096 }
3097 
3098 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3099 				unsigned *min, unsigned *max)
3100 {
3101 	if (!caps)
3102 		return 0;
3103 
3104 	if (caps->aux_support) {
3105 		// Firmware limits are in nits, DC API wants millinits.
3106 		*max = 1000 * caps->aux_max_input_signal;
3107 		*min = 1000 * caps->aux_min_input_signal;
3108 	} else {
3109 		// Firmware limits are 8-bit, PWM control is 16-bit.
3110 		*max = 0x101 * caps->max_input_signal;
3111 		*min = 0x101 * caps->min_input_signal;
3112 	}
3113 	return 1;
3114 }
3115 
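/*
 * Worked example for the rescaling below (numbers assumed for
 * illustration): with firmware limits min_input_signal = 12 and
 * max_input_signal = 255 in PWM mode, get_brightness_range() yields
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535. A user
 * brightness of 128 then maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */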
3116 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3117 					uint32_t brightness)
3118 {
3119 	unsigned min, max;
3120 
3121 	if (!get_brightness_range(caps, &min, &max))
3122 		return brightness;
3123 
3124 	// Rescale 0..255 to min..max
3125 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3126 				       AMDGPU_MAX_BL_LEVEL);
3127 }
3128 
3129 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3130 				      uint32_t brightness)
3131 {
3132 	unsigned min, max;
3133 
3134 	if (!get_brightness_range(caps, &min, &max))
3135 		return brightness;
3136 
3137 	if (brightness < min)
3138 		return 0;
3139 	// Rescale min..max to 0..255
3140 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3141 				 max - min);
3142 }
3143 
3144 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3145 {
3146 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3147 	struct amdgpu_dm_backlight_caps caps;
3148 	struct dc_link *link = NULL;
3149 	u32 brightness;
3150 	bool rc;
3151 
3152 	amdgpu_dm_update_backlight_caps(dm);
3153 	caps = dm->backlight_caps;
3154 
3155 	link = (struct dc_link *)dm->backlight_link;
3156 
3157 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3158 	// Change brightness based on AUX property
3159 	if (caps.aux_support)
3160 		return set_backlight_via_aux(link, brightness);
3161 
3162 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3163 
3164 	return rc ? 0 : 1;
3165 }
3166 
3167 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3168 {
3169 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3170 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3171 
3172 	if (ret == DC_ERROR_UNEXPECTED)
3173 		return bd->props.brightness;
3174 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3175 }
3176 
3177 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3178 	.options = BL_CORE_SUSPENDRESUME,
3179 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3180 	.update_status	= amdgpu_dm_backlight_update_status,
3181 };
3182 
3183 static void
3184 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3185 {
3186 	char bl_name[16];
3187 	struct backlight_properties props = { 0 };
3188 
3189 	amdgpu_dm_update_backlight_caps(dm);
3190 
3191 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3192 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3193 	props.type = BACKLIGHT_RAW;
3194 
3195 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3196 		 adev_to_drm(dm->adev)->primary->index);
3197 
3198 	dm->backlight_dev = backlight_device_register(bl_name,
3199 						      adev_to_drm(dm->adev)->dev,
3200 						      dm,
3201 						      &amdgpu_dm_backlight_ops,
3202 						      &props);
3203 
3204 	if (IS_ERR(dm->backlight_dev))
3205 		DRM_ERROR("DM: Backlight registration failed!\n");
3206 	else
3207 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3208 }
3209 
3210 #endif
3211 
3212 static int initialize_plane(struct amdgpu_display_manager *dm,
3213 			    struct amdgpu_mode_info *mode_info, int plane_id,
3214 			    enum drm_plane_type plane_type,
3215 			    const struct dc_plane_cap *plane_cap)
3216 {
3217 	struct drm_plane *plane;
3218 	unsigned long possible_crtcs;
3219 	int ret = 0;
3220 
3221 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3222 	if (!plane) {
3223 		DRM_ERROR("KMS: Failed to allocate plane\n");
3224 		return -ENOMEM;
3225 	}
3226 	plane->type = plane_type;
3227 
	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC to planes that are not going to be used as a primary
	 * plane for a CRTC - i.e. overlay or underlay planes.
	 */
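	/* For example, plane_id == 1 yields possible_crtcs == 0x2 (CRTC 1 only). */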
3234 	possible_crtcs = 1 << plane_id;
3235 	if (plane_id >= dm->dc->caps.max_streams)
3236 		possible_crtcs = 0xff;
3237 
3238 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3239 
3240 	if (ret) {
3241 		DRM_ERROR("KMS: Failed to initialize plane\n");
3242 		kfree(plane);
3243 		return ret;
3244 	}
3245 
3246 	if (mode_info)
3247 		mode_info->planes[plane_id] = plane;
3248 
3249 	return ret;
3250 }
3251 
3252 
3253 static void register_backlight_device(struct amdgpu_display_manager *dm,
3254 				      struct dc_link *link)
3255 {
3256 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3257 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3258 
3259 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3260 	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization, because not having backlight control
		 * is better than a black screen.
		 */
3266 		amdgpu_dm_register_backlight_device(dm);
3267 
3268 		if (dm->backlight_dev)
3269 			dm->backlight_link = link;
3270 	}
3271 #endif
3272 }
3273 
3274 
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
3283 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3284 {
3285 	struct amdgpu_display_manager *dm = &adev->dm;
3286 	int32_t i;
3287 	struct amdgpu_dm_connector *aconnector = NULL;
3288 	struct amdgpu_encoder *aencoder = NULL;
3289 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3290 	uint32_t link_cnt;
3291 	int32_t primary_planes;
3292 	enum dc_connection_type new_connection_type = dc_connection_none;
3293 	const struct dc_plane_cap *plane;
3294 
3295 	link_cnt = dm->dc->caps.max_links;
3296 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3297 		DRM_ERROR("DM: Failed to initialize mode config\n");
3298 		return -EINVAL;
3299 	}
3300 
3301 	/* There is one primary plane per CRTC */
3302 	primary_planes = dm->dc->caps.max_streams;
3303 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3304 
3305 	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLs.
3307 	 * Order is reversed to match iteration order in atomic check.
3308 	 */
3309 	for (i = (primary_planes - 1); i >= 0; i--) {
3310 		plane = &dm->dc->caps.planes[i];
3311 
3312 		if (initialize_plane(dm, mode_info, i,
3313 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3314 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3315 			goto fail;
3316 		}
3317 	}
3318 
3319 	/*
3320 	 * Initialize overlay planes, index starting after primary planes.
3321 	 * These planes have a higher DRM index than the primary planes since
3322 	 * they should be considered as having a higher z-order.
3323 	 * Order is reversed to match iteration order in atomic check.
3324 	 *
3325 	 * Only support DCN for now, and only expose one so we don't encourage
3326 	 * userspace to use up all the pipes.
3327 	 */
3328 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3329 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3330 
3331 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3332 			continue;
3333 
3334 		if (!plane->blends_with_above || !plane->blends_with_below)
3335 			continue;
3336 
3337 		if (!plane->pixel_format_support.argb8888)
3338 			continue;
3339 
3340 		if (initialize_plane(dm, NULL, primary_planes + i,
3341 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3342 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3343 			goto fail;
3344 		}
3345 
3346 		/* Only create one overlay plane. */
3347 		break;
3348 	}
3349 
3350 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3351 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3352 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3353 			goto fail;
3354 		}
3355 
3356 	dm->display_indexes_num = dm->dc->caps.max_streams;
3357 
3358 	/* loops over all connectors on the board */
3359 	for (i = 0; i < link_cnt; i++) {
3360 		struct dc_link *link = NULL;
3361 
3362 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3363 			DRM_ERROR(
3364 				"KMS: Cannot support more than %d display indexes\n",
3365 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3366 			continue;
3367 		}
3368 
3369 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3370 		if (!aconnector)
3371 			goto fail;
3372 
3373 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3374 		if (!aencoder)
3375 			goto fail;
3376 
3377 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3378 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3379 			goto fail;
3380 		}
3381 
3382 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3383 			DRM_ERROR("KMS: Failed to initialize connector\n");
3384 			goto fail;
3385 		}
3386 
3387 		link = dc_get_link_at_index(dm->dc, i);
3388 
3389 		if (!dc_link_detect_sink(link, &new_connection_type))
3390 			DRM_ERROR("KMS: Failed to detect connector\n");
3391 
3392 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3393 			emulated_link_detect(link);
3394 			amdgpu_dm_update_connector_after_detect(aconnector);
3395 
3396 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3397 			amdgpu_dm_update_connector_after_detect(aconnector);
3398 			register_backlight_device(dm, link);
3399 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3400 				amdgpu_dm_set_psr_caps(link);
3401 		}
3402 
3403 
3404 	}
3405 
3406 	/* Software is initialized. Now we can register interrupt handlers. */
3407 	switch (adev->asic_type) {
3408 #if defined(CONFIG_DRM_AMD_DC_SI)
3409 	case CHIP_TAHITI:
3410 	case CHIP_PITCAIRN:
3411 	case CHIP_VERDE:
3412 	case CHIP_OLAND:
3413 		if (dce60_register_irq_handlers(dm->adev)) {
3414 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3415 			goto fail;
3416 		}
3417 		break;
3418 #endif
3419 	case CHIP_BONAIRE:
3420 	case CHIP_HAWAII:
3421 	case CHIP_KAVERI:
3422 	case CHIP_KABINI:
3423 	case CHIP_MULLINS:
3424 	case CHIP_TONGA:
3425 	case CHIP_FIJI:
3426 	case CHIP_CARRIZO:
3427 	case CHIP_STONEY:
3428 	case CHIP_POLARIS11:
3429 	case CHIP_POLARIS10:
3430 	case CHIP_POLARIS12:
3431 	case CHIP_VEGAM:
3432 	case CHIP_VEGA10:
3433 	case CHIP_VEGA12:
3434 	case CHIP_VEGA20:
3435 		if (dce110_register_irq_handlers(dm->adev)) {
3436 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3437 			goto fail;
3438 		}
3439 		break;
3440 #if defined(CONFIG_DRM_AMD_DC_DCN)
3441 	case CHIP_RAVEN:
3442 	case CHIP_NAVI12:
3443 	case CHIP_NAVI10:
3444 	case CHIP_NAVI14:
3445 	case CHIP_RENOIR:
3446 	case CHIP_SIENNA_CICHLID:
3447 	case CHIP_NAVY_FLOUNDER:
3448 	case CHIP_DIMGREY_CAVEFISH:
3449 	case CHIP_VANGOGH:
3450 		if (dcn10_register_irq_handlers(dm->adev)) {
3451 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3452 			goto fail;
3453 		}
3454 		break;
3455 #endif
3456 	default:
3457 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3458 		goto fail;
3459 	}
3460 
3461 	return 0;
3462 fail:
3463 	kfree(aencoder);
3464 	kfree(aconnector);
3465 
3466 	return -EINVAL;
3467 }
3468 
3469 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3470 {
3471 	drm_mode_config_cleanup(dm->ddev);
3472 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3474 }
3475 
3476 /******************************************************************************
3477  * amdgpu_display_funcs functions
3478  *****************************************************************************/
3479 
3480 /*
3481  * dm_bandwidth_update - program display watermarks
3482  *
3483  * @adev: amdgpu_device pointer
3484  *
3485  * Calculate and program the display watermarks and line buffer allocation.
3486  */
3487 static void dm_bandwidth_update(struct amdgpu_device *adev)
3488 {
3489 	/* TODO: implement later */
3490 }
3491 
3492 static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3504 };
3505 
3506 #if defined(CONFIG_DEBUG_KERNEL_DC)
3507 
3508 static ssize_t s3_debug_store(struct device *device,
3509 			      struct device_attribute *attr,
3510 			      const char *buf,
3511 			      size_t count)
3512 {
3513 	int ret;
3514 	int s3_state;
3515 	struct drm_device *drm_dev = dev_get_drvdata(device);
3516 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3517 
	ret = kstrtoint(buf, 0, &s3_state);
	if (ret)
		return ret;

	if (s3_state) {
		dm_resume(adev);
		drm_kms_helper_hotplug_event(adev_to_drm(adev));
	} else {
		dm_suspend(adev);
	}

	return count;
3529 }
3530 
3531 DEVICE_ATTR_WO(s3_debug);
3532 
3533 #endif
3534 
3535 static int dm_early_init(void *handle)
3536 {
3537 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3538 
3539 	switch (adev->asic_type) {
3540 #if defined(CONFIG_DRM_AMD_DC_SI)
3541 	case CHIP_TAHITI:
3542 	case CHIP_PITCAIRN:
3543 	case CHIP_VERDE:
3544 		adev->mode_info.num_crtc = 6;
3545 		adev->mode_info.num_hpd = 6;
3546 		adev->mode_info.num_dig = 6;
3547 		break;
3548 	case CHIP_OLAND:
3549 		adev->mode_info.num_crtc = 2;
3550 		adev->mode_info.num_hpd = 2;
3551 		adev->mode_info.num_dig = 2;
3552 		break;
3553 #endif
3554 	case CHIP_BONAIRE:
3555 	case CHIP_HAWAII:
3556 		adev->mode_info.num_crtc = 6;
3557 		adev->mode_info.num_hpd = 6;
3558 		adev->mode_info.num_dig = 6;
3559 		break;
3560 	case CHIP_KAVERI:
3561 		adev->mode_info.num_crtc = 4;
3562 		adev->mode_info.num_hpd = 6;
3563 		adev->mode_info.num_dig = 7;
3564 		break;
3565 	case CHIP_KABINI:
3566 	case CHIP_MULLINS:
3567 		adev->mode_info.num_crtc = 2;
3568 		adev->mode_info.num_hpd = 6;
3569 		adev->mode_info.num_dig = 6;
3570 		break;
3571 	case CHIP_FIJI:
3572 	case CHIP_TONGA:
3573 		adev->mode_info.num_crtc = 6;
3574 		adev->mode_info.num_hpd = 6;
3575 		adev->mode_info.num_dig = 7;
3576 		break;
3577 	case CHIP_CARRIZO:
3578 		adev->mode_info.num_crtc = 3;
3579 		adev->mode_info.num_hpd = 6;
3580 		adev->mode_info.num_dig = 9;
3581 		break;
3582 	case CHIP_STONEY:
3583 		adev->mode_info.num_crtc = 2;
3584 		adev->mode_info.num_hpd = 6;
3585 		adev->mode_info.num_dig = 9;
3586 		break;
3587 	case CHIP_POLARIS11:
3588 	case CHIP_POLARIS12:
3589 		adev->mode_info.num_crtc = 5;
3590 		adev->mode_info.num_hpd = 5;
3591 		adev->mode_info.num_dig = 5;
3592 		break;
3593 	case CHIP_POLARIS10:
3594 	case CHIP_VEGAM:
3595 		adev->mode_info.num_crtc = 6;
3596 		adev->mode_info.num_hpd = 6;
3597 		adev->mode_info.num_dig = 6;
3598 		break;
3599 	case CHIP_VEGA10:
3600 	case CHIP_VEGA12:
3601 	case CHIP_VEGA20:
3602 		adev->mode_info.num_crtc = 6;
3603 		adev->mode_info.num_hpd = 6;
3604 		adev->mode_info.num_dig = 6;
3605 		break;
3606 #if defined(CONFIG_DRM_AMD_DC_DCN)
3607 	case CHIP_RAVEN:
3608 	case CHIP_RENOIR:
3609 	case CHIP_VANGOGH:
3610 		adev->mode_info.num_crtc = 4;
3611 		adev->mode_info.num_hpd = 4;
3612 		adev->mode_info.num_dig = 4;
3613 		break;
3614 	case CHIP_NAVI10:
3615 	case CHIP_NAVI12:
3616 	case CHIP_SIENNA_CICHLID:
3617 	case CHIP_NAVY_FLOUNDER:
3618 		adev->mode_info.num_crtc = 6;
3619 		adev->mode_info.num_hpd = 6;
3620 		adev->mode_info.num_dig = 6;
3621 		break;
3622 	case CHIP_NAVI14:
3623 	case CHIP_DIMGREY_CAVEFISH:
3624 		adev->mode_info.num_crtc = 5;
3625 		adev->mode_info.num_hpd = 5;
3626 		adev->mode_info.num_dig = 5;
3627 		break;
3628 #endif
3629 	default:
3630 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3631 		return -EINVAL;
3632 	}
3633 
3634 	amdgpu_dm_set_irq_funcs(adev);
3635 
	if (!adev->mode_info.funcs)
		adev->mode_info.funcs = &dm_display_funcs;
3638 
3639 	/*
3640 	 * Note: Do NOT change adev->audio_endpt_rreg and
3641 	 * adev->audio_endpt_wreg because they are initialised in
3642 	 * amdgpu_device_init()
3643 	 */
3644 #if defined(CONFIG_DEBUG_KERNEL_DC)
3645 	device_create_file(
3646 		adev_to_drm(adev)->dev,
3647 		&dev_attr_s3_debug);
3648 #endif
3649 
3650 	return 0;
3651 }
3652 
3653 static bool modeset_required(struct drm_crtc_state *crtc_state,
3654 			     struct dc_stream_state *new_stream,
3655 			     struct dc_stream_state *old_stream)
3656 {
3657 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3658 }
3659 
3660 static bool modereset_required(struct drm_crtc_state *crtc_state)
3661 {
3662 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3663 }
3664 
3665 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3666 {
3667 	drm_encoder_cleanup(encoder);
3668 	kfree(encoder);
3669 }
3670 
3671 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3672 	.destroy = amdgpu_dm_encoder_destroy,
3673 };
3674 
3675 
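/*
 * Convert DRM plane state into DC scaling info: the source rectangle is
 * in 16.16 fixed point, the destination in integer CRTC coordinates.
 * Returns -EINVAL for degenerate rectangles or scaling factors outside
 * the 0.25x-16x range checked below (in per-mille units).
 */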
3676 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3677 				struct dc_scaling_info *scaling_info)
3678 {
3679 	int scale_w, scale_h;
3680 
3681 	memset(scaling_info, 0, sizeof(*scaling_info));
3682 
	/* Source is fixed 16.16; we ignore the fractional part for now. */
3684 	scaling_info->src_rect.x = state->src_x >> 16;
3685 	scaling_info->src_rect.y = state->src_y >> 16;
3686 
3687 	scaling_info->src_rect.width = state->src_w >> 16;
3688 	if (scaling_info->src_rect.width == 0)
3689 		return -EINVAL;
3690 
3691 	scaling_info->src_rect.height = state->src_h >> 16;
3692 	if (scaling_info->src_rect.height == 0)
3693 		return -EINVAL;
3694 
3695 	scaling_info->dst_rect.x = state->crtc_x;
3696 	scaling_info->dst_rect.y = state->crtc_y;
3697 
3698 	if (state->crtc_w == 0)
3699 		return -EINVAL;
3700 
3701 	scaling_info->dst_rect.width = state->crtc_w;
3702 
3703 	if (state->crtc_h == 0)
3704 		return -EINVAL;
3705 
3706 	scaling_info->dst_rect.height = state->crtc_h;
3707 
3708 	/* DRM doesn't specify clipping on destination output. */
3709 	scaling_info->clip_rect = scaling_info->dst_rect;
3710 
3711 	/* TODO: Validate scaling per-format with DC plane caps */
3712 	scale_w = scaling_info->dst_rect.width * 1000 /
3713 		  scaling_info->src_rect.width;
3714 
3715 	if (scale_w < 250 || scale_w > 16000)
3716 		return -EINVAL;
3717 
3718 	scale_h = scaling_info->dst_rect.height * 1000 /
3719 		  scaling_info->src_rect.height;
3720 
3721 	if (scale_h < 250 || scale_h > 16000)
3722 		return -EINVAL;
3723 
3724 	/*
3725 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3726 	 * assume reasonable defaults based on the format.
3727 	 */
3728 
3729 	return 0;
3730 }
3731 
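/*
 * Decode the legacy (pre-GFX9) tiling flags stored in the BO metadata
 * into DC's gfx8 tiling description.
 */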
3732 static void
3733 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3734 				 uint64_t tiling_flags)
3735 {
3736 	/* Fill GFX8 params */
3737 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3738 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3739 
3740 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3741 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3742 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3743 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3744 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3745 
3746 		/* XXX fix me for VI */
3747 		tiling_info->gfx8.num_banks = num_banks;
3748 		tiling_info->gfx8.array_mode =
3749 				DC_ARRAY_2D_TILED_THIN1;
3750 		tiling_info->gfx8.tile_split = tile_split;
3751 		tiling_info->gfx8.bank_width = bankw;
3752 		tiling_info->gfx8.bank_height = bankh;
3753 		tiling_info->gfx8.tile_aspect = mtaspect;
3754 		tiling_info->gfx8.tile_mode =
3755 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3756 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3757 			== DC_ARRAY_1D_TILED_THIN1) {
3758 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3759 	}
3760 
3761 	tiling_info->gfx8.pipe_config =
3762 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3763 }
3764 
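/*
 * Seed the GFX9+ tiling info with device-wide defaults derived from the
 * GB_ADDR_CONFIG register fields.
 */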
3765 static void
3766 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3767 				  union dc_tiling_info *tiling_info)
3768 {
3769 	tiling_info->gfx9.num_pipes =
3770 		adev->gfx.config.gb_addr_config_fields.num_pipes;
3771 	tiling_info->gfx9.num_banks =
3772 		adev->gfx.config.gb_addr_config_fields.num_banks;
3773 	tiling_info->gfx9.pipe_interleave =
3774 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3775 	tiling_info->gfx9.num_shader_engines =
3776 		adev->gfx.config.gb_addr_config_fields.num_se;
3777 	tiling_info->gfx9.max_compressed_frags =
3778 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3779 	tiling_info->gfx9.num_rb_per_se =
3780 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3781 	tiling_info->gfx9.shaderEnable = 1;
3782 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3783 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
3784 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3785 	    adev->asic_type == CHIP_VANGOGH)
3786 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3787 }
3788 
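/*
 * Ask DC whether the requested DCC parameters can be honored for this
 * surface. Returns 0 when DCC is disabled or supported, -EINVAL when
 * the hardware cannot compress this format/size/swizzle combination.
 */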
3789 static int
3790 validate_dcc(struct amdgpu_device *adev,
3791 	     const enum surface_pixel_format format,
3792 	     const enum dc_rotation_angle rotation,
3793 	     const union dc_tiling_info *tiling_info,
3794 	     const struct dc_plane_dcc_param *dcc,
3795 	     const struct dc_plane_address *address,
3796 	     const struct plane_size *plane_size)
3797 {
3798 	struct dc *dc = adev->dm.dc;
3799 	struct dc_dcc_surface_param input;
3800 	struct dc_surface_dcc_cap output;
3801 
3802 	memset(&input, 0, sizeof(input));
3803 	memset(&output, 0, sizeof(output));
3804 
3805 	if (!dcc->enable)
3806 		return 0;
3807 
3808 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3809 	    !dc->cap_funcs.get_dcc_compression_cap)
3810 		return -EINVAL;
3811 
3812 	input.format = format;
3813 	input.surface_size.width = plane_size->surface_size.width;
3814 	input.surface_size.height = plane_size->surface_size.height;
3815 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3816 
3817 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3818 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3819 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3820 		input.scan = SCAN_DIRECTION_VERTICAL;
3821 
3822 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3823 		return -EINVAL;
3824 
3825 	if (!output.capable)
3826 		return -EINVAL;
3827 
3828 	if (dcc->independent_64b_blks == 0 &&
3829 	    output.grph.rgb.independent_64b_blks != 0)
3830 		return -EINVAL;
3831 
3832 	return 0;
3833 }
3834 
3835 static bool
3836 modifier_has_dcc(uint64_t modifier)
3837 {
3838 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3839 }
3840 
3841 static unsigned
3842 modifier_gfx9_swizzle_mode(uint64_t modifier)
3843 {
3844 	if (modifier == DRM_FORMAT_MOD_LINEAR)
3845 		return 0;
3846 
3847 	return AMD_FMT_MOD_GET(TILE, modifier);
3848 }
3849 
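/*
 * Format info overrides for DCC: the extra plane(s) carry compression
 * metadata rather than pixel data, hence the zero cpp entries.
 */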
static const struct drm_format_info dcc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
	  .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};
3877 
static const struct drm_format_info dcc_retile_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
	  .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

3907 static const struct drm_format_info *
3908 lookup_format_info(const struct drm_format_info formats[],
3909 		  int num_formats, u32 format)
3910 {
3911 	int i;
3912 
3913 	for (i = 0; i < num_formats; i++) {
3914 		if (formats[i].format == format)
3915 			return &formats[i];
3916 	}
3917 
3918 	return NULL;
3919 }
3920 
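/*
 * Select per-modifier format info: DCC and DCC-retile layouts need the
 * extra metadata plane(s) described in the tables above.
 */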
3921 static const struct drm_format_info *
3922 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3923 {
3924 	uint64_t modifier = cmd->modifier[0];
3925 
3926 	if (!IS_AMD_FMT_MOD(modifier))
3927 		return NULL;
3928 
3929 	if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
3930 		return lookup_format_info(dcc_retile_formats,
3931 					  ARRAY_SIZE(dcc_retile_formats),
3932 					  cmd->pixel_format);
3933 
3934 	if (AMD_FMT_MOD_GET(DCC, modifier))
3935 		return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
3936 					  cmd->pixel_format);
3937 
3938 	/* returning NULL will cause the default format structs to be used. */
3939 	return NULL;
3940 }
3941 
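/*
 * Refine the device-default tiling info with the layout parameters
 * encoded in an AMD format modifier (pipe/bank XOR bits, packers).
 */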
3942 static void
3943 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3944 				    union dc_tiling_info *tiling_info,
3945 				    uint64_t modifier)
3946 {
3947 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3948 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3949 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3950 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3951 
3952 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
3953 
3954 	if (!IS_AMD_FMT_MOD(modifier))
3955 		return;
3956 
3957 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3958 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3959 
3960 	if (adev->family >= AMDGPU_FAMILY_NV) {
3961 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3962 	} else {
3963 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3964 
3965 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3966 	}
3967 }
3968 
3969 enum dm_micro_swizzle {
3970 	MICRO_SWIZZLE_Z = 0,
3971 	MICRO_SWIZZLE_S = 1,
3972 	MICRO_SWIZZLE_D = 2,
3973 	MICRO_SWIZZLE_R = 3
3974 };
3975 
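/*
 * Reject format/modifier combinations the display hardware cannot scan
 * out, e.g. D-swizzles at an unsupported bpp or DCC on non-32bpp
 * formats. LINEAR must always be accepted for core DRM's sake.
 */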
3976 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3977 					  uint32_t format,
3978 					  uint64_t modifier)
3979 {
3980 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
3981 	const struct drm_format_info *info = drm_format_info(format);
3982 
3983 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3984 
3985 	if (!info)
3986 		return false;
3987 
3988 	/*
3989 	 * We always have to allow this modifier, because core DRM still
3990 	 * checks LINEAR support if userspace does not provide modifers.
3991 	 */
3992 	if (modifier == DRM_FORMAT_MOD_LINEAR)
3993 		return true;
3994 
3995 	/*
3996 	 * The arbitrary tiling support for multiplane formats has not been hooked
3997 	 * up.
3998 	 */
3999 	if (info->num_planes > 1)
4000 		return false;
4001 
4002 	/*
4003 	 * For D swizzle the canonical modifier depends on the bpp, so check
4004 	 * it here.
4005 	 */
4006 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4007 	    adev->family >= AMDGPU_FAMILY_NV) {
4008 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4009 			return false;
4010 	}
4011 
4012 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4013 	    info->cpp[0] < 8)
4014 		return false;
4015 
4016 	if (modifier_has_dcc(modifier)) {
4017 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4018 		if (info->cpp[0] != 4)
4019 			return false;
4020 	}
4021 
4022 	return true;
4023 }
4024 
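/*
 * Append one modifier to a growable array, doubling the capacity when
 * full. On allocation failure the array is freed and *mods set to NULL,
 * which subsequent calls treat as a no-op; callers check *mods once at
 * the end instead of after each call.
 */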
4025 static void
4026 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4027 {
4028 	if (!*mods)
4029 		return;
4030 
4031 	if (*cap - *size < 1) {
4032 		uint64_t new_cap = *cap * 2;
4033 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4034 
4035 		if (!new_mods) {
4036 			kfree(*mods);
4037 			*mods = NULL;
4038 			return;
4039 		}
4040 
4041 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4042 		kfree(*mods);
4043 		*mods = new_mods;
4044 		*cap = new_cap;
4045 	}
4046 
4047 	(*mods)[*size] = mod;
4048 	*size += 1;
4049 }
4050 
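/*
 * Advertise the GFX9 (Vega/Raven) scanout modifiers, roughly in order
 * of preference. DCC variants are only exposed on the Raven family,
 * with constant encoding keyed off Raven2 and later.
 */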
4051 static void
4052 add_gfx9_modifiers(const struct amdgpu_device *adev,
4053 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4054 {
4055 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4056 	int pipe_xor_bits = min(8, pipes +
4057 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4058 	int bank_xor_bits = min(8 - pipe_xor_bits,
4059 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);

4064 	if (adev->family == AMDGPU_FAMILY_RV) {
4065 		/* Raven2 and later */
4066 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4067 
4068 		/*
4069 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4070 		 * doesn't support _D on DCN
4071 		 */
4072 
4073 		if (has_constant_encode) {
4074 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4075 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4076 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4077 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4078 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4079 				    AMD_FMT_MOD_SET(DCC, 1) |
4080 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4081 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4082 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4083 		}
4084 
4085 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4086 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4087 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4088 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4089 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4090 			    AMD_FMT_MOD_SET(DCC, 1) |
4091 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4092 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4093 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4094 
4095 		if (has_constant_encode) {
4096 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4097 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4098 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4099 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4100 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4101 				    AMD_FMT_MOD_SET(DCC, 1) |
4102 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4103 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4107 				    AMD_FMT_MOD_SET(RB, rb) |
4108 				    AMD_FMT_MOD_SET(PIPE, pipes));
4109 		}
4110 
4111 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4112 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4113 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4114 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4115 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4116 			    AMD_FMT_MOD_SET(DCC, 1) |
4117 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4118 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4119 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4120 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4121 			    AMD_FMT_MOD_SET(RB, rb) |
4122 			    AMD_FMT_MOD_SET(PIPE, pipes));
4123 	}
4124 
4125 	/*
4126 	 * Only supported for 64bpp on Raven, will be filtered on format in
4127 	 * dm_plane_format_mod_supported.
4128 	 */
4129 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4130 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4131 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4132 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4133 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4134 
4135 	if (adev->family == AMDGPU_FAMILY_RV) {
4136 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4137 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4138 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4139 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4140 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4141 	}
4142 
4143 	/*
4144 	 * Only supported for 64bpp on Raven, will be filtered on format in
4145 	 * dm_plane_format_mod_supported.
4146 	 */
4147 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4148 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4149 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4150 
4151 	if (adev->family == AMDGPU_FAMILY_RV) {
4152 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4153 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4154 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4155 	}
4156 }
4157 
4158 static void
4159 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4160 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4161 {
4162 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4163 
4164 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4165 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4166 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4167 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4168 		    AMD_FMT_MOD_SET(DCC, 1) |
4169 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4170 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4171 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4172 
4173 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4174 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4175 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4176 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4177 		    AMD_FMT_MOD_SET(DCC, 1) |
4178 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4179 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4180 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4181 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4182 
4183 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4184 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4185 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4186 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4187 
4188 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4189 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4190 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

4194 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4195 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4196 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4197 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4198 
4199 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4200 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4201 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4202 }
4203 
4204 static void
4205 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4206 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4207 {
4208 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4209 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4210 
4211 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4212 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4213 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4214 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4215 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4216 		    AMD_FMT_MOD_SET(DCC, 1) |
4217 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4218 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4219 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4220 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4221 
4222 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4223 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4224 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4225 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4226 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4227 		    AMD_FMT_MOD_SET(DCC, 1) |
4228 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4229 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4230 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4231 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4232 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4233 
4234 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4235 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4236 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4237 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4238 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4239 
4240 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4241 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4242 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4243 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4244 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4245 
4246 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4247 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4248 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4249 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4250 
4251 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4252 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4253 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4254 }
4255 
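/*
 * Build the modifier list for a plane: cursors only get LINEAR, other
 * plane types get the family-specific lists above, always terminated by
 * LINEAR and DRM_FORMAT_MOD_INVALID.
 */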
4256 static int
4257 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4258 {
4259 	uint64_t size = 0, capacity = 128;
4260 	*mods = NULL;
4261 
4262 	/* We have not hooked up any pre-GFX9 modifiers. */
4263 	if (adev->family < AMDGPU_FAMILY_AI)
4264 		return 0;
4265 
4266 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4267 
4268 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4269 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4270 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4271 		return *mods ? 0 : -ENOMEM;
4272 	}
4273 
4274 	switch (adev->family) {
4275 	case AMDGPU_FAMILY_AI:
4276 	case AMDGPU_FAMILY_RV:
4277 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4278 		break;
4279 	case AMDGPU_FAMILY_NV:
4280 	case AMDGPU_FAMILY_VGH:
4281 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4282 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4283 		else
4284 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4285 		break;
4286 	}
4287 
4288 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4289 
4290 	/* INVALID marks the end of the list. */
4291 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4292 
4293 	if (!*mods)
4294 		return -ENOMEM;
4295 
4296 	return 0;
4297 }
4298 
4299 static int
4300 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4301 					  const struct amdgpu_framebuffer *afb,
4302 					  const enum surface_pixel_format format,
4303 					  const enum dc_rotation_angle rotation,
4304 					  const struct plane_size *plane_size,
4305 					  union dc_tiling_info *tiling_info,
4306 					  struct dc_plane_dcc_param *dcc,
4307 					  struct dc_plane_address *address,
4308 					  const bool force_disable_dcc)
4309 {
4310 	const uint64_t modifier = afb->base.modifier;
4311 	int ret;
4312 
4313 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4314 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4315 
4316 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4317 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4318 
4319 		dcc->enable = 1;
4320 		dcc->meta_pitch = afb->base.pitches[1];
4321 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4322 
4323 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4324 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4325 	}
4326 
4327 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4328 	if (ret)
4329 		return ret;
4330 
4331 	return 0;
4332 }
4333 
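/*
 * Translate a framebuffer into DC's buffer description: surface and
 * chroma sizes/pitches, tiling info and the scanout addresses DC will
 * program, including DCC metadata when a compressed layout is in use.
 */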
4334 static int
4335 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4336 			     const struct amdgpu_framebuffer *afb,
4337 			     const enum surface_pixel_format format,
4338 			     const enum dc_rotation_angle rotation,
4339 			     const uint64_t tiling_flags,
4340 			     union dc_tiling_info *tiling_info,
4341 			     struct plane_size *plane_size,
4342 			     struct dc_plane_dcc_param *dcc,
4343 			     struct dc_plane_address *address,
4344 			     bool tmz_surface,
4345 			     bool force_disable_dcc)
4346 {
4347 	const struct drm_framebuffer *fb = &afb->base;
4348 	int ret;
4349 
4350 	memset(tiling_info, 0, sizeof(*tiling_info));
4351 	memset(plane_size, 0, sizeof(*plane_size));
4352 	memset(dcc, 0, sizeof(*dcc));
4353 	memset(address, 0, sizeof(*address));
4354 
4355 	address->tmz_surface = tmz_surface;
4356 
4357 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4358 		uint64_t addr = afb->address + fb->offsets[0];
4359 
4360 		plane_size->surface_size.x = 0;
4361 		plane_size->surface_size.y = 0;
4362 		plane_size->surface_size.width = fb->width;
4363 		plane_size->surface_size.height = fb->height;
4364 		plane_size->surface_pitch =
4365 			fb->pitches[0] / fb->format->cpp[0];
4366 
4367 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4368 		address->grph.addr.low_part = lower_32_bits(addr);
4369 		address->grph.addr.high_part = upper_32_bits(addr);
4370 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4371 		uint64_t luma_addr = afb->address + fb->offsets[0];
4372 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4373 
4374 		plane_size->surface_size.x = 0;
4375 		plane_size->surface_size.y = 0;
4376 		plane_size->surface_size.width = fb->width;
4377 		plane_size->surface_size.height = fb->height;
4378 		plane_size->surface_pitch =
4379 			fb->pitches[0] / fb->format->cpp[0];
4380 
4381 		plane_size->chroma_size.x = 0;
4382 		plane_size->chroma_size.y = 0;
4383 		/* TODO: set these based on surface format */
4384 		plane_size->chroma_size.width = fb->width / 2;
4385 		plane_size->chroma_size.height = fb->height / 2;
4386 
4387 		plane_size->chroma_pitch =
4388 			fb->pitches[1] / fb->format->cpp[1];
4389 
4390 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4391 		address->video_progressive.luma_addr.low_part =
4392 			lower_32_bits(luma_addr);
4393 		address->video_progressive.luma_addr.high_part =
4394 			upper_32_bits(luma_addr);
4395 		address->video_progressive.chroma_addr.low_part =
4396 			lower_32_bits(chroma_addr);
4397 		address->video_progressive.chroma_addr.high_part =
4398 			upper_32_bits(chroma_addr);
4399 	}
4400 
4401 	if (adev->family >= AMDGPU_FAMILY_AI) {
4402 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4403 								rotation, plane_size,
4404 								tiling_info, dcc,
4405 								address,
4406 								force_disable_dcc);
4407 		if (ret)
4408 			return ret;
4409 	} else {
4410 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4411 	}
4412 
4413 	return 0;
4414 }
4415 
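/*
 * Derive DC blending settings from DRM plane state. Only overlay planes
 * participate: premultiplied-alpha formats enable per-pixel alpha, and
 * a plane alpha below 0xffff enables global alpha.
 */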
4416 static void
4417 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4418 			       bool *per_pixel_alpha, bool *global_alpha,
4419 			       int *global_alpha_value)
4420 {
4421 	*per_pixel_alpha = false;
4422 	*global_alpha = false;
4423 	*global_alpha_value = 0xff;
4424 
4425 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4426 		return;
4427 
4428 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4429 		static const uint32_t alpha_formats[] = {
4430 			DRM_FORMAT_ARGB8888,
4431 			DRM_FORMAT_RGBA8888,
4432 			DRM_FORMAT_ABGR8888,
4433 		};
4434 		uint32_t format = plane_state->fb->format->format;
4435 		unsigned int i;
4436 
4437 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4438 			if (format == alpha_formats[i]) {
4439 				*per_pixel_alpha = true;
4440 				break;
4441 			}
4442 		}
4443 	}
4444 
4445 	if (plane_state->alpha < 0xffff) {
4446 		*global_alpha = true;
4447 		*global_alpha_value = plane_state->alpha >> 8;
4448 	}
4449 }
4450 
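/*
 * Map the DRM color encoding/range properties of a YCbCr surface onto a
 * DC color space; RGB surfaces always use sRGB.
 */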
4451 static int
4452 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4453 			    const enum surface_pixel_format format,
4454 			    enum dc_color_space *color_space)
4455 {
4456 	bool full_range;
4457 
4458 	*color_space = COLOR_SPACE_SRGB;
4459 
4460 	/* DRM color properties only affect non-RGB formats. */
4461 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4462 		return 0;
4463 
4464 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4465 
4466 	switch (plane_state->color_encoding) {
4467 	case DRM_COLOR_YCBCR_BT601:
4468 		if (full_range)
4469 			*color_space = COLOR_SPACE_YCBCR601;
4470 		else
4471 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4472 		break;
4473 
4474 	case DRM_COLOR_YCBCR_BT709:
4475 		if (full_range)
4476 			*color_space = COLOR_SPACE_YCBCR709;
4477 		else
4478 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4479 		break;
4480 
4481 	case DRM_COLOR_YCBCR_BT2020:
4482 		if (full_range)
4483 			*color_space = COLOR_SPACE_2020_YCBCR;
4484 		else
4485 			return -EINVAL;
4486 		break;
4487 
4488 	default:
4489 		return -EINVAL;
4490 	}
4491 
4492 	return 0;
4493 }
4494 
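/*
 * Fill a dc_plane_info from DRM plane state: pixel format, rotation,
 * buffer/tiling attributes, color space and blending, plus the scanout
 * addresses in @address.
 */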
4495 static int
4496 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4497 			    const struct drm_plane_state *plane_state,
4498 			    const uint64_t tiling_flags,
4499 			    struct dc_plane_info *plane_info,
4500 			    struct dc_plane_address *address,
4501 			    bool tmz_surface,
4502 			    bool force_disable_dcc)
4503 {
4504 	const struct drm_framebuffer *fb = plane_state->fb;
4505 	const struct amdgpu_framebuffer *afb =
4506 		to_amdgpu_framebuffer(plane_state->fb);
4507 	struct drm_format_name_buf format_name;
4508 	int ret;
4509 
4510 	memset(plane_info, 0, sizeof(*plane_info));
4511 
4512 	switch (fb->format->format) {
4513 	case DRM_FORMAT_C8:
4514 		plane_info->format =
4515 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4516 		break;
4517 	case DRM_FORMAT_RGB565:
4518 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4519 		break;
4520 	case DRM_FORMAT_XRGB8888:
4521 	case DRM_FORMAT_ARGB8888:
4522 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4523 		break;
4524 	case DRM_FORMAT_XRGB2101010:
4525 	case DRM_FORMAT_ARGB2101010:
4526 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4527 		break;
4528 	case DRM_FORMAT_XBGR2101010:
4529 	case DRM_FORMAT_ABGR2101010:
4530 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4531 		break;
4532 	case DRM_FORMAT_XBGR8888:
4533 	case DRM_FORMAT_ABGR8888:
4534 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4535 		break;
4536 	case DRM_FORMAT_NV21:
4537 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4538 		break;
4539 	case DRM_FORMAT_NV12:
4540 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4541 		break;
4542 	case DRM_FORMAT_P010:
4543 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4544 		break;
4545 	case DRM_FORMAT_XRGB16161616F:
4546 	case DRM_FORMAT_ARGB16161616F:
4547 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4548 		break;
4549 	case DRM_FORMAT_XBGR16161616F:
4550 	case DRM_FORMAT_ABGR16161616F:
4551 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4552 		break;
4553 	default:
4554 		DRM_ERROR(
4555 			"Unsupported screen format %s\n",
4556 			drm_get_format_name(fb->format->format, &format_name));
4557 		return -EINVAL;
4558 	}
4559 
4560 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4561 	case DRM_MODE_ROTATE_0:
4562 		plane_info->rotation = ROTATION_ANGLE_0;
4563 		break;
4564 	case DRM_MODE_ROTATE_90:
4565 		plane_info->rotation = ROTATION_ANGLE_90;
4566 		break;
4567 	case DRM_MODE_ROTATE_180:
4568 		plane_info->rotation = ROTATION_ANGLE_180;
4569 		break;
4570 	case DRM_MODE_ROTATE_270:
4571 		plane_info->rotation = ROTATION_ANGLE_270;
4572 		break;
4573 	default:
4574 		plane_info->rotation = ROTATION_ANGLE_0;
4575 		break;
4576 	}
4577 
4578 	plane_info->visible = true;
4579 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4580 
4581 	plane_info->layer_index = 0;
4582 
4583 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4584 					  &plane_info->color_space);
4585 	if (ret)
4586 		return ret;
4587 
4588 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4589 					   plane_info->rotation, tiling_flags,
4590 					   &plane_info->tiling_info,
4591 					   &plane_info->plane_size,
4592 					   &plane_info->dcc, address, tmz_surface,
4593 					   force_disable_dcc);
4594 	if (ret)
4595 		return ret;
4596 
4597 	fill_blending_from_plane_state(
4598 		plane_state, &plane_info->per_pixel_alpha,
4599 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4600 
4601 	return 0;
4602 }
4603 
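/*
 * Populate a dc_plane_state from the DRM plane and CRTC state: scaling,
 * buffer attributes and color management. DCC is force-disabled on
 * Raven while suspending.
 */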
4604 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4605 				    struct dc_plane_state *dc_plane_state,
4606 				    struct drm_plane_state *plane_state,
4607 				    struct drm_crtc_state *crtc_state)
4608 {
4609 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4610 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4611 	struct dc_scaling_info scaling_info;
4612 	struct dc_plane_info plane_info;
4613 	int ret;
4614 	bool force_disable_dcc = false;
4615 
4616 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4617 	if (ret)
4618 		return ret;
4619 
4620 	dc_plane_state->src_rect = scaling_info.src_rect;
4621 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4622 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4623 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4624 
4625 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4626 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4627 					  afb->tiling_flags,
4628 					  &plane_info,
4629 					  &dc_plane_state->address,
4630 					  afb->tmz_surface,
4631 					  force_disable_dcc);
4632 	if (ret)
4633 		return ret;
4634 
	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
4638 	dc_plane_state->plane_size = plane_info.plane_size;
4639 	dc_plane_state->rotation = plane_info.rotation;
4640 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4641 	dc_plane_state->stereo_format = plane_info.stereo_format;
4642 	dc_plane_state->tiling_info = plane_info.tiling_info;
4643 	dc_plane_state->visible = plane_info.visible;
4644 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4645 	dc_plane_state->global_alpha = plane_info.global_alpha;
4646 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4647 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4649 
4650 	/*
4651 	 * Always set input transfer function, since plane state is refreshed
4652 	 * every time.
4653 	 */
4654 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4655 	if (ret)
4656 		return ret;
4657 
4658 	return 0;
4659 }
4660 
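/*
 * Compute the stream source/destination rectangles for the requested
 * scaling mode (full, aspect-preserving or centered) and apply any
 * underscan borders from the connector state.
 */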
4661 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4662 					   const struct dm_connector_state *dm_state,
4663 					   struct dc_stream_state *stream)
4664 {
4665 	enum amdgpu_rmx_type rmx_type;
4666 
	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */
4669 
4670 	/* no mode. nothing to be done */
4671 	if (!mode)
4672 		return;
4673 
4674 	/* Full screen scaling by default */
4675 	src.width = mode->hdisplay;
4676 	src.height = mode->vdisplay;
4677 	dst.width = stream->timing.h_addressable;
4678 	dst.height = stream->timing.v_addressable;
4679 
4680 	if (dm_state) {
4681 		rmx_type = dm_state->scaling;
4682 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4683 			if (src.width * dst.height <
4684 					src.height * dst.width) {
4685 				/* height needs less upscaling/more downscaling */
4686 				dst.width = src.width *
4687 						dst.height / src.height;
4688 			} else {
4689 				/* width needs less upscaling/more downscaling */
4690 				dst.height = src.height *
4691 						dst.width / src.width;
4692 			}
4693 		} else if (rmx_type == RMX_CENTER) {
4694 			dst = src;
4695 		}
4696 
4697 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4698 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4699 
4700 		if (dm_state->underscan_enable) {
4701 			dst.x += dm_state->underscan_hborder / 2;
4702 			dst.y += dm_state->underscan_vborder / 2;
4703 			dst.width -= dm_state->underscan_hborder;
4704 			dst.height -= dm_state->underscan_vborder;
4705 		}
4706 	}
4707 
4708 	stream->src = src;
4709 	stream->dst = dst;
4710 
4711 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
			dst.x, dst.y, dst.width, dst.height);
}
4715 
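/*
 * Determine the stream color depth, starting from the sink's EDID caps
 * (HF-VSDB deep-color bits for YCbCr 4:2:0) and capping at the bpc the
 * user requested, rounded down to an even value.
 */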
4716 static enum dc_color_depth
4717 convert_color_depth_from_display_info(const struct drm_connector *connector,
4718 				      bool is_y420, int requested_bpc)
4719 {
4720 	uint8_t bpc;
4721 
4722 	if (is_y420) {
4723 		bpc = 8;
4724 
4725 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4726 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4727 			bpc = 16;
4728 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4729 			bpc = 12;
4730 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4731 			bpc = 10;
4732 	} else {
4733 		bpc = (uint8_t)connector->display_info.bpc;
4734 		/* Assume 8 bpc by default if no bpc is specified. */
4735 		bpc = bpc ? bpc : 8;
4736 	}
4737 
4738 	if (requested_bpc > 0) {
4739 		/*
4740 		 * Cap display bpc based on the user requested value.
4741 		 *
4742 		 * The value for state->max_bpc may not correctly updated
4743 		 * depending on when the connector gets added to the state
4744 		 * or if this was called outside of atomic check, so it
4745 		 * can't be used directly.
4746 		 */
4747 		bpc = min_t(u8, bpc, requested_bpc);
4748 
4749 		/* Round down to the nearest even number. */
4750 		bpc = bpc - (bpc & 1);
4751 	}
4752 
4753 	switch (bpc) {
4754 	case 0:
4755 		/*
4756 		 * Temporary Work around, DRM doesn't parse color depth for
4757 		 * EDID revision before 1.4
4758 		 * TODO: Fix edid parsing
4759 		 */
4760 		return COLOR_DEPTH_888;
4761 	case 6:
4762 		return COLOR_DEPTH_666;
4763 	case 8:
4764 		return COLOR_DEPTH_888;
4765 	case 10:
4766 		return COLOR_DEPTH_101010;
4767 	case 12:
4768 		return COLOR_DEPTH_121212;
4769 	case 14:
4770 		return COLOR_DEPTH_141414;
4771 	case 16:
4772 		return COLOR_DEPTH_161616;
4773 	default:
4774 		return COLOR_DEPTH_UNDEFINED;
4775 	}
4776 }
4777 
4778 static enum dc_aspect_ratio
4779 get_aspect_ratio(const struct drm_display_mode *mode_in)
4780 {
4781 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4782 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4783 }
4784 
4785 static enum dc_color_space
4786 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4787 {
4788 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4789 
4790 	switch (dc_crtc_timing->pixel_encoding)	{
4791 	case PIXEL_ENCODING_YCBCR422:
4792 	case PIXEL_ENCODING_YCBCR444:
4793 	case PIXEL_ENCODING_YCBCR420:
4794 	{
4795 		/*
4796 		 * 27030khz is the separation point between HDTV and SDTV
4797 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4798 		 * respectively
4799 		 */
4800 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4801 			if (dc_crtc_timing->flags.Y_ONLY)
4802 				color_space =
4803 					COLOR_SPACE_YCBCR709_LIMITED;
4804 			else
4805 				color_space = COLOR_SPACE_YCBCR709;
4806 		} else {
4807 			if (dc_crtc_timing->flags.Y_ONLY)
4808 				color_space =
4809 					COLOR_SPACE_YCBCR601_LIMITED;
4810 			else
4811 				color_space = COLOR_SPACE_YCBCR601;
4812 		}
4813 
4814 	}
4815 	break;
4816 	case PIXEL_ENCODING_RGB:
4817 		color_space = COLOR_SPACE_SRGB;
4818 		break;
4819 
4820 	default:
4821 		WARN_ON(1);
4822 		break;
4823 	}
4824 
4825 	return color_space;
4826 }
4827 
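/*
 * Walk down from the current color depth until the normalized pixel
 * clock fits within the sink's max TMDS clock. Returns false when no
 * HDMI-valid depth fits.
 */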
4828 static bool adjust_colour_depth_from_display_info(
4829 	struct dc_crtc_timing *timing_out,
4830 	const struct drm_display_info *info)
4831 {
4832 	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
4835 		normalized_clk = timing_out->pix_clk_100hz / 10;
4836 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4837 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4838 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec, based on the colour depth. */
4840 		switch (depth) {
4841 		case COLOR_DEPTH_888:
4842 			break;
4843 		case COLOR_DEPTH_101010:
4844 			normalized_clk = (normalized_clk * 30) / 24;
4845 			break;
4846 		case COLOR_DEPTH_121212:
4847 			normalized_clk = (normalized_clk * 36) / 24;
4848 			break;
4849 		case COLOR_DEPTH_161616:
4850 			normalized_clk = (normalized_clk * 48) / 24;
4851 			break;
4852 		default:
4853 			/* The above depths are the only ones valid for HDMI. */
4854 			return false;
4855 		}
4856 		if (normalized_clk <= info->max_tmds_clock) {
4857 			timing_out->display_color_depth = depth;
4858 			return true;
4859 		}
4860 	} while (--depth > COLOR_DEPTH_666);
4861 	return false;
4862 }
4863 
4864 static void fill_stream_properties_from_drm_display_mode(
4865 	struct dc_stream_state *stream,
4866 	const struct drm_display_mode *mode_in,
4867 	const struct drm_connector *connector,
4868 	const struct drm_connector_state *connector_state,
4869 	const struct dc_stream_state *old_stream,
4870 	int requested_bpc)
4871 {
4872 	struct dc_crtc_timing *timing_out = &stream->timing;
4873 	const struct drm_display_info *info = &connector->display_info;
4874 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4875 	struct hdmi_vendor_infoframe hv_frame;
4876 	struct hdmi_avi_infoframe avi_frame;
4877 
4878 	memset(&hv_frame, 0, sizeof(hv_frame));
4879 	memset(&avi_frame, 0, sizeof(avi_frame));
4880 
4881 	timing_out->h_border_left = 0;
4882 	timing_out->h_border_right = 0;
4883 	timing_out->v_border_top = 0;
4884 	timing_out->v_border_bottom = 0;
4885 	/* TODO: un-hardcode */
4886 	if (drm_mode_is_420_only(info, mode_in)
4887 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4888 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4889 	else if (drm_mode_is_420_also(info, mode_in)
4890 			&& aconnector->force_yuv420_output)
4891 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4892 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4893 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4894 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4895 	else
4896 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4897 
4898 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4899 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4900 		connector,
4901 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4902 		requested_bpc);
4903 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4904 	timing_out->hdmi_vic = 0;
4905 
	if (old_stream) {
4907 		timing_out->vic = old_stream->timing.vic;
4908 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4909 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4910 	} else {
4911 		timing_out->vic = drm_match_cea_mode(mode_in);
4912 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4913 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4914 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4915 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4916 	}
4917 
4918 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4919 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4920 		timing_out->vic = avi_frame.video_code;
4921 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4922 		timing_out->hdmi_vic = hv_frame.vic;
4923 	}
4924 
4925 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4926 	timing_out->h_total = mode_in->crtc_htotal;
4927 	timing_out->h_sync_width =
4928 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4929 	timing_out->h_front_porch =
4930 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4931 	timing_out->v_total = mode_in->crtc_vtotal;
4932 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4933 	timing_out->v_front_porch =
4934 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4935 	timing_out->v_sync_width =
4936 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4937 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4938 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4939 
4940 	stream->output_color_space = get_output_color_space(timing_out);
4941 
4942 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4943 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4944 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4945 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4946 		    drm_mode_is_420_also(info, mode_in) &&
4947 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4948 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4949 			adjust_colour_depth_from_display_info(timing_out, info);
4950 		}
4951 	}
4952 }
4953 
4954 static void fill_audio_info(struct audio_info *audio_info,
4955 			    const struct drm_connector *drm_connector,
4956 			    const struct dc_sink *dc_sink)
4957 {
4958 	int i = 0;
4959 	int cea_revision = 0;
4960 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4961 
4962 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4963 	audio_info->product_id = edid_caps->product_id;
4964 
4965 	cea_revision = drm_connector->display_info.cea_rev;
4966 
4967 	strscpy(audio_info->display_name,
4968 		edid_caps->display_name,
4969 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4970 
4971 	if (cea_revision >= 3) {
4972 		audio_info->mode_count = edid_caps->audio_mode_count;
4973 
4974 		for (i = 0; i < audio_info->mode_count; ++i) {
4975 			audio_info->modes[i].format_code =
4976 					(enum audio_format_code)
4977 					(edid_caps->audio_modes[i].format_code);
4978 			audio_info->modes[i].channel_count =
4979 					edid_caps->audio_modes[i].channel_count;
4980 			audio_info->modes[i].sample_rates.all =
4981 					edid_caps->audio_modes[i].sample_rate;
4982 			audio_info->modes[i].sample_size =
4983 					edid_caps->audio_modes[i].sample_size;
4984 		}
4985 	}
4986 
4987 	audio_info->flags.all = edid_caps->speaker_flags;
4988 
4989 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4990 	if (drm_connector->latency_present[0]) {
4991 		audio_info->video_latency = drm_connector->video_latency[0];
4992 		audio_info->audio_latency = drm_connector->audio_latency[0];
4993 	}
4994 
	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}
4998 
4999 static void
5000 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5001 				      struct drm_display_mode *dst_mode)
5002 {
5003 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5004 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5005 	dst_mode->crtc_clock = src_mode->crtc_clock;
5006 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5007 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5008 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5009 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5010 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5011 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5012 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5013 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5014 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5015 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5016 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5017 }
5018 
5019 static void
5020 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5021 					const struct drm_display_mode *native_mode,
5022 					bool scale_enabled)
5023 {
5024 	if (scale_enabled) {
5025 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5026 	} else if (native_mode->clock == drm_mode->clock &&
5027 			native_mode->htotal == drm_mode->htotal &&
5028 			native_mode->vtotal == drm_mode->vtotal) {
5029 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5030 	} else {
		/* no scaling and no amdgpu-inserted mode: nothing to patch */
5032 	}
5033 }
5034 
5035 static struct dc_sink *
5036 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5037 {
5038 	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
5041 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5042 
5043 	sink = dc_sink_create(&sink_init_data);
5044 	if (!sink) {
5045 		DRM_ERROR("Failed to create sink!\n");
5046 		return NULL;
5047 	}
5048 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5049 
5050 	return sink;
5051 }
5052 
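/*
 * Arm a triggered CRTC reset on the rising VSYNC edge, delayed to the next
 * line, for any stream that has multi-stream synchronization enabled.
 */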
5053 static void set_multisync_trigger_params(
5054 		struct dc_stream_state *stream)
5055 {
5056 	if (stream->triggered_crtc_reset.enabled) {
5057 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5058 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5059 	}
5060 }
5061 
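/*
 * Pick the synchronized stream with the highest refresh rate as the master
 * and point every stream's CRTC-reset event source at it.
 */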
5062 static void set_master_stream(struct dc_stream_state *stream_set[],
5063 			      int stream_count)
5064 {
5065 	int j, highest_rfr = 0, master_stream = 0;
5066 
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5073 			if (refresh_rate > highest_rfr) {
5074 				highest_rfr = refresh_rate;
5075 				master_stream = j;
5076 			}
5077 		}
5078 	}
	for (j = 0; j < stream_count; j++) {
5080 		if (stream_set[j])
5081 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5082 	}
5083 }
5084 
5085 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5086 {
5087 	int i = 0;
5088 
5089 	if (context->stream_count < 2)
5090 		return;
	for (i = 0; i < context->stream_count; i++) {
5092 		if (!context->streams[i])
5093 			continue;
		/*
		 * TODO: add a function to read the AMD VSDB bits and set the
		 * crtc_sync_master.multi_sync_enabled flag.
		 * For now it is left false.
		 */
5099 		set_multisync_trigger_params(context->streams[i]);
5100 	}
5101 	set_master_stream(context->streams, context->stream_count);
5102 }
5103 
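/*
 * Build a dc_stream_state for the given connector and mode: fall back to a
 * fake sink when none is attached, derive the CRTC timing from the preferred
 * mode where applicable, configure DSC on DP links that support it, and fill
 * in the audio, scaling and info packet state.
 */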
5104 static struct dc_stream_state *
5105 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5106 		       const struct drm_display_mode *drm_mode,
5107 		       const struct dm_connector_state *dm_state,
5108 		       const struct dc_stream_state *old_stream,
5109 		       int requested_bpc)
5110 {
5111 	struct drm_display_mode *preferred_mode = NULL;
5112 	struct drm_connector *drm_connector;
5113 	const struct drm_connector_state *con_state =
5114 		dm_state ? &dm_state->base : NULL;
5115 	struct dc_stream_state *stream = NULL;
5116 	struct drm_display_mode mode = *drm_mode;
5117 	bool native_mode_found = false;
5118 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5119 	int mode_refresh;
5120 	int preferred_refresh = 0;
5121 #if defined(CONFIG_DRM_AMD_DC_DCN)
5122 	struct dsc_dec_dpcd_caps dsc_caps;
5123 #endif
5124 	uint32_t link_bandwidth_kbps;
5125 
	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
5128 		DRM_ERROR("aconnector is NULL!\n");
5129 		return stream;
5130 	}
5131 
5132 	drm_connector = &aconnector->base;
5133 
5134 	if (!aconnector->dc_sink) {
5135 		sink = create_fake_sink(aconnector);
5136 		if (!sink)
5137 			return stream;
5138 	} else {
5139 		sink = aconnector->dc_sink;
5140 		dc_sink_retain(sink);
5141 	}
5142 
5143 	stream = dc_create_stream_for_sink(sink);
5144 
5145 	if (stream == NULL) {
5146 		DRM_ERROR("Failed to create stream for sink!\n");
5147 		goto finish;
5148 	}
5149 
5150 	stream->dm_stream_context = aconnector;
5151 
5152 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5153 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5154 
5155 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5156 		/* Search for preferred mode */
5157 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5158 			native_mode_found = true;
5159 			break;
5160 		}
5161 	}
5162 	if (!native_mode_found)
5163 		preferred_mode = list_first_entry_or_null(
5164 				&aconnector->base.modes,
5165 				struct drm_display_mode,
5166 				head);
5167 
5168 	mode_refresh = drm_mode_vrefresh(&mode);
5169 
5170 	if (preferred_mode == NULL) {
		/*
		 * This may not be an error: the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous
		 * mode, and the mode list may not be filled in time.
		 */
5177 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5178 	} else {
5179 		decide_crtc_timing_for_drm_display_mode(
5180 				&mode, preferred_mode,
5181 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5182 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5183 	}
5184 
5185 	if (!dm_state)
5186 		drm_mode_set_crtcinfo(&mode, 0);
5187 
	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * copy the VIC and polarities from the old timings.
	 */
5192 	if (!scale || mode_refresh != preferred_refresh)
5193 		fill_stream_properties_from_drm_display_mode(stream,
5194 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
5195 	else
5196 		fill_stream_properties_from_drm_display_mode(stream,
5197 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
5198 
5199 	stream->timing.flags.DSC = 0;
5200 
5201 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5202 #if defined(CONFIG_DRM_AMD_DC_DCN)
5203 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5204 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5205 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5206 				      &dsc_caps);
5207 #endif
5208 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5209 							     dc_link_get_link_cap(aconnector->dc_link));
5210 
5211 #if defined(CONFIG_DRM_AMD_DC_DCN)
5212 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5213 			/* Set DSC policy according to dsc_clock_en */
5214 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5215 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5216 
5217 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5218 						  &dsc_caps,
5219 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5220 						  0,
5221 						  link_bandwidth_kbps,
5222 						  &stream->timing,
5223 						  &stream->timing.dsc_cfg))
5224 				stream->timing.flags.DSC = 1;
5225 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5226 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5227 				stream->timing.flags.DSC = 1;
5228 
5229 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5230 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5231 
5232 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5233 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5234 
5235 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5236 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5237 		}
5238 #endif
5239 	}
5240 
5241 	update_stream_scaling_settings(&mode, dm_state, stream);
5242 
5243 	fill_audio_info(
5244 		&stream->audio_info,
5245 		drm_connector,
5246 		sink);
5247 
5248 	update_stream_signal(stream, sink);
5249 
5250 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5251 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5252 
5253 	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
5258 		stream->use_vsc_sdp_for_colorimetry = false;
5259 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5260 			stream->use_vsc_sdp_for_colorimetry =
5261 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5262 		} else {
5263 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5264 				stream->use_vsc_sdp_for_colorimetry = true;
5265 		}
5266 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5267 	}
5268 finish:
5269 	dc_sink_release(sink);
5270 
5271 	return stream;
5272 }
5273 
5274 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5275 {
5276 	drm_crtc_cleanup(crtc);
5277 	kfree(crtc);
5278 }
5279 
5280 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5281 				  struct drm_crtc_state *state)
5282 {
5283 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5284 
	/* TODO: Destroy the dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
5294 }
5295 
5296 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5297 {
5298 	struct dm_crtc_state *state;
5299 
5300 	if (crtc->state)
5301 		dm_crtc_destroy_state(crtc, crtc->state);
5302 
5303 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5304 	if (WARN_ON(!state))
5305 		return;
5306 
5307 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5308 }
5309 
5310 static struct drm_crtc_state *
5311 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5312 {
5313 	struct dm_crtc_state *state, *cur;
5314 
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
5319 
5320 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5321 	if (!state)
5322 		return NULL;
5323 
5324 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5325 
5326 	if (cur->stream) {
5327 		state->stream = cur->stream;
5328 		dc_stream_retain(state->stream);
5329 	}
5330 
5331 	state->active_planes = cur->active_planes;
5332 	state->vrr_infopacket = cur->vrr_infopacket;
5333 	state->abm_level = cur->abm_level;
5334 	state->vrr_supported = cur->vrr_supported;
5335 	state->freesync_config = cur->freesync_config;
5336 	state->crc_src = cur->crc_src;
5337 	state->cm_has_degamma = cur->cm_has_degamma;
5338 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5339 
	/* TODO: Duplicate the dc_stream state once the stream object is flattened */
5341 
5342 	return &state->base;
5343 }
5344 
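/*
 * Enable or disable the VUPDATE interrupt for the CRTC's output timing
 * generator instance. Returns 0 on success, -EBUSY if DC rejects the change.
 */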
5345 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5346 {
5347 	enum dc_irq_source irq_source;
5348 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5349 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5350 	int rc;
5351 
5352 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5353 
5354 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5355 
5356 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5357 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
5358 	return rc;
5359 }
5360 
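/*
 * The VUPDATE interrupt is only needed alongside VBLANK while VRR is active,
 * so it is enabled conditionally on the way in but always disabled on the
 * way out.
 */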
5361 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5362 {
5363 	enum dc_irq_source irq_source;
5364 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5365 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5366 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5367 	int rc = 0;
5368 
5369 	if (enable) {
5370 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5371 		if (amdgpu_dm_vrr_active(acrtc_state))
5372 			rc = dm_set_vupdate_irq(crtc, true);
5373 	} else {
5374 		/* vblank irq off -> vupdate irq off */
5375 		rc = dm_set_vupdate_irq(crtc, false);
5376 	}
5377 
5378 	if (rc)
5379 		return rc;
5380 
5381 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5382 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5383 }
5384 
5385 static int dm_enable_vblank(struct drm_crtc *crtc)
5386 {
5387 	return dm_set_vblank(crtc, true);
5388 }
5389 
5390 static void dm_disable_vblank(struct drm_crtc *crtc)
5391 {
5392 	dm_set_vblank(crtc, false);
5393 }
5394 
/* Implement only the options currently available for the driver */
5396 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5397 	.reset = dm_crtc_reset_state,
5398 	.destroy = amdgpu_dm_crtc_destroy,
5399 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
5400 	.set_config = drm_atomic_helper_set_config,
5401 	.page_flip = drm_atomic_helper_page_flip,
5402 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5403 	.atomic_destroy_state = dm_crtc_destroy_state,
5404 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5405 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5406 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5407 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5408 	.enable_vblank = dm_enable_vblank,
5409 	.disable_vblank = dm_disable_vblank,
5410 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5411 };
5412 
5413 static enum drm_connector_status
5414 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5415 {
5416 	bool connected;
5417 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5418 
	/*
	 * Notes:
	 * 1. This interface is NOT called in the context of an HPD irq.
	 * 2. This interface *is* called in the context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity.
	 */
5425 
5426 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5427 	    !aconnector->fake_enable)
5428 		connected = (aconnector->dc_sink != NULL);
5429 	else
5430 		connected = (aconnector->base.force == DRM_FORCE_ON);
5431 
5432 	update_subconnector_property(aconnector);
5433 
5434 	return (connected ? connector_status_connected :
5435 			connector_status_disconnected);
5436 }
5437 
5438 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5439 					    struct drm_connector_state *connector_state,
5440 					    struct drm_property *property,
5441 					    uint64_t val)
5442 {
5443 	struct drm_device *dev = connector->dev;
5444 	struct amdgpu_device *adev = drm_to_adev(dev);
5445 	struct dm_connector_state *dm_old_state =
5446 		to_dm_connector_state(connector->state);
5447 	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);
	int ret = -EINVAL;
5451 
5452 	if (property == dev->mode_config.scaling_mode_property) {
5453 		enum amdgpu_rmx_type rmx_type;
5454 
5455 		switch (val) {
5456 		case DRM_MODE_SCALE_CENTER:
5457 			rmx_type = RMX_CENTER;
5458 			break;
5459 		case DRM_MODE_SCALE_ASPECT:
5460 			rmx_type = RMX_ASPECT;
5461 			break;
5462 		case DRM_MODE_SCALE_FULLSCREEN:
5463 			rmx_type = RMX_FULL;
5464 			break;
5465 		case DRM_MODE_SCALE_NONE:
5466 		default:
5467 			rmx_type = RMX_OFF;
5468 			break;
5469 		}
5470 
5471 		if (dm_old_state->scaling == rmx_type)
5472 			return 0;
5473 
5474 		dm_new_state->scaling = rmx_type;
5475 		ret = 0;
5476 	} else if (property == adev->mode_info.underscan_hborder_property) {
5477 		dm_new_state->underscan_hborder = val;
5478 		ret = 0;
5479 	} else if (property == adev->mode_info.underscan_vborder_property) {
5480 		dm_new_state->underscan_vborder = val;
5481 		ret = 0;
5482 	} else if (property == adev->mode_info.underscan_property) {
5483 		dm_new_state->underscan_enable = val;
5484 		ret = 0;
5485 	} else if (property == adev->mode_info.abm_level_property) {
5486 		dm_new_state->abm_level = val;
5487 		ret = 0;
5488 	}
5489 
5490 	return ret;
5491 }
5492 
5493 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5494 					    const struct drm_connector_state *state,
5495 					    struct drm_property *property,
5496 					    uint64_t *val)
5497 {
5498 	struct drm_device *dev = connector->dev;
5499 	struct amdgpu_device *adev = drm_to_adev(dev);
5500 	struct dm_connector_state *dm_state =
5501 		to_dm_connector_state(state);
5502 	int ret = -EINVAL;
5503 
5504 	if (property == dev->mode_config.scaling_mode_property) {
5505 		switch (dm_state->scaling) {
5506 		case RMX_CENTER:
5507 			*val = DRM_MODE_SCALE_CENTER;
5508 			break;
5509 		case RMX_ASPECT:
5510 			*val = DRM_MODE_SCALE_ASPECT;
5511 			break;
5512 		case RMX_FULL:
5513 			*val = DRM_MODE_SCALE_FULLSCREEN;
5514 			break;
5515 		case RMX_OFF:
5516 		default:
5517 			*val = DRM_MODE_SCALE_NONE;
5518 			break;
5519 		}
5520 		ret = 0;
5521 	} else if (property == adev->mode_info.underscan_hborder_property) {
5522 		*val = dm_state->underscan_hborder;
5523 		ret = 0;
5524 	} else if (property == adev->mode_info.underscan_vborder_property) {
5525 		*val = dm_state->underscan_vborder;
5526 		ret = 0;
5527 	} else if (property == adev->mode_info.underscan_property) {
5528 		*val = dm_state->underscan_enable;
5529 		ret = 0;
5530 	} else if (property == adev->mode_info.abm_level_property) {
5531 		*val = dm_state->abm_level;
5532 		ret = 0;
5533 	}
5534 
5535 	return ret;
5536 }
5537 
5538 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5539 {
5540 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5541 
5542 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5543 }
5544 
5545 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5546 {
5547 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5548 	const struct dc_link *link = aconnector->dc_link;
5549 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5550 	struct amdgpu_display_manager *dm = &adev->dm;
5551 
	/*
	 * Destroy the MST manager only if it was initialized, since that is
	 * not done for all connector types.
	 */
5556 	if (aconnector->mst_mgr.dev)
5557 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5558 
5559 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5560 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5561 
5562 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5563 	    link->type != dc_connection_none &&
5564 	    dm->backlight_dev) {
5565 		backlight_device_unregister(dm->backlight_dev);
5566 		dm->backlight_dev = NULL;
5567 	}
5568 #endif
5569 
5570 	if (aconnector->dc_em_sink)
5571 		dc_sink_release(aconnector->dc_em_sink);
5572 	aconnector->dc_em_sink = NULL;
5573 	if (aconnector->dc_sink)
5574 		dc_sink_release(aconnector->dc_sink);
5575 	aconnector->dc_sink = NULL;
5576 
5577 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5578 	drm_connector_unregister(connector);
5579 	drm_connector_cleanup(connector);
5580 	if (aconnector->i2c) {
5581 		i2c_del_adapter(&aconnector->i2c->base);
5582 		kfree(aconnector->i2c);
5583 	}
5584 	kfree(aconnector->dm_dp_aux.aux.name);
5585 
5586 	kfree(connector);
5587 }
5588 
5589 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5590 {
5591 	struct dm_connector_state *state =
5592 		to_dm_connector_state(connector->state);
5593 
5594 	if (connector->state)
5595 		__drm_atomic_helper_connector_destroy_state(connector->state);
5596 
5597 	kfree(state);
5598 
5599 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5600 
5601 	if (state) {
5602 		state->scaling = RMX_OFF;
5603 		state->underscan_enable = false;
5604 		state->underscan_hborder = 0;
5605 		state->underscan_vborder = 0;
5606 		state->base.max_requested_bpc = 8;
5607 		state->vcpi_slots = 0;
5608 		state->pbn = 0;
5609 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5610 			state->abm_level = amdgpu_dm_abm_level;
5611 
5612 		__drm_atomic_helper_connector_reset(connector, &state->base);
5613 	}
5614 }
5615 
5616 struct drm_connector_state *
5617 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5618 {
5619 	struct dm_connector_state *state =
5620 		to_dm_connector_state(connector->state);
5621 
5622 	struct dm_connector_state *new_state =
5623 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5624 
5625 	if (!new_state)
5626 		return NULL;
5627 
5628 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5629 
5630 	new_state->freesync_capable = state->freesync_capable;
5631 	new_state->abm_level = state->abm_level;
5632 	new_state->scaling = state->scaling;
5633 	new_state->underscan_enable = state->underscan_enable;
5634 	new_state->underscan_hborder = state->underscan_hborder;
5635 	new_state->underscan_vborder = state->underscan_vborder;
5636 	new_state->vcpi_slots = state->vcpi_slots;
5637 	new_state->pbn = state->pbn;
5638 	return &new_state->base;
5639 }
5640 
5641 static int
5642 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5643 {
5644 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5645 		to_amdgpu_dm_connector(connector);
5646 	int r;
5647 
5648 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5649 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5650 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5651 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5652 		if (r)
5653 			return r;
5654 	}
5655 
5656 #if defined(CONFIG_DEBUG_FS)
5657 	connector_debugfs_init(amdgpu_dm_connector);
5658 #endif
5659 
5660 	return 0;
5661 }
5662 
5663 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5664 	.reset = amdgpu_dm_connector_funcs_reset,
5665 	.detect = amdgpu_dm_connector_detect,
5666 	.fill_modes = drm_helper_probe_single_connector_modes,
5667 	.destroy = amdgpu_dm_connector_destroy,
5668 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5669 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5670 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5671 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5672 	.late_register = amdgpu_dm_connector_late_register,
5673 	.early_unregister = amdgpu_dm_connector_unregister
5674 };
5675 
5676 static int get_modes(struct drm_connector *connector)
5677 {
5678 	return amdgpu_dm_connector_get_modes(connector);
5679 }
5680 
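/*
 * Build an emulated sink from the connector's EDID override blob so that a
 * forced-on connector can light up without a physically attached display.
 * Without an override EDID the connector is forced off instead.
 */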
5681 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5682 {
5683 	struct dc_sink_init_data init_params = {
5684 			.link = aconnector->dc_link,
5685 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5686 	};
5687 	struct edid *edid;
5688 
5689 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5691 				aconnector->base.name);
5692 
5693 		aconnector->base.force = DRM_FORCE_OFF;
5694 		aconnector->base.override_edid = false;
5695 		return;
5696 	}
5697 
5698 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5699 
5700 	aconnector->edid = edid;
5701 
5702 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5703 		aconnector->dc_link,
5704 		(uint8_t *)edid,
5705 		(edid->extensions + 1) * EDID_LENGTH,
5706 		&init_params);
5707 
5708 	if (aconnector->base.force == DRM_FORCE_ON) {
5709 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5710 		aconnector->dc_link->local_sink :
5711 		aconnector->dc_em_sink;
5712 		dc_sink_retain(aconnector->dc_sink);
5713 	}
5714 }
5715 
5716 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5717 {
5718 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5719 
	/*
	 * In case of a headless boot with force-on for a DP managed
	 * connector, these settings have to be != 0 to get an initial
	 * modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
5731 	create_eml_sink(aconnector);
5732 }
5733 
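/*
 * Create a stream and validate it against DC, stepping the requested color
 * depth down from max_requested_bpc in increments of 2 to a floor of 6 bpc
 * until validation passes.
 */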
5734 static struct dc_stream_state *
5735 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5736 				const struct drm_display_mode *drm_mode,
5737 				const struct dm_connector_state *dm_state,
5738 				const struct dc_stream_state *old_stream)
5739 {
5740 	struct drm_connector *connector = &aconnector->base;
5741 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5742 	struct dc_stream_state *stream;
5743 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5744 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5745 	enum dc_status dc_result = DC_OK;
5746 
5747 	do {
5748 		stream = create_stream_for_sink(aconnector, drm_mode,
5749 						dm_state, old_stream,
5750 						requested_bpc);
5751 		if (stream == NULL) {
5752 			DRM_ERROR("Failed to create stream for sink!\n");
5753 			break;
5754 		}
5755 
5756 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5757 
5758 		if (dc_result != DC_OK) {
5759 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5760 				      drm_mode->hdisplay,
5761 				      drm_mode->vdisplay,
5762 				      drm_mode->clock,
5763 				      dc_result,
5764 				      dc_status_to_str(dc_result));
5765 
5766 			dc_stream_release(stream);
5767 			stream = NULL;
5768 			requested_bpc -= 2; /* lower bpc to retry validation */
5769 		}
5770 
5771 	} while (stream == NULL && requested_bpc >= 6);
5772 
5773 	return stream;
5774 }
5775 
5776 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5777 				   struct drm_display_mode *mode)
5778 {
5779 	int result = MODE_ERROR;
5780 	struct dc_sink *dc_sink;
5781 	/* TODO: Unhardcode stream count */
5782 	struct dc_stream_state *stream;
5783 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5784 
5785 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5786 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5787 		return result;
5788 
	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID management.
	 */
5793 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5794 		!aconnector->dc_em_sink)
5795 		handle_edid_mgmt(aconnector);
5796 
5797 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5798 
5799 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5800 				aconnector->base.force != DRM_FORCE_ON) {
5801 		DRM_ERROR("dc_sink is NULL!\n");
5802 		goto fail;
5803 	}
5804 
5805 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5806 	if (stream) {
5807 		dc_stream_release(stream);
5808 		result = MODE_OK;
5809 	}
5810 
5811 fail:
	/* TODO: error handling */
5813 	return result;
5814 }
5815 
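/*
 * Pack the connector's HDR static metadata into a DC info packet: an HDMI
 * Dynamic Range and Mastering infoframe, or the equivalent SDP for DP/eDP.
 */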
5816 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5817 				struct dc_info_packet *out)
5818 {
5819 	struct hdmi_drm_infoframe frame;
5820 	unsigned char buf[30]; /* 26 + 4 */
5821 	ssize_t len;
5822 	int ret, i;
5823 
5824 	memset(out, 0, sizeof(*out));
5825 
5826 	if (!state->hdr_output_metadata)
5827 		return 0;
5828 
5829 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5830 	if (ret)
5831 		return ret;
5832 
5833 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5834 	if (len < 0)
5835 		return (int)len;
5836 
5837 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5838 	if (len != 30)
5839 		return -EINVAL;
5840 
5841 	/* Prepare the infopacket for DC. */
5842 	switch (state->connector->connector_type) {
5843 	case DRM_MODE_CONNECTOR_HDMIA:
5844 		out->hb0 = 0x87; /* type */
5845 		out->hb1 = 0x01; /* version */
5846 		out->hb2 = 0x1A; /* length */
5847 		out->sb[0] = buf[3]; /* checksum */
5848 		i = 1;
5849 		break;
5850 
5851 	case DRM_MODE_CONNECTOR_DisplayPort:
5852 	case DRM_MODE_CONNECTOR_eDP:
5853 		out->hb0 = 0x00; /* sdp id, zero */
5854 		out->hb1 = 0x87; /* type */
5855 		out->hb2 = 0x1D; /* payload len - 1 */
5856 		out->hb3 = (0x13 << 2); /* sdp version */
5857 		out->sb[0] = 0x01; /* version */
5858 		out->sb[1] = 0x1A; /* length */
5859 		i = 2;
5860 		break;
5861 
5862 	default:
5863 		return -EINVAL;
5864 	}
5865 
5866 	memcpy(&out->sb[i], &buf[4], 26);
5867 	out->valid = true;
5868 
5869 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5870 		       sizeof(out->sb), false);
5871 
5872 	return 0;
5873 }
5874 
5875 static bool
5876 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5877 			  const struct drm_connector_state *new_state)
5878 {
5879 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5880 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5881 
5882 	if (old_blob != new_blob) {
5883 		if (old_blob && new_blob &&
5884 		    old_blob->length == new_blob->length)
5885 			return memcmp(old_blob->data, new_blob->data,
5886 				      old_blob->length);
5887 
5888 		return true;
5889 	}
5890 
5891 	return false;
5892 }
5893 
5894 static int
5895 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5896 				 struct drm_atomic_state *state)
5897 {
5898 	struct drm_connector_state *new_con_state =
5899 		drm_atomic_get_new_connector_state(state, conn);
5900 	struct drm_connector_state *old_con_state =
5901 		drm_atomic_get_old_connector_state(state, conn);
5902 	struct drm_crtc *crtc = new_con_state->crtc;
5903 	struct drm_crtc_state *new_crtc_state;
5904 	int ret;
5905 
5906 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
5907 
5908 	if (!crtc)
5909 		return 0;
5910 
5911 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5912 		struct dc_info_packet hdr_infopacket;
5913 
5914 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5915 		if (ret)
5916 			return ret;
5917 
5918 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5919 		if (IS_ERR(new_crtc_state))
5920 			return PTR_ERR(new_crtc_state);
5921 
5922 		/*
5923 		 * DC considers the stream backends changed if the
5924 		 * static metadata changes. Forcing the modeset also
5925 		 * gives a simple way for userspace to switch from
5926 		 * 8bpc to 10bpc when setting the metadata to enter
5927 		 * or exit HDR.
5928 		 *
5929 		 * Changing the static metadata after it's been
5930 		 * set is permissible, however. So only force a
5931 		 * modeset if we're entering or exiting HDR.
5932 		 */
5933 		new_crtc_state->mode_changed =
5934 			!old_con_state->hdr_output_metadata ||
5935 			!new_con_state->hdr_output_metadata;
5936 	}
5937 
5938 	return 0;
5939 }
5940 
5941 static const struct drm_connector_helper_funcs
5942 amdgpu_dm_connector_helper_funcs = {
	/*
	 * If a second, bigger display is hotplugged in fbcon mode, its higher
	 * resolution modes are filtered out by drm_mode_validate_size() and
	 * are missing once the user starts lightdm. So renew the mode list in
	 * the get_modes callback instead of just returning the mode count.
	 */
5949 	.get_modes = get_modes,
5950 	.mode_valid = amdgpu_dm_connector_mode_valid,
5951 	.atomic_check = amdgpu_dm_connector_atomic_check,
5952 };
5953 
5954 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5955 {
5956 }
5957 
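/*
 * Count the non-cursor planes that will be enabled on the CRTC once this
 * atomic state is applied. Planes not touched by the state are assumed to
 * keep their previous, validated configuration.
 */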
5958 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5959 {
5960 	struct drm_atomic_state *state = new_crtc_state->state;
5961 	struct drm_plane *plane;
5962 	int num_active = 0;
5963 
5964 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5965 		struct drm_plane_state *new_plane_state;
5966 
5967 		/* Cursor planes are "fake". */
5968 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5969 			continue;
5970 
5971 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5972 
5973 		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
5979 			num_active += 1;
5980 			continue;
5981 		}
5982 
5983 		/* We need a framebuffer to be considered enabled. */
5984 		num_active += (new_plane_state->fb != NULL);
5985 	}
5986 
5987 	return num_active;
5988 }
5989 
5990 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5991 					 struct drm_crtc_state *new_crtc_state)
5992 {
5993 	struct dm_crtc_state *dm_new_crtc_state =
5994 		to_dm_crtc_state(new_crtc_state);
5995 
5996 	dm_new_crtc_state->active_planes = 0;
5997 
5998 	if (!dm_new_crtc_state->stream)
5999 		return;
6000 
6001 	dm_new_crtc_state->active_planes =
6002 		count_crtc_active_planes(new_crtc_state);
6003 }
6004 
6005 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6006 				       struct drm_atomic_state *state)
6007 {
6008 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6009 									  crtc);
6010 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6011 	struct dc *dc = adev->dm.dc;
6012 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6013 	int ret = -EINVAL;
6014 
6015 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6016 
6017 	dm_update_crtc_active_planes(crtc, crtc_state);
6018 
6019 	if (unlikely(!dm_crtc_state->stream &&
6020 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6021 		WARN_ON(1);
6022 		return ret;
6023 	}
6024 
6025 	/*
6026 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6027 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6028 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6029 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6030 	 */
6031 	if (crtc_state->enable &&
6032 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary)))
6033 		return -EINVAL;
6034 
6035 	/* In some use cases, like reset, no stream is attached */
6036 	if (!dm_crtc_state->stream)
6037 		return 0;
6038 
6039 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6040 		return 0;
6041 
6042 	return ret;
6043 }
6044 
6045 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6046 				      const struct drm_display_mode *mode,
6047 				      struct drm_display_mode *adjusted_mode)
6048 {
6049 	return true;
6050 }
6051 
6052 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6053 	.disable = dm_crtc_helper_disable,
6054 	.atomic_check = dm_crtc_helper_atomic_check,
6055 	.mode_fixup = dm_crtc_helper_mode_fixup,
6056 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6057 };
6058 
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
6063 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
6084 
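/*
 * MST-only check: compute the payload bandwidth number (PBN) for the
 * adjusted mode and requested color depth, then reserve the matching number
 * of VCPI slots in the atomic state.
 */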
6085 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6086 					  struct drm_crtc_state *crtc_state,
6087 					  struct drm_connector_state *conn_state)
6088 {
6089 	struct drm_atomic_state *state = crtc_state->state;
6090 	struct drm_connector *connector = conn_state->connector;
6091 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6092 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6093 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6094 	struct drm_dp_mst_topology_mgr *mst_mgr;
6095 	struct drm_dp_mst_port *mst_port;
6096 	enum dc_color_depth color_depth;
6097 	int clock, bpp = 0;
6098 	bool is_y420 = false;
6099 
6100 	if (!aconnector->port || !aconnector->dc_sink)
6101 		return 0;
6102 
6103 	mst_port = aconnector->port;
6104 	mst_mgr = &aconnector->mst_port->mst_mgr;
6105 
6106 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6107 		return 0;
6108 
6109 	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6112 				aconnector->force_yuv420_output;
6113 		color_depth = convert_color_depth_from_display_info(connector,
6114 								    is_y420,
6115 								    max_bpc);
6116 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6117 		clock = adjusted_mode->clock;
6118 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6119 	}
6120 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6121 									   mst_mgr,
6122 									   mst_port,
6123 									   dm_new_connector_state->pbn,
6124 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6125 	if (dm_new_connector_state->vcpi_slots < 0) {
6126 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6127 		return dm_new_connector_state->vcpi_slots;
6128 	}
6129 	return 0;
6130 }
6131 
6132 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6133 	.disable = dm_encoder_helper_disable,
6134 	.atomic_check = dm_encoder_helper_atomic_check
6135 };
6136 
6137 #if defined(CONFIG_DRM_AMD_DC_DCN)
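/*
 * For MST streams that use DSC, recompute the PBN from the DSC target
 * bits_per_pixel and re-allocate VCPI slots accordingly; streams without
 * DSC keep their previously computed PBN.
 */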
6138 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6139 					    struct dc_state *dc_state)
6140 {
6141 	struct dc_stream_state *stream = NULL;
6142 	struct drm_connector *connector;
6143 	struct drm_connector_state *new_con_state, *old_con_state;
6144 	struct amdgpu_dm_connector *aconnector;
6145 	struct dm_connector_state *dm_conn_state;
6146 	int i, j, clock, bpp;
6147 	int vcpi, pbn_div, pbn = 0;
6148 
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		aconnector = to_amdgpu_dm_connector(connector);
6152 
6153 		if (!aconnector->port)
6154 			continue;
6155 
6156 		if (!new_con_state || !new_con_state->crtc)
6157 			continue;
6158 
6159 		dm_conn_state = to_dm_connector_state(new_con_state);
6160 
6161 		for (j = 0; j < dc_state->stream_count; j++) {
6162 			stream = dc_state->streams[j];
6163 			if (!stream)
6164 				continue;
6165 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6167 				break;
6168 
6169 			stream = NULL;
6170 		}
6171 
6172 		if (!stream)
6173 			continue;
6174 
6175 		if (stream->timing.flags.DSC != 1) {
6176 			drm_dp_mst_atomic_enable_dsc(state,
6177 						     aconnector->port,
6178 						     dm_conn_state->pbn,
6179 						     0,
6180 						     false);
6181 			continue;
6182 		}
6183 
6184 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6185 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6186 		clock = stream->timing.pix_clk_100hz / 10;
6187 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6188 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6189 						    aconnector->port,
6190 						    pbn, pbn_div,
6191 						    true);
6192 		if (vcpi < 0)
6193 			return vcpi;
6194 
6195 		dm_conn_state->pbn = pbn;
6196 		dm_conn_state->vcpi_slots = vcpi;
6197 	}
6198 	return 0;
6199 }
6200 #endif
6201 
6202 static void dm_drm_plane_reset(struct drm_plane *plane)
6203 {
6204 	struct dm_plane_state *amdgpu_state = NULL;
6205 
6206 	if (plane->state)
6207 		plane->funcs->atomic_destroy_state(plane, plane->state);
6208 
6209 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6210 	WARN_ON(amdgpu_state == NULL);
6211 
6212 	if (amdgpu_state)
6213 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6214 }
6215 
6216 static struct drm_plane_state *
6217 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6218 {
6219 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6220 
6221 	old_dm_plane_state = to_dm_plane_state(plane->state);
6222 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6223 	if (!dm_plane_state)
6224 		return NULL;
6225 
6226 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6227 
6228 	if (old_dm_plane_state->dc_state) {
6229 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6230 		dc_plane_state_retain(dm_plane_state->dc_state);
6231 	}
6232 
6233 	return &dm_plane_state->base;
6234 }
6235 
6236 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6237 				struct drm_plane_state *state)
6238 {
6239 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6240 
6241 	if (dm_plane_state->dc_state)
6242 		dc_plane_state_release(dm_plane_state->dc_state);
6243 
6244 	drm_atomic_helper_plane_destroy_state(plane, state);
6245 }
6246 
6247 static const struct drm_plane_funcs dm_plane_funcs = {
6248 	.update_plane	= drm_atomic_helper_update_plane,
6249 	.disable_plane	= drm_atomic_helper_disable_plane,
6250 	.destroy	= drm_primary_helper_destroy,
6251 	.reset = dm_drm_plane_reset,
6252 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6253 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6254 	.format_mod_supported = dm_plane_format_mod_supported,
6255 };
6256 
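/*
 * Reserve and pin the framebuffer's buffer object into a displayable domain
 * (always VRAM for cursors), bind it into the GART and record its GPU
 * address. Buffer attributes are filled in here for newly created planes
 * only, since those are not yet used by DC.
 */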
6257 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6258 				      struct drm_plane_state *new_state)
6259 {
6260 	struct amdgpu_framebuffer *afb;
6261 	struct drm_gem_object *obj;
6262 	struct amdgpu_device *adev;
6263 	struct amdgpu_bo *rbo;
6264 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6265 	struct list_head list;
6266 	struct ttm_validate_buffer tv;
6267 	struct ww_acquire_ctx ticket;
6268 	uint32_t domain;
6269 	int r;
6270 
6271 	if (!new_state->fb) {
6272 		DRM_DEBUG_DRIVER("No FB bound\n");
6273 		return 0;
6274 	}
6275 
6276 	afb = to_amdgpu_framebuffer(new_state->fb);
6277 	obj = new_state->fb->obj[0];
6278 	rbo = gem_to_amdgpu_bo(obj);
6279 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6280 	INIT_LIST_HEAD(&list);
6281 
6282 	tv.bo = &rbo->tbo;
6283 	tv.num_shared = 1;
6284 	list_add(&tv.head, &list);
6285 
6286 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6287 	if (r) {
6288 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6289 		return r;
6290 	}
6291 
6292 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6293 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6294 	else
6295 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6296 
6297 	r = amdgpu_bo_pin(rbo, domain);
6298 	if (unlikely(r != 0)) {
6299 		if (r != -ERESTARTSYS)
6300 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6301 		ttm_eu_backoff_reservation(&ticket, &list);
6302 		return r;
6303 	}
6304 
6305 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6306 	if (unlikely(r != 0)) {
6307 		amdgpu_bo_unpin(rbo);
6308 		ttm_eu_backoff_reservation(&ticket, &list);
6309 		DRM_ERROR("%p bind failed\n", rbo);
6310 		return r;
6311 	}
6312 
6313 	ttm_eu_backoff_reservation(&ticket, &list);
6314 
6315 	afb->address = amdgpu_bo_gpu_offset(rbo);
6316 
6317 	amdgpu_bo_ref(rbo);
6318 
	/*
6320 	 * We don't do surface updates on planes that have been newly created,
6321 	 * but we also don't have the afb->address during atomic check.
6322 	 *
6323 	 * Fill in buffer attributes depending on the address here, but only on
6324 	 * newly created planes since they're not being used by DC yet and this
6325 	 * won't modify global state.
6326 	 */
6327 	dm_plane_state_old = to_dm_plane_state(plane->state);
6328 	dm_plane_state_new = to_dm_plane_state(new_state);
6329 
6330 	if (dm_plane_state_new->dc_state &&
6331 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6332 		struct dc_plane_state *plane_state =
6333 			dm_plane_state_new->dc_state;
6334 		bool force_disable_dcc = !plane_state->dcc.enable;
6335 
6336 		fill_plane_buffer_attributes(
6337 			adev, afb, plane_state->format, plane_state->rotation,
6338 			afb->tiling_flags,
6339 			&plane_state->tiling_info, &plane_state->plane_size,
6340 			&plane_state->dcc, &plane_state->address,
6341 			afb->tmz_surface, force_disable_dcc);
6342 	}
6343 
6344 	return 0;
6345 }
6346 
6347 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6348 				       struct drm_plane_state *old_state)
6349 {
6350 	struct amdgpu_bo *rbo;
6351 	int r;
6352 
6353 	if (!old_state->fb)
6354 		return;
6355 
6356 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6357 	r = amdgpu_bo_reserve(rbo, false);
6358 	if (unlikely(r)) {
6359 		DRM_ERROR("failed to reserve rbo before unpin\n");
6360 		return;
6361 	}
6362 
6363 	amdgpu_bo_unpin(rbo);
6364 	amdgpu_bo_unreserve(rbo);
6365 	amdgpu_bo_unref(&rbo);
6366 }
6367 
6368 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6369 				       struct drm_crtc_state *new_crtc_state)
6370 {
6371 	int max_downscale = 0;
6372 	int max_upscale = INT_MAX;
6373 
6374 	/* TODO: These should be checked against DC plane caps */
6375 	return drm_atomic_helper_check_plane_state(
6376 		state, new_crtc_state, max_downscale, max_upscale, true, true);
6377 }
6378 
6379 static int dm_plane_atomic_check(struct drm_plane *plane,
6380 				 struct drm_plane_state *state)
6381 {
6382 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6383 	struct dc *dc = adev->dm.dc;
6384 	struct dm_plane_state *dm_plane_state;
6385 	struct dc_scaling_info scaling_info;
6386 	struct drm_crtc_state *new_crtc_state;
6387 	int ret;
6388 
6389 	trace_amdgpu_dm_plane_atomic_check(state);
6390 
6391 	dm_plane_state = to_dm_plane_state(state);
6392 
6393 	if (!dm_plane_state->dc_state)
6394 		return 0;
6395 
6396 	new_crtc_state =
6397 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6398 	if (!new_crtc_state)
6399 		return -EINVAL;
6400 
6401 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6402 	if (ret)
6403 		return ret;
6404 
6405 	ret = fill_dc_scaling_info(state, &scaling_info);
6406 	if (ret)
6407 		return ret;
6408 
6409 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6410 		return 0;
6411 
6412 	return -EINVAL;
6413 }
6414 
6415 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6416 				       struct drm_plane_state *new_plane_state)
6417 {
6418 	/* Only support async updates on cursor planes. */
6419 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6420 		return -EINVAL;
6421 
6422 	return 0;
6423 }
6424 
6425 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6426 					 struct drm_plane_state *new_state)
6427 {
6428 	struct drm_plane_state *old_state =
6429 		drm_atomic_get_old_plane_state(new_state->state, plane);
6430 
6431 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6432 
6433 	swap(plane->state->fb, new_state->fb);
6434 
6435 	plane->state->src_x = new_state->src_x;
6436 	plane->state->src_y = new_state->src_y;
6437 	plane->state->src_w = new_state->src_w;
6438 	plane->state->src_h = new_state->src_h;
6439 	plane->state->crtc_x = new_state->crtc_x;
6440 	plane->state->crtc_y = new_state->crtc_y;
6441 	plane->state->crtc_w = new_state->crtc_w;
6442 	plane->state->crtc_h = new_state->crtc_h;
6443 
6444 	handle_cursor_update(plane, old_state);
6445 }
6446 
6447 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6448 	.prepare_fb = dm_plane_helper_prepare_fb,
6449 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6450 	.atomic_check = dm_plane_atomic_check,
6451 	.atomic_async_check = dm_plane_atomic_async_check,
6452 	.atomic_async_update = dm_plane_atomic_async_update
6453 };
6454 
/*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the
 * internal drm check will succeed, and let DC implement the proper check.
 */
6461 static const uint32_t rgb_formats[] = {
6462 	DRM_FORMAT_XRGB8888,
6463 	DRM_FORMAT_ARGB8888,
6464 	DRM_FORMAT_RGBA8888,
6465 	DRM_FORMAT_XRGB2101010,
6466 	DRM_FORMAT_XBGR2101010,
6467 	DRM_FORMAT_ARGB2101010,
6468 	DRM_FORMAT_ABGR2101010,
6469 	DRM_FORMAT_XBGR8888,
6470 	DRM_FORMAT_ABGR8888,
6471 	DRM_FORMAT_RGB565,
6472 };
6473 
6474 static const uint32_t overlay_formats[] = {
6475 	DRM_FORMAT_XRGB8888,
6476 	DRM_FORMAT_ARGB8888,
6477 	DRM_FORMAT_RGBA8888,
6478 	DRM_FORMAT_XBGR8888,
6479 	DRM_FORMAT_ABGR8888,
6480 	DRM_FORMAT_RGB565
6481 };
6482 
6483 static const u32 cursor_formats[] = {
6484 	DRM_FORMAT_ARGB8888
6485 };
6486 
6487 static int get_plane_formats(const struct drm_plane *plane,
6488 			     const struct dc_plane_cap *plane_cap,
6489 			     uint32_t *formats, int max_formats)
6490 {
6491 	int i, num_formats = 0;
6492 
6493 	/*
6494 	 * TODO: Query support for each group of formats directly from
6495 	 * DC plane caps. This will require adding more formats to the
6496 	 * caps list.
6497 	 */
6498 
6499 	switch (plane->type) {
6500 	case DRM_PLANE_TYPE_PRIMARY:
6501 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6502 			if (num_formats >= max_formats)
6503 				break;
6504 
6505 			formats[num_formats++] = rgb_formats[i];
6506 		}
6507 
6508 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6509 			formats[num_formats++] = DRM_FORMAT_NV12;
6510 		if (plane_cap && plane_cap->pixel_format_support.p010)
6511 			formats[num_formats++] = DRM_FORMAT_P010;
6512 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6513 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6514 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6515 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6516 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6517 		}
6518 		break;
6519 
6520 	case DRM_PLANE_TYPE_OVERLAY:
6521 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6522 			if (num_formats >= max_formats)
6523 				break;
6524 
6525 			formats[num_formats++] = overlay_formats[i];
6526 		}
6527 		break;
6528 
6529 	case DRM_PLANE_TYPE_CURSOR:
6530 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6531 			if (num_formats >= max_formats)
6532 				break;
6533 
6534 			formats[num_formats++] = cursor_formats[i];
6535 		}
6536 		break;
6537 	}
6538 
6539 	return num_formats;
6540 }
6541 
6542 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6543 				struct drm_plane *plane,
6544 				unsigned long possible_crtcs,
6545 				const struct dc_plane_cap *plane_cap)
6546 {
6547 	uint32_t formats[32];
6548 	int num_formats;
6549 	int res = -EPERM;
6550 	unsigned int supported_rotations;
6551 	uint64_t *modifiers = NULL;
6552 
6553 	num_formats = get_plane_formats(plane, plane_cap, formats,
6554 					ARRAY_SIZE(formats));
6555 
6556 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6557 	if (res)
6558 		return res;
6559 
6560 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6561 				       &dm_plane_funcs, formats, num_formats,
6562 				       modifiers, plane->type, NULL);
6563 	kfree(modifiers);
6564 	if (res)
6565 		return res;
6566 
6567 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6568 	    plane_cap && plane_cap->per_pixel_alpha) {
6569 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6570 					  BIT(DRM_MODE_BLEND_PREMULTI);
6571 
6572 		drm_plane_create_alpha_property(plane);
6573 		drm_plane_create_blend_mode_property(plane, blend_caps);
6574 	}
6575 
6576 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6577 	    plane_cap &&
6578 	    (plane_cap->pixel_format_support.nv12 ||
6579 	     plane_cap->pixel_format_support.p010)) {
6580 		/* This only affects YUV formats. */
6581 		drm_plane_create_color_properties(
6582 			plane,
6583 			BIT(DRM_COLOR_YCBCR_BT601) |
6584 			BIT(DRM_COLOR_YCBCR_BT709) |
6585 			BIT(DRM_COLOR_YCBCR_BT2020),
6586 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6587 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6588 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6589 	}
6590 
6591 	supported_rotations =
6592 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6593 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6594 
6595 	if (dm->adev->asic_type >= CHIP_BONAIRE)
6596 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6597 						   supported_rotations);
6598 
6599 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6600 
6601 	/* Create (reset) the plane state */
6602 	if (plane->funcs->reset)
6603 		plane->funcs->reset(plane);
6604 
6605 	return 0;
6606 }
6607 
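/*
 * Initialize an amdgpu CRTC together with its dedicated cursor plane and
 * the color management (degamma/CTM/gamma) state DC exposes through DRM.
 */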
6608 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6609 			       struct drm_plane *plane,
6610 			       uint32_t crtc_index)
6611 {
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;
	int res = -ENOMEM;
6616 
6617 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6618 	if (!cursor_plane)
6619 		goto fail;
6620 
	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
6623 
6624 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6625 	if (!acrtc)
6626 		goto fail;
6627 
6628 	res = drm_crtc_init_with_planes(
6629 			dm->ddev,
6630 			&acrtc->base,
6631 			plane,
6632 			cursor_plane,
6633 			&amdgpu_dm_crtc_funcs, NULL);
6634 
6635 	if (res)
6636 		goto fail;
6637 
6638 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6639 
	/* Create (reset) the CRTC state */
6641 	if (acrtc->base.funcs->reset)
6642 		acrtc->base.funcs->reset(&acrtc->base);
6643 
6644 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6645 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6646 
6647 	acrtc->crtc_id = crtc_index;
6648 	acrtc->base.enabled = false;
6649 	acrtc->otg_inst = -1;
6650 
6651 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6652 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6653 				   true, MAX_COLOR_LUT_ENTRIES);
6654 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6655 
6656 	return 0;
6657 
6658 fail:
6659 	kfree(acrtc);
6660 	kfree(cursor_plane);
6661 	return res;
6662 }
6663 
6664 
6665 static int to_drm_connector_type(enum signal_type st)
6666 {
6667 	switch (st) {
6668 	case SIGNAL_TYPE_HDMI_TYPE_A:
6669 		return DRM_MODE_CONNECTOR_HDMIA;
6670 	case SIGNAL_TYPE_EDP:
6671 		return DRM_MODE_CONNECTOR_eDP;
6672 	case SIGNAL_TYPE_LVDS:
6673 		return DRM_MODE_CONNECTOR_LVDS;
6674 	case SIGNAL_TYPE_RGB:
6675 		return DRM_MODE_CONNECTOR_VGA;
6676 	case SIGNAL_TYPE_DISPLAY_PORT:
6677 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6678 		return DRM_MODE_CONNECTOR_DisplayPort;
6679 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6680 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6681 		return DRM_MODE_CONNECTOR_DVID;
6682 	case SIGNAL_TYPE_VIRTUAL:
6683 		return DRM_MODE_CONNECTOR_VIRTUAL;
6684 
6685 	default:
6686 		return DRM_MODE_CONNECTOR_Unknown;
6687 	}
6688 }
6689 
6690 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6691 {
6692 	struct drm_encoder *encoder;
6693 
6694 	/* There is only one encoder per connector */
6695 	drm_connector_for_each_possible_encoder(connector, encoder)
6696 		return encoder;
6697 
6698 	return NULL;
6699 }
6700 
6701 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6702 {
6703 	struct drm_encoder *encoder;
6704 	struct amdgpu_encoder *amdgpu_encoder;
6705 
6706 	encoder = amdgpu_dm_connector_to_encoder(connector);
6707 
6708 	if (encoder == NULL)
6709 		return;
6710 
6711 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6712 
6713 	amdgpu_encoder->native_mode.clock = 0;
6714 
6715 	if (!list_empty(&connector->probed_modes)) {
6716 		struct drm_display_mode *preferred_mode = NULL;
6717 
6718 		list_for_each_entry(preferred_mode,
6719 				    &connector->probed_modes,
6720 				    head) {
6721 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6722 				amdgpu_encoder->native_mode = *preferred_mode;
6723 
6724 			break;
		}
	}
6728 }
6729 
6730 static struct drm_display_mode *
6731 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6732 			     char *name,
6733 			     int hdisplay, int vdisplay)
6734 {
6735 	struct drm_device *dev = encoder->dev;
6736 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6737 	struct drm_display_mode *mode = NULL;
6738 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6739 
6740 	mode = drm_mode_duplicate(dev, native_mode);
6741 
6742 	if (mode == NULL)
6743 		return NULL;
6744 
6745 	mode->hdisplay = hdisplay;
6746 	mode->vdisplay = vdisplay;
6747 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6748 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6749 
	return mode;
}
6753 
6754 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6755 						 struct drm_connector *connector)
6756 {
6757 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6758 	struct drm_display_mode *mode = NULL;
6759 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6760 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6761 				to_amdgpu_dm_connector(connector);
6762 	int i;
6763 	int n;
6764 	struct mode_size {
6765 		char name[DRM_DISPLAY_MODE_LEN];
6766 		int w;
6767 		int h;
6768 	} common_modes[] = {
6769 		{  "640x480",  640,  480},
6770 		{  "800x600",  800,  600},
6771 		{ "1024x768", 1024,  768},
6772 		{ "1280x720", 1280,  720},
6773 		{ "1280x800", 1280,  800},
6774 		{"1280x1024", 1280, 1024},
6775 		{ "1440x900", 1440,  900},
6776 		{"1680x1050", 1680, 1050},
6777 		{"1600x1200", 1600, 1200},
6778 		{"1920x1080", 1920, 1080},
6779 		{"1920x1200", 1920, 1200}
6780 	};
6781 
6782 	n = ARRAY_SIZE(common_modes);
6783 
6784 	for (i = 0; i < n; i++) {
6785 		struct drm_display_mode *curmode = NULL;
6786 		bool mode_existed = false;
6787 
6788 		if (common_modes[i].w > native_mode->hdisplay ||
6789 		    common_modes[i].h > native_mode->vdisplay ||
6790 		   (common_modes[i].w == native_mode->hdisplay &&
6791 		    common_modes[i].h == native_mode->vdisplay))
6792 			continue;
6793 
6794 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6795 			if (common_modes[i].w == curmode->hdisplay &&
6796 			    common_modes[i].h == curmode->vdisplay) {
6797 				mode_existed = true;
6798 				break;
6799 			}
6800 		}
6801 
6802 		if (mode_existed)
6803 			continue;
6804 
		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
6810 	}
6811 }
6812 
6813 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6814 					      struct edid *edid)
6815 {
6816 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6817 			to_amdgpu_dm_connector(connector);
6818 
6819 	if (edid) {
6820 		/* empty probed_modes */
6821 		INIT_LIST_HEAD(&connector->probed_modes);
6822 		amdgpu_dm_connector->num_modes =
6823 				drm_add_edid_modes(connector, edid);
6824 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can carry
		 * more than one preferred mode. A mode later in the
		 * probed list may be preferred and of higher resolution,
		 * e.g. 3840x2160 as the base EDID preferred timing with
		 * a 4096x2160 preferred mode in a DID extension block.
		 */
6833 		drm_mode_sort(&connector->probed_modes);
6834 		amdgpu_dm_get_native_mode(connector);
6835 	} else {
6836 		amdgpu_dm_connector->num_modes = 0;
6837 	}
6838 }
6839 
6840 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6841 {
6842 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6843 			to_amdgpu_dm_connector(connector);
6844 	struct drm_encoder *encoder;
6845 	struct edid *edid = amdgpu_dm_connector->edid;
6846 
6847 	encoder = amdgpu_dm_connector_to_encoder(connector);
6848 
6849 	if (!edid || !drm_edid_is_valid(edid)) {
6850 		amdgpu_dm_connector->num_modes =
6851 				drm_add_modes_noedid(connector, 640, 480);
6852 	} else {
6853 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6854 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6855 	}
6856 	amdgpu_dm_fbc_init(connector);
6857 
6858 	return amdgpu_dm_connector->num_modes;
6859 }
6860 
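/*
 * Fill in the common amdgpu_dm_connector fields and attach the DRM
 * properties (scaling, underscan, max bpc, HDR metadata, VRR and,
 * optionally, content protection) that this connector type supports.
 */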
6861 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6862 				     struct amdgpu_dm_connector *aconnector,
6863 				     int connector_type,
6864 				     struct dc_link *link,
6865 				     int link_index)
6866 {
6867 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6868 
6869 	/*
6870 	 * Some of the properties below require access to state, like bpc.
6871 	 * Allocate some default initial connector state with our reset helper.
6872 	 */
6873 	if (aconnector->base.funcs->reset)
6874 		aconnector->base.funcs->reset(&aconnector->base);
6875 
6876 	aconnector->connector_id = link_index;
6877 	aconnector->dc_link = link;
6878 	aconnector->base.interlace_allowed = false;
6879 	aconnector->base.doublescan_allowed = false;
6880 	aconnector->base.stereo_allowed = false;
6881 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6882 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6883 	aconnector->audio_inst = -1;
6884 	mutex_init(&aconnector->hpd_lock);
6885 
	/*
	 * Configure support for HPD hot plug. connector->polled defaults
	 * to 0, which means HPD hot plug is not supported.
	 */
6890 	switch (connector_type) {
6891 	case DRM_MODE_CONNECTOR_HDMIA:
6892 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
6895 		break;
6896 	case DRM_MODE_CONNECTOR_DisplayPort:
6897 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
6900 		break;
6901 	case DRM_MODE_CONNECTOR_DVID:
6902 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6903 		break;
6904 	default:
6905 		break;
6906 	}
6907 
6908 	drm_object_attach_property(&aconnector->base.base,
6909 				dm->ddev->mode_config.scaling_mode_property,
6910 				DRM_MODE_SCALE_NONE);
6911 
6912 	drm_object_attach_property(&aconnector->base.base,
6913 				adev->mode_info.underscan_property,
6914 				UNDERSCAN_OFF);
6915 	drm_object_attach_property(&aconnector->base.base,
6916 				adev->mode_info.underscan_hborder_property,
6917 				0);
6918 	drm_object_attach_property(&aconnector->base.base,
6919 				adev->mode_info.underscan_vborder_property,
6920 				0);
6921 
6922 	if (!aconnector->mst_port)
6923 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6924 
	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
6926 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6927 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6928 
6929 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6930 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6931 		drm_object_attach_property(&aconnector->base.base,
6932 				adev->mode_info.abm_level_property, 0);
6933 	}
6934 
6935 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6936 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6937 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6938 		drm_object_attach_property(
6939 			&aconnector->base.base,
6940 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6941 
6942 		if (!aconnector->mst_port)
6943 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6944 
6945 #ifdef CONFIG_DRM_AMD_DC_HDCP
6946 		if (adev->dm.hdcp_workqueue)
6947 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6948 #endif
6949 	}
6950 }
6951 
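/*
 * i2c adapter glue: translate a set of kernel i2c_msgs into a DC
 * i2c_command and hand it to dc_submit_i2c() on this connector's DDC
 * channel. Returns the number of messages transferred, or -EIO on
 * failure.
 */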
6952 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6953 			      struct i2c_msg *msgs, int num)
6954 {
6955 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6956 	struct ddc_service *ddc_service = i2c->ddc_service;
6957 	struct i2c_command cmd;
6958 	int i;
6959 	int result = -EIO;
6960 
6961 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6962 
6963 	if (!cmd.payloads)
6964 		return result;
6965 
6966 	cmd.number_of_payloads = num;
6967 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6968 	cmd.speed = 100;
6969 
6970 	for (i = 0; i < num; i++) {
6971 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6972 		cmd.payloads[i].address = msgs[i].addr;
6973 		cmd.payloads[i].length = msgs[i].len;
6974 		cmd.payloads[i].data = msgs[i].buf;
6975 	}
6976 
6977 	if (dc_submit_i2c(
6978 			ddc_service->ctx->dc,
6979 			ddc_service->ddc_pin->hw_info.ddc_channel,
6980 			&cmd))
6981 		result = num;
6982 
6983 	kfree(cmd.payloads);
6984 	return result;
6985 }
6986 
6987 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6988 {
6989 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6990 }
6991 
6992 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6993 	.master_xfer = amdgpu_dm_i2c_xfer,
6994 	.functionality = amdgpu_dm_i2c_func,
6995 };
6996 
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index)
{
7002 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7003 	struct amdgpu_i2c_adapter *i2c;
7004 
7005 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7006 	if (!i2c)
7007 		return NULL;
7008 	i2c->base.owner = THIS_MODULE;
7009 	i2c->base.class = I2C_CLASS_DDC;
7010 	i2c->base.dev.parent = &adev->pdev->dev;
7011 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7012 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7013 	i2c_set_adapdata(&i2c->base, i2c);
7014 	i2c->ddc_service = ddc_service;
7015 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7016 
7017 	return i2c;
7018 }
7019 
7020 
7021 /*
7022  * Note: this function assumes that dc_link_detect() was called for the
7023  * dc_link which will be represented by this aconnector.
7024  */
7025 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7026 				    struct amdgpu_dm_connector *aconnector,
7027 				    uint32_t link_index,
7028 				    struct amdgpu_encoder *aencoder)
7029 {
7030 	int res = 0;
7031 	int connector_type;
7032 	struct dc *dc = dm->dc;
7033 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7034 	struct amdgpu_i2c_adapter *i2c;
7035 
7036 	link->priv = aconnector;
7037 
7038 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7039 
	i2c = create_i2c(link->ddc, link->link_index);
7041 	if (!i2c) {
7042 		DRM_ERROR("Failed to create i2c adapter data\n");
7043 		return -ENOMEM;
7044 	}
7045 
7046 	aconnector->i2c = i2c;
7047 	res = i2c_add_adapter(&i2c->base);
7048 
7049 	if (res) {
7050 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7051 		goto out_free;
7052 	}
7053 
7054 	connector_type = to_drm_connector_type(link->connector_signal);
7055 
7056 	res = drm_connector_init_with_ddc(
7057 			dm->ddev,
7058 			&aconnector->base,
7059 			&amdgpu_dm_connector_funcs,
7060 			connector_type,
7061 			&i2c->base);
7062 
7063 	if (res) {
7064 		DRM_ERROR("connector_init failed\n");
7065 		aconnector->connector_id = -1;
7066 		goto out_free;
7067 	}
7068 
7069 	drm_connector_helper_add(
7070 			&aconnector->base,
7071 			&amdgpu_dm_connector_helper_funcs);
7072 
7073 	amdgpu_dm_connector_init_helper(
7074 		dm,
7075 		aconnector,
7076 		connector_type,
7077 		link,
7078 		link_index);
7079 
7080 	drm_connector_attach_encoder(
7081 		&aconnector->base, &aencoder->base);
7082 
7083 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7084 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7085 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7086 
7087 out_free:
7088 	if (res) {
7089 		kfree(i2c);
7090 		aconnector->i2c = NULL;
7091 	}
7092 	return res;
7093 }
7094 
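/*
 * Returns a bitmask with one bit set per CRTC an encoder may drive,
 * clamped to the six CRTCs supported here.
 */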
7095 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7096 {
7097 	switch (adev->mode_info.num_crtc) {
7098 	case 1:
7099 		return 0x1;
7100 	case 2:
7101 		return 0x3;
7102 	case 3:
7103 		return 0x7;
7104 	case 4:
7105 		return 0xf;
7106 	case 5:
7107 		return 0x1f;
7108 	case 6:
7109 	default:
7110 		return 0x3f;
7111 	}
7112 }
7113 
7114 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7115 				  struct amdgpu_encoder *aencoder,
7116 				  uint32_t link_index)
7117 {
7118 	struct amdgpu_device *adev = drm_to_adev(dev);
7119 
7120 	int res = drm_encoder_init(dev,
7121 				   &aencoder->base,
7122 				   &amdgpu_dm_encoder_funcs,
7123 				   DRM_MODE_ENCODER_TMDS,
7124 				   NULL);
7125 
7126 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7127 
7128 	if (!res)
7129 		aencoder->encoder_id = link_index;
7130 	else
7131 		aencoder->encoder_id = -1;
7132 
7133 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7134 
7135 	return res;
7136 }
7137 
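/*
 * Enable or disable the pageflip interrupt and DRM vblank handling for
 * a CRTC. Note the ordering: vblank is turned on before the IRQ is
 * acquired, and turned off only after the IRQ is released.
 */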
7138 static void manage_dm_interrupts(struct amdgpu_device *adev,
7139 				 struct amdgpu_crtc *acrtc,
7140 				 bool enable)
7141 {
7142 	/*
7143 	 * We have no guarantee that the frontend index maps to the same
7144 	 * backend index - some even map to more than one.
7145 	 *
7146 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7147 	 */
7148 	int irq_type =
7149 		amdgpu_display_crtc_idx_to_irq_type(
7150 			adev,
7151 			acrtc->crtc_id);
7152 
7153 	if (enable) {
7154 		drm_crtc_vblank_on(&acrtc->base);
7155 		amdgpu_irq_get(
7156 			adev,
7157 			&adev->pageflip_irq,
7158 			irq_type);
7159 	} else {
7160 
7161 		amdgpu_irq_put(
7162 			adev,
7163 			&adev->pageflip_irq,
7164 			irq_type);
7165 		drm_crtc_vblank_off(&acrtc->base);
7166 	}
7167 }
7168 
7169 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7170 				      struct amdgpu_crtc *acrtc)
7171 {
7172 	int irq_type =
7173 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7174 
	/*
	 * This reads the current state for the IRQ and forcibly reapplies
	 * the setting to hardware.
	 */
7179 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7180 }
7181 
7182 static bool
7183 is_scaling_state_different(const struct dm_connector_state *dm_state,
7184 			   const struct dm_connector_state *old_dm_state)
7185 {
7186 	if (dm_state->scaling != old_dm_state->scaling)
7187 		return true;
7188 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7189 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7190 			return true;
7191 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7192 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7193 			return true;
7194 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7195 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7196 		return true;
7197 	return false;
7198 }
7199 
7200 #ifdef CONFIG_DRM_AMD_DC_HDCP
7201 static bool is_content_protection_different(struct drm_connector_state *state,
7202 					    const struct drm_connector_state *old_state,
7203 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7204 {
7205 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7206 
7207 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7208 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7209 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7210 		return true;
7211 	}
7212 
	/* CP is being re-enabled, ignore this */
7214 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7215 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7216 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7217 		return false;
7218 	}
7219 
7220 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
7221 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7222 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7223 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7224 
	/* Check if something is connected/enabled; otherwise we would start
	 * HDCP with nothing connected/enabled: hot-plug, headless S3, DPMS.
	 */
7228 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
7229 	    aconnector->dc_sink != NULL)
7230 		return true;
7231 
7232 	if (old_state->content_protection == state->content_protection)
7233 		return false;
7234 
7235 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
7236 		return true;
7237 
7238 	return false;
7239 }
7240 
7241 #endif
7242 static void remove_stream(struct amdgpu_device *adev,
7243 			  struct amdgpu_crtc *acrtc,
7244 			  struct dc_stream_state *stream)
7245 {
	/* This is the update-mode case: the stream is going away, so mark
	 * the CRTC disabled and detach it from its OTG instance.
	 */
7247 
7248 	acrtc->otg_inst = -1;
7249 	acrtc->enabled = false;
7250 }
7251 
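/*
 * Compute the DC cursor position for a plane. A cursor partially off
 * the top/left edge is expressed by clamping x/y to 0 and moving the
 * hotspot instead; a cursor entirely off screen stays disabled.
 */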
7252 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7253 			       struct dc_cursor_position *position)
7254 {
7255 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7256 	int x, y;
7257 	int xorigin = 0, yorigin = 0;
7258 
7259 	position->enable = false;
7260 	position->x = 0;
7261 	position->y = 0;
7262 
7263 	if (!crtc || !plane->state->fb)
7264 		return 0;
7265 
7266 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7267 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7268 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7269 			  __func__,
7270 			  plane->state->crtc_w,
7271 			  plane->state->crtc_h);
7272 		return -EINVAL;
7273 	}
7274 
7275 	x = plane->state->crtc_x;
7276 	y = plane->state->crtc_y;
7277 
7278 	if (x <= -amdgpu_crtc->max_cursor_width ||
7279 	    y <= -amdgpu_crtc->max_cursor_height)
7280 		return 0;
7281 
7282 	if (x < 0) {
7283 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7284 		x = 0;
7285 	}
7286 	if (y < 0) {
7287 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7288 		y = 0;
7289 	}
7290 	position->enable = true;
7291 	position->translate_by_source = true;
7292 	position->x = x;
7293 	position->y = y;
7294 	position->x_hotspot = xorigin;
7295 	position->y_hotspot = yorigin;
7296 
7297 	return 0;
7298 }
7299 
7300 static void handle_cursor_update(struct drm_plane *plane,
7301 				 struct drm_plane_state *old_plane_state)
7302 {
7303 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7304 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7305 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7306 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7307 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7308 	uint64_t address = afb ? afb->address : 0;
7309 	struct dc_cursor_position position;
7310 	struct dc_cursor_attributes attributes;
7311 	int ret;
7312 
7313 	if (!plane->state->fb && !old_plane_state->fb)
7314 		return;
7315 
7316 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7317 			 __func__,
7318 			 amdgpu_crtc->crtc_id,
7319 			 plane->state->crtc_w,
7320 			 plane->state->crtc_h);
7321 
7322 	ret = get_cursor_position(plane, crtc, &position);
7323 	if (ret)
7324 		return;
7325 
7326 	if (!position.enable) {
7327 		/* turn off cursor */
7328 		if (crtc_state && crtc_state->stream) {
7329 			mutex_lock(&adev->dm.dc_lock);
7330 			dc_stream_set_cursor_position(crtc_state->stream,
7331 						      &position);
7332 			mutex_unlock(&adev->dm.dc_lock);
7333 		}
7334 		return;
7335 	}
7336 
7337 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7338 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7339 
7340 	memset(&attributes, 0, sizeof(attributes));
7341 	attributes.address.high_part = upper_32_bits(address);
7342 	attributes.address.low_part  = lower_32_bits(address);
7343 	attributes.width             = plane->state->crtc_w;
7344 	attributes.height            = plane->state->crtc_h;
7345 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7346 	attributes.rotation_angle    = 0;
7347 	attributes.attribute_flags.value = 0;
7348 
7349 	attributes.pitch = attributes.width;
7350 
7351 	if (crtc_state->stream) {
7352 		mutex_lock(&adev->dm.dc_lock);
7353 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7354 							 &attributes))
7355 			DRM_ERROR("DC failed to set cursor attributes\n");
7356 
7357 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7358 						   &position))
7359 			DRM_ERROR("DC failed to set cursor position\n");
7360 		mutex_unlock(&adev->dm.dc_lock);
7361 	}
7362 }
7363 
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
	assert_spin_locked(&acrtc->base.dev->event_lock);
7368 	WARN_ON(acrtc->event);
7369 
7370 	acrtc->event = acrtc->base.state->event;
7371 
7372 	/* Set the flip status */
7373 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7374 
7375 	/* Mark this event as consumed */
7376 	acrtc->base.state->event = NULL;
7377 
7378 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7379 						 acrtc->crtc_id);
7380 }
7381 
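/*
 * Recompute the VRR state for a stream around a flip: run the freesync
 * preflip handling, rebuild the VRR infopacket, and record whether the
 * timing or infopacket changed, all under the event lock so the
 * vblank/pageflip IRQ handlers see consistent vrr_params.
 */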
7382 static void update_freesync_state_on_stream(
7383 	struct amdgpu_display_manager *dm,
7384 	struct dm_crtc_state *new_crtc_state,
7385 	struct dc_stream_state *new_stream,
7386 	struct dc_plane_state *surface,
7387 	u32 flip_timestamp_in_us)
7388 {
7389 	struct mod_vrr_params vrr_params;
7390 	struct dc_info_packet vrr_infopacket = {0};
7391 	struct amdgpu_device *adev = dm->adev;
7392 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7393 	unsigned long flags;
7394 
7395 	if (!new_stream)
7396 		return;
7397 
7398 	/*
7399 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7400 	 * For now it's sufficient to just guard against these conditions.
7401 	 */
7402 
7403 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7404 		return;
7405 
7406 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
7408 
7409 	if (surface) {
7410 		mod_freesync_handle_preflip(
7411 			dm->freesync_module,
7412 			surface,
7413 			new_stream,
7414 			flip_timestamp_in_us,
7415 			&vrr_params);
7416 
7417 		if (adev->family < AMDGPU_FAMILY_AI &&
7418 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7419 			mod_freesync_handle_v_update(dm->freesync_module,
7420 						     new_stream, &vrr_params);
7421 
7422 			/* Need to call this before the frame ends. */
7423 			dc_stream_adjust_vmin_vmax(dm->dc,
7424 						   new_crtc_state->stream,
7425 						   &vrr_params.adjust);
7426 		}
7427 	}
7428 
7429 	mod_freesync_build_vrr_infopacket(
7430 		dm->freesync_module,
7431 		new_stream,
7432 		&vrr_params,
7433 		PACKET_TYPE_VRR,
7434 		TRANSFER_FUNC_UNKNOWN,
7435 		&vrr_infopacket);
7436 
7437 	new_crtc_state->freesync_timing_changed |=
7438 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7439 			&vrr_params.adjust,
7440 			sizeof(vrr_params.adjust)) != 0);
7441 
7442 	new_crtc_state->freesync_vrr_info_changed |=
7443 		(memcmp(&new_crtc_state->vrr_infopacket,
7444 			&vrr_infopacket,
7445 			sizeof(vrr_infopacket)) != 0);
7446 
7447 	acrtc->dm_irq_params.vrr_params = vrr_params;
7448 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7449 
7450 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7451 	new_stream->vrr_infopacket = vrr_infopacket;
7452 
7453 	if (new_crtc_state->freesync_vrr_info_changed)
7454 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7455 			      new_crtc_state->base.crtc->base.id,
7456 			      (int)new_crtc_state->base.vrr_enabled,
7457 			      (int)vrr_params.state);
7458 
7459 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7460 }
7461 
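/*
 * Derive the freesync config/state for a stream and mirror it into
 * acrtc->dm_irq_params, so the IRQ handlers can access it without
 * touching atomic state.
 */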
7462 static void update_stream_irq_parameters(
7463 	struct amdgpu_display_manager *dm,
7464 	struct dm_crtc_state *new_crtc_state)
7465 {
7466 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7467 	struct mod_vrr_params vrr_params;
7468 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7469 	struct amdgpu_device *adev = dm->adev;
7470 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7471 	unsigned long flags;
7472 
7473 	if (!new_stream)
7474 		return;
7475 
7476 	/*
7477 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7478 	 * For now it's sufficient to just guard against these conditions.
7479 	 */
7480 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7481 		return;
7482 
7483 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7484 	vrr_params = acrtc->dm_irq_params.vrr_params;
7485 
7486 	if (new_crtc_state->vrr_supported &&
7487 	    config.min_refresh_in_uhz &&
7488 	    config.max_refresh_in_uhz) {
7489 		config.state = new_crtc_state->base.vrr_enabled ?
7490 			VRR_STATE_ACTIVE_VARIABLE :
7491 			VRR_STATE_INACTIVE;
7492 	} else {
7493 		config.state = VRR_STATE_UNSUPPORTED;
7494 	}
7495 
7496 	mod_freesync_build_vrr_params(dm->freesync_module,
7497 				      new_stream,
7498 				      &config, &vrr_params);
7499 
7500 	new_crtc_state->freesync_timing_changed |=
7501 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7502 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7503 
7504 	new_crtc_state->freesync_config = config;
7505 	/* Copy state for access from DM IRQ handler */
7506 	acrtc->dm_irq_params.freesync_config = config;
7507 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7508 	acrtc->dm_irq_params.vrr_params = vrr_params;
7509 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7510 }
7511 
7512 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7513 					    struct dm_crtc_state *new_state)
7514 {
7515 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7516 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7517 
7518 	if (!old_vrr_active && new_vrr_active) {
7519 		/* Transition VRR inactive -> active:
7520 		 * While VRR is active, we must not disable vblank irq, as a
7521 		 * reenable after disable would compute bogus vblank/pflip
7522 		 * timestamps if it likely happened inside display front-porch.
7523 		 *
7524 		 * We also need vupdate irq for the actual core vblank handling
7525 		 * at end of vblank.
7526 		 */
7527 		dm_set_vupdate_irq(new_state->base.crtc, true);
7528 		drm_crtc_vblank_get(new_state->base.crtc);
7529 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7530 				 __func__, new_state->base.crtc->base.id);
7531 	} else if (old_vrr_active && !new_vrr_active) {
7532 		/* Transition VRR active -> inactive:
7533 		 * Allow vblank irq disable again for fixed refresh rate.
7534 		 */
7535 		dm_set_vupdate_irq(new_state->base.crtc, false);
7536 		drm_crtc_vblank_put(new_state->base.crtc);
7537 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7538 				 __func__, new_state->base.crtc->base.id);
7539 	}
7540 }
7541 
7542 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7543 {
7544 	struct drm_plane *plane;
7545 	struct drm_plane_state *old_plane_state, *new_plane_state;
7546 	int i;
7547 
7548 	/*
7549 	 * TODO: Make this per-stream so we don't issue redundant updates for
7550 	 * commits with multiple streams.
7551 	 */
7552 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7553 				       new_plane_state, i)
7554 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7555 			handle_cursor_update(plane, old_plane_state);
7556 }
7557 
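/*
 * Program all plane updates for one CRTC as a single DC update bundle:
 * gather surface/plane/scaling/flip data for every changed plane,
 * throttle against the target vblank for pageflips, then commit the
 * bundle to DC along with any stream-level updates (color management,
 * ABM, VRR, PSR).
 */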
7558 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7559 				    struct dc_state *dc_state,
7560 				    struct drm_device *dev,
7561 				    struct amdgpu_display_manager *dm,
7562 				    struct drm_crtc *pcrtc,
7563 				    bool wait_for_vblank)
7564 {
7565 	uint32_t i;
7566 	uint64_t timestamp_ns;
7567 	struct drm_plane *plane;
7568 	struct drm_plane_state *old_plane_state, *new_plane_state;
7569 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7570 	struct drm_crtc_state *new_pcrtc_state =
7571 			drm_atomic_get_new_crtc_state(state, pcrtc);
7572 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7573 	struct dm_crtc_state *dm_old_crtc_state =
7574 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7575 	int planes_count = 0, vpos, hpos;
7576 	long r;
7577 	unsigned long flags;
7578 	struct amdgpu_bo *abo;
7579 	uint32_t target_vblank, last_flip_vblank;
7580 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7581 	bool pflip_present = false;
7582 	struct {
7583 		struct dc_surface_update surface_updates[MAX_SURFACES];
7584 		struct dc_plane_info plane_infos[MAX_SURFACES];
7585 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7586 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7587 		struct dc_stream_update stream_update;
7588 	} *bundle;
7589 
7590 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7591 
7592 	if (!bundle) {
7593 		dm_error("Failed to allocate update bundle\n");
7594 		goto cleanup;
7595 	}
7596 
7597 	/*
7598 	 * Disable the cursor first if we're disabling all the planes.
7599 	 * It'll remain on the screen after the planes are re-enabled
7600 	 * if we don't.
7601 	 */
7602 	if (acrtc_state->active_planes == 0)
7603 		amdgpu_dm_commit_cursors(state);
7604 
7605 	/* update planes when needed */
7606 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7607 		struct drm_crtc *crtc = new_plane_state->crtc;
7608 		struct drm_crtc_state *new_crtc_state;
7609 		struct drm_framebuffer *fb = new_plane_state->fb;
7610 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7611 		bool plane_needs_flip;
7612 		struct dc_plane_state *dc_plane;
7613 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7614 
7615 		/* Cursor plane is handled after stream updates */
7616 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7617 			continue;
7618 
7619 		if (!fb || !crtc || pcrtc != crtc)
7620 			continue;
7621 
7622 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7623 		if (!new_crtc_state->active)
7624 			continue;
7625 
7626 		dc_plane = dm_new_plane_state->dc_state;
7627 
7628 		bundle->surface_updates[planes_count].surface = dc_plane;
7629 		if (new_pcrtc_state->color_mgmt_changed) {
7630 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7631 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7632 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7633 		}
7634 
7635 		fill_dc_scaling_info(new_plane_state,
7636 				     &bundle->scaling_infos[planes_count]);
7637 
7638 		bundle->surface_updates[planes_count].scaling_info =
7639 			&bundle->scaling_infos[planes_count];
7640 
7641 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7642 
7643 		pflip_present = pflip_present || plane_needs_flip;
7644 
7645 		if (!plane_needs_flip) {
7646 			planes_count += 1;
7647 			continue;
7648 		}
7649 
7650 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7651 
7652 		/*
7653 		 * Wait for all fences on this FB. Do limited wait to avoid
7654 		 * deadlock during GPU reset when this fence will not signal
7655 		 * but we hold reservation lock for the BO.
7656 		 */
7657 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7658 							false,
7659 							msecs_to_jiffies(5000));
7660 		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!\n");
7662 
7663 		fill_dc_plane_info_and_addr(
7664 			dm->adev, new_plane_state,
7665 			afb->tiling_flags,
7666 			&bundle->plane_infos[planes_count],
7667 			&bundle->flip_addrs[planes_count].address,
7668 			afb->tmz_surface, false);
7669 
7670 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7671 				 new_plane_state->plane->index,
7672 				 bundle->plane_infos[planes_count].dcc.enable);
7673 
7674 		bundle->surface_updates[planes_count].plane_info =
7675 			&bundle->plane_infos[planes_count];
7676 
		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
7681 		bundle->flip_addrs[planes_count].flip_immediate =
7682 			crtc->state->async_flip &&
7683 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7684 
7685 		timestamp_ns = ktime_get_ns();
7686 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7687 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7688 		bundle->surface_updates[planes_count].surface = dc_plane;
7689 
7690 		if (!bundle->surface_updates[planes_count].surface) {
7691 			DRM_ERROR("No surface for CRTC: id=%d\n",
7692 					acrtc_attach->crtc_id);
7693 			continue;
7694 		}
7695 
7696 		if (plane == pcrtc->primary)
7697 			update_freesync_state_on_stream(
7698 				dm,
7699 				acrtc_state,
7700 				acrtc_state->stream,
7701 				dc_plane,
7702 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7703 
7704 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7705 				 __func__,
7706 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7707 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7708 
7709 		planes_count += 1;
7710 
7711 	}
7712 
7713 	if (pflip_present) {
7714 		if (!vrr_active) {
7715 			/* Use old throttling in non-vrr fixed refresh rate mode
7716 			 * to keep flip scheduling based on target vblank counts
7717 			 * working in a backwards compatible way, e.g., for
7718 			 * clients using the GLX_OML_sync_control extension or
7719 			 * DRI3/Present extension with defined target_msc.
7720 			 */
7721 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
7724 			/* For variable refresh rate mode only:
7725 			 * Get vblank of last completed flip to avoid > 1 vrr
7726 			 * flips per video frame by use of throttling, but allow
7727 			 * flip programming anywhere in the possibly large
7728 			 * variable vrr vblank interval for fine-grained flip
7729 			 * timing control and more opportunity to avoid stutter
7730 			 * on late submission of flips.
7731 			 */
7732 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7733 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7734 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7735 		}
7736 
7737 		target_vblank = last_flip_vblank + wait_for_vblank;
7738 
7739 		/*
7740 		 * Wait until we're out of the vertical blank period before the one
7741 		 * targeted by the flip
7742 		 */
7743 		while ((acrtc_attach->enabled &&
7744 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7745 							    0, &vpos, &hpos, NULL,
7746 							    NULL, &pcrtc->hwmode)
7747 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7748 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7749 			(int)(target_vblank -
7750 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7751 			usleep_range(1000, 1100);
7752 		}
7753 
		/*
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
7762 		if (acrtc_attach->base.state->event &&
7763 		    acrtc_state->active_planes > 0) {
7764 			drm_crtc_vblank_get(pcrtc);
7765 
7766 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7767 
7768 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7769 			prepare_flip_isr(acrtc_attach);
7770 
7771 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7772 		}
7773 
7774 		if (acrtc_state->stream) {
7775 			if (acrtc_state->freesync_vrr_info_changed)
7776 				bundle->stream_update.vrr_infopacket =
7777 					&acrtc_state->stream->vrr_infopacket;
7778 		}
7779 	}
7780 
7781 	/* Update the planes if changed or disable if we don't have any. */
7782 	if ((planes_count || acrtc_state->active_planes == 0) &&
7783 		acrtc_state->stream) {
7784 		bundle->stream_update.stream = acrtc_state->stream;
7785 		if (new_pcrtc_state->mode_changed) {
7786 			bundle->stream_update.src = acrtc_state->stream->src;
7787 			bundle->stream_update.dst = acrtc_state->stream->dst;
7788 		}
7789 
7790 		if (new_pcrtc_state->color_mgmt_changed) {
7791 			/*
7792 			 * TODO: This isn't fully correct since we've actually
7793 			 * already modified the stream in place.
7794 			 */
7795 			bundle->stream_update.gamut_remap =
7796 				&acrtc_state->stream->gamut_remap_matrix;
7797 			bundle->stream_update.output_csc_transform =
7798 				&acrtc_state->stream->csc_color_matrix;
7799 			bundle->stream_update.out_transfer_func =
7800 				acrtc_state->stream->out_transfer_func;
7801 		}
7802 
7803 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7804 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7805 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7806 
7807 		/*
7808 		 * If FreeSync state on the stream has changed then we need to
7809 		 * re-adjust the min/max bounds now that DC doesn't handle this
7810 		 * as part of commit.
7811 		 */
7812 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7813 		    amdgpu_dm_vrr_active(acrtc_state)) {
7814 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7815 			dc_stream_adjust_vmin_vmax(
7816 				dm->dc, acrtc_state->stream,
7817 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7818 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7819 		}
7820 		mutex_lock(&dm->dc_lock);
7821 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7822 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7823 			amdgpu_dm_psr_disable(acrtc_state->stream);
7824 
7825 		dc_commit_updates_for_stream(dm->dc,
7826 						     bundle->surface_updates,
7827 						     planes_count,
7828 						     acrtc_state->stream,
7829 						     &bundle->stream_update,
7830 						     dc_state);
7831 
		/*
7833 		 * Enable or disable the interrupts on the backend.
7834 		 *
7835 		 * Most pipes are put into power gating when unused.
7836 		 *
7837 		 * When power gating is enabled on a pipe we lose the
7838 		 * interrupt enablement state when power gating is disabled.
7839 		 *
7840 		 * So we need to update the IRQ control state in hardware
7841 		 * whenever the pipe turns on (since it could be previously
7842 		 * power gated) or off (since some pipes can't be power gated
7843 		 * on some ASICs).
7844 		 */
7845 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7846 			dm_update_pflip_irq_state(drm_to_adev(dev),
7847 						  acrtc_attach);
7848 
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
				!acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		} else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}
7858 
7859 		mutex_unlock(&dm->dc_lock);
7860 	}
7861 
7862 	/*
7863 	 * Update cursor state *after* programming all the planes.
7864 	 * This avoids redundant programming in the case where we're going
7865 	 * to be disabling a single plane - those pipes are being disabled.
7866 	 */
7867 	if (acrtc_state->active_planes)
7868 		amdgpu_dm_commit_cursors(state);
7869 
7870 cleanup:
7871 	kfree(bundle);
7872 }
7873 
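/*
 * Notify the audio driver about ELD changes: first signal removals for
 * connectors that lost or changed their CRTC, then signal additions for
 * streams that went through a modeset, using the audio instance
 * reported by DC.
 */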
7874 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7875 				   struct drm_atomic_state *state)
7876 {
7877 	struct amdgpu_device *adev = drm_to_adev(dev);
7878 	struct amdgpu_dm_connector *aconnector;
7879 	struct drm_connector *connector;
7880 	struct drm_connector_state *old_con_state, *new_con_state;
7881 	struct drm_crtc_state *new_crtc_state;
7882 	struct dm_crtc_state *new_dm_crtc_state;
7883 	const struct dc_stream_status *status;
7884 	int i, inst;
7885 
	/* Notify audio device removals. */
7887 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7888 		if (old_con_state->crtc != new_con_state->crtc) {
7889 			/* CRTC changes require notification. */
7890 			goto notify;
7891 		}
7892 
7893 		if (!new_con_state->crtc)
7894 			continue;
7895 
7896 		new_crtc_state = drm_atomic_get_new_crtc_state(
7897 			state, new_con_state->crtc);
7898 
7899 		if (!new_crtc_state)
7900 			continue;
7901 
7902 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7903 			continue;
7904 
7905 	notify:
7906 		aconnector = to_amdgpu_dm_connector(connector);
7907 
7908 		mutex_lock(&adev->dm.audio_lock);
7909 		inst = aconnector->audio_inst;
7910 		aconnector->audio_inst = -1;
7911 		mutex_unlock(&adev->dm.audio_lock);
7912 
7913 		amdgpu_dm_audio_eld_notify(adev, inst);
7914 	}
7915 
7916 	/* Notify audio device additions. */
7917 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7918 		if (!new_con_state->crtc)
7919 			continue;
7920 
7921 		new_crtc_state = drm_atomic_get_new_crtc_state(
7922 			state, new_con_state->crtc);
7923 
7924 		if (!new_crtc_state)
7925 			continue;
7926 
7927 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7928 			continue;
7929 
7930 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7931 		if (!new_dm_crtc_state->stream)
7932 			continue;
7933 
7934 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7935 		if (!status)
7936 			continue;
7937 
7938 		aconnector = to_amdgpu_dm_connector(connector);
7939 
7940 		mutex_lock(&adev->dm.audio_lock);
7941 		inst = status->audio_inst;
7942 		aconnector->audio_inst = inst;
7943 		mutex_unlock(&adev->dm.audio_lock);
7944 
7945 		amdgpu_dm_audio_eld_notify(adev, inst);
7946 	}
7947 }
7948 
/**
7950  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7951  * @crtc_state: the DRM CRTC state
7952  * @stream_state: the DC stream state.
7953  *
7954  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7955  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7956  */
7957 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7958 						struct dc_stream_state *stream_state)
7959 {
7960 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7961 }
7962 
7963 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7964 				   struct drm_atomic_state *state,
7965 				   bool nonblock)
7966 {
	/*
	 * Add check here for SoCs that support hardware cursor plane, to
	 * unset legacy_cursor_update
	 */

	/* TODO: handle EINTR, re-enable IRQ */
	return drm_atomic_helper_commit(dev, state, nonblock);
}
7976 
7977 /**
7978  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7979  * @state: The atomic state to commit
7980  *
7981  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
7983  * atomic check should have filtered anything non-kosher.
7984  */
7985 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7986 {
7987 	struct drm_device *dev = state->dev;
7988 	struct amdgpu_device *adev = drm_to_adev(dev);
7989 	struct amdgpu_display_manager *dm = &adev->dm;
7990 	struct dm_atomic_state *dm_state;
7991 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7992 	uint32_t i, j;
7993 	struct drm_crtc *crtc;
7994 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7995 	unsigned long flags;
7996 	bool wait_for_vblank = true;
7997 	struct drm_connector *connector;
7998 	struct drm_connector_state *old_con_state, *new_con_state;
7999 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8000 	int crtc_disable_count = 0;
8001 	bool mode_set_reset_required = false;
8002 
8003 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8004 
8005 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8006 	drm_atomic_helper_calc_timestamping_constants(state);
8007 
8008 	dm_state = dm_atomic_get_new_state(state);
8009 	if (dm_state && dm_state->context) {
8010 		dc_state = dm_state->context;
8011 	} else {
8012 		/* No state changes, retain current state. */
8013 		dc_state_temp = dc_create_state(dm->dc);
8014 		ASSERT(dc_state_temp);
8015 		dc_state = dc_state_temp;
8016 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8017 	}
8018 
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
8021 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8022 
8023 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8024 
8025 		if (old_crtc_state->active &&
8026 		    (!new_crtc_state->active ||
8027 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8028 			manage_dm_interrupts(adev, acrtc, false);
8029 			dc_stream_release(dm_old_crtc_state->stream);
8030 		}
8031 	}
8032 
8033 	/* update changed items */
8034 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8035 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8036 
8037 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8038 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8039 
		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);
8051 
8052 		/* Copy all transient state flags into dc state */
8053 		if (dm_new_crtc_state->stream) {
8054 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8055 							    dm_new_crtc_state->stream);
8056 		}
8057 
		/* Handle the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
8061 
8062 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8063 
8064 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8065 
8066 			if (!dm_new_crtc_state->stream) {
				/*
				 * This can happen due to issues with
				 * userspace notification delivery: userspace
				 * tries to set a mode on a display that is in
				 * fact disconnected, so dc_sink is NULL on the
				 * aconnector. We expect a mode-reset to come
				 * soon.
				 *
				 * It can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In either case we want to pretend we still
				 * have a sink to keep the pipe running, so
				 * that the hw state stays consistent with the
				 * sw state.
				 */
8082 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8083 						__func__, acrtc->base.base.id);
8084 				continue;
8085 			}
8086 
8087 			if (dm_old_crtc_state->stream)
8088 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8089 
8090 			pm_runtime_get_noresume(dev->dev);
8091 
8092 			acrtc->enabled = true;
8093 			acrtc->hw_mode = new_crtc_state->mode;
8094 			crtc->hwmode = new_crtc_state->mode;
8095 			mode_set_reset_required = true;
8096 		} else if (modereset_required(new_crtc_state)) {
8097 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8098 			/* i.e. reset mode */
8099 			if (dm_old_crtc_state->stream)
8100 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8101 			mode_set_reset_required = true;
8102 		}
8103 	} /* for_each_crtc_in_state() */
8104 
8105 	if (dc_state) {
		/* if there was a mode set or reset, disable eDP PSR */
8107 		if (mode_set_reset_required)
8108 			amdgpu_dm_psr_disable_all(dm);
8109 
8110 		dm_enable_per_frame_crtc_master_sync(dc_state);
8111 		mutex_lock(&dm->dc_lock);
8112 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8113 		mutex_unlock(&dm->dc_lock);
8114 	}
8115 
8116 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8117 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8118 
8119 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8120 
8121 		if (dm_new_crtc_state->stream != NULL) {
8122 			const struct dc_stream_status *status =
8123 					dc_stream_get_status(dm_new_crtc_state->stream);
8124 
8125 			if (!status)
8126 				status = dc_stream_get_status_from_state(dc_state,
8127 									 dm_new_crtc_state->stream);
8128 			if (!status)
8129 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8130 			else
8131 				acrtc->otg_inst = status->primary_otg_inst;
8132 		}
8133 	}
8134 #ifdef CONFIG_DRM_AMD_DC_HDCP
8135 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8136 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8137 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8138 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8139 
8140 		new_crtc_state = NULL;
8141 
8142 		if (acrtc)
8143 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8144 
8145 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8146 
8147 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8148 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8149 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8150 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8151 			continue;
8152 		}
8153 
		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8160 	}
8161 #endif
8162 
8163 	/* Handle connector state changes */
8164 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8165 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8166 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8167 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8168 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8169 		struct dc_stream_update stream_update;
8170 		struct dc_info_packet hdr_packet;
8171 		struct dc_stream_status *status = NULL;
8172 		bool abm_changed, hdr_changed, scaling_changed;
8173 
8174 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8175 		memset(&stream_update, 0, sizeof(stream_update));
8176 
8177 		if (acrtc) {
8178 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8179 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8180 		}
8181 
8182 		/* Skip any modesets/resets */
8183 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8184 			continue;
8185 
8186 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8187 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8188 
8189 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8190 							     dm_old_con_state);
8191 
8192 		abm_changed = dm_new_crtc_state->abm_level !=
8193 			      dm_old_crtc_state->abm_level;
8194 
8195 		hdr_changed =
8196 			is_hdr_metadata_different(old_con_state, new_con_state);
8197 
8198 		if (!scaling_changed && !abm_changed && !hdr_changed)
8199 			continue;
8200 
8201 		stream_update.stream = dm_new_crtc_state->stream;
8202 		if (scaling_changed) {
8203 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8204 					dm_new_con_state, dm_new_crtc_state->stream);
8205 
8206 			stream_update.src = dm_new_crtc_state->stream->src;
8207 			stream_update.dst = dm_new_crtc_state->stream->dst;
8208 		}
8209 
8210 		if (abm_changed) {
8211 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8212 
8213 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8214 		}
8215 
8216 		if (hdr_changed) {
8217 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8218 			stream_update.hdr_static_metadata = &hdr_packet;
8219 		}
8220 
		status = dc_stream_get_status(dm_new_crtc_state->stream);
		if (WARN_ON(!status))
			continue;
		WARN_ON(!status->plane_count);
8224 
8225 		/*
8226 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8227 		 * Here we create an empty update on each plane.
8228 		 * To fix this, DC should permit updating only stream properties.
8229 		 */
8230 		for (j = 0; j < status->plane_count; j++)
8231 			dummy_updates[j].surface = status->plane_states[0];
8232 
8233 
8234 		mutex_lock(&dm->dc_lock);
8235 		dc_commit_updates_for_stream(dm->dc,
8236 						     dummy_updates,
8237 						     status->plane_count,
8238 						     dm_new_crtc_state->stream,
8239 						     &stream_update,
8240 						     dc_state);
8241 		mutex_unlock(&dm->dc_lock);
8242 	}
8243 
8244 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8245 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8246 				      new_crtc_state, i) {
8247 		if (old_crtc_state->active && !new_crtc_state->active)
8248 			crtc_disable_count++;
8249 
8250 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8251 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8252 
8253 		/* For freesync config update on crtc state and params for irq */
8254 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8255 
8256 		/* Handle vrr on->off / off->on transitions */
8257 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8258 						dm_new_crtc_state);
8259 	}
8260 
	/*
8262 	 * Enable interrupts for CRTCs that are newly enabled or went through
8263 	 * a modeset. It was intentionally deferred until after the front end
8264 	 * state was modified to wait until the OTG was on and so the IRQ
8265 	 * handlers didn't access stale or invalid state.
8266 	 */
8267 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8268 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8269 
8270 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8271 
8272 		if (new_crtc_state->active &&
8273 		    (!old_crtc_state->active ||
8274 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8275 			dc_stream_retain(dm_new_crtc_state->stream);
8276 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8277 			manage_dm_interrupts(adev, acrtc, true);
8278 
8279 #ifdef CONFIG_DEBUG_FS
			/*
8281 			 * Frontend may have changed so reapply the CRC capture
8282 			 * settings for the stream.
8283 			 */
8284 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8285 
8286 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8287 				amdgpu_dm_crtc_configure_crc_source(
8288 					crtc, dm_new_crtc_state,
8289 					dm_new_crtc_state->crc_src);
8290 			}
8291 #endif
8292 		}
8293 	}
8294 
8295 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8296 		if (new_crtc_state->async_flip)
8297 			wait_for_vblank = false;
8298 
8299 	/* update planes when needed per crtc*/
8300 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8301 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8302 
8303 		if (dm_new_crtc_state->stream)
8304 			amdgpu_dm_commit_planes(state, dc_state, dev,
8305 						dm, crtc, wait_for_vblank);
8306 	}
8307 
8308 	/* Update audio instances for each connector. */
8309 	amdgpu_dm_commit_audio(dev, state);
8310 
	/*
	 * Send a vblank event for each CRTC whose event was not handled in
	 * the flip path, and mark the event consumed for
	 * drm_atomic_helper_commit_hw_done().
	 */
8315 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8316 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8317 
8318 		if (new_crtc_state->event)
8319 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8320 
8321 		new_crtc_state->event = NULL;
8322 	}
8323 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8324 
8325 	/* Signal HW programming completion */
8326 	drm_atomic_helper_commit_hw_done(state);
8327 
8328 	if (wait_for_vblank)
8329 		drm_atomic_helper_wait_for_flip_done(dev, state);
8330 
8331 	drm_atomic_helper_cleanup_planes(dev, state);
8332 
8333 	/*
8334 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8335 	 * so we can put the GPU into runtime suspend if we're not driving any
8336 	 * displays anymore
8337 	 */
8338 	for (i = 0; i < crtc_disable_count; i++)
8339 		pm_runtime_put_autosuspend(dev->dev);
8340 	pm_runtime_mark_last_busy(dev->dev);
8341 
8342 	if (dc_state_temp)
8343 		dc_release_state(dc_state_temp);
8344 }
8345 
8346 
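/*
 * Build and commit a minimal atomic state (connector + CRTC + primary
 * plane) with mode_changed forced, to restore the previous display
 * configuration without a userspace commit.
 */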
8347 static int dm_force_atomic_commit(struct drm_connector *connector)
8348 {
8349 	int ret = 0;
8350 	struct drm_device *ddev = connector->dev;
8351 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8352 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8353 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8354 	struct drm_connector_state *conn_state;
8355 	struct drm_crtc_state *crtc_state;
8356 	struct drm_plane_state *plane_state;
8357 
8358 	if (!state)
8359 		return -ENOMEM;
8360 
8361 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8362 
	/* Construct an atomic state to restore the previous display setting */

	/* Attach connector to drm_atomic_state */
8368 	conn_state = drm_atomic_get_connector_state(state, connector);
8369 
8370 	ret = PTR_ERR_OR_ZERO(conn_state);
8371 	if (ret)
8372 		goto err;
8373 
	/* Attach CRTC to drm_atomic_state */
8375 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8376 
8377 	ret = PTR_ERR_OR_ZERO(crtc_state);
8378 	if (ret)
8379 		goto err;
8380 
8381 	/* force a restore */
8382 	crtc_state->mode_changed = true;
8383 
8384 	/* Attach plane to drm_atomic_state */
8385 	plane_state = drm_atomic_get_plane_state(state, plane);
8386 
8387 	ret = PTR_ERR_OR_ZERO(plane_state);
8388 	if (ret)
		goto err;

8392 	/* Call commit internally with the state we just constructed */
8393 	ret = drm_atomic_commit(state);
8394 	if (!ret)
8395 		return 0;
8396 
8397 err:
8398 	DRM_ERROR("Restoring old state failed with %i\n", ret);
8399 	drm_atomic_state_put(state);
8400 
8401 	return ret;
8402 }
8403 
8404 /*
8405  * This function handles all cases when set mode does not come upon hotplug.
8406  * This includes when a display is unplugged then plugged back into the
8407  * same port and when running without usermode desktop manager supprot
8408  */
8409 void dm_restore_drm_connector_state(struct drm_device *dev,
8410 				    struct drm_connector *connector)
8411 {
8412 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8413 	struct amdgpu_crtc *disconnected_acrtc;
8414 	struct dm_crtc_state *acrtc_state;
8415 
8416 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8417 		return;
8418 
8419 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8420 	if (!disconnected_acrtc)
8421 		return;
8422 
8423 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8424 	if (!acrtc_state->stream)
8425 		return;
8426 
8427 	/*
8428 	 * If the previous sink is not released and different from the current,
8429 	 * we deduce we are in a state where we can not rely on usermode call
8430 	 * to turn on the display, so we do it here
8431 	 */
8432 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8433 		dm_force_atomic_commit(&aconnector->base);
8434 }
8435 
8436 /*
8437  * Grabs all modesetting locks to serialize against any blocking commits,
8438  * Waits for completion of all non blocking commits.
8439  */
static int do_acquire_global_lock(struct drm_device *dev,
8441 				 struct drm_atomic_state *state)
8442 {
8443 	struct drm_crtc *crtc;
8444 	struct drm_crtc_commit *commit;
8445 	long ret;
8446 
8447 	/*
8448 	 * Adding all modeset locks to aquire_ctx will
8449 	 * ensure that when the framework release it the
8450 	 * extra locks we are locking here will get released to
8451 	 */
8452 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8453 	if (ret)
8454 		return ret;
8455 
8456 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8457 		spin_lock(&crtc->commit_lock);
8458 		commit = list_first_entry_or_null(&crtc->commit_list,
8459 				struct drm_crtc_commit, commit_entry);
8460 		if (commit)
8461 			drm_crtc_commit_get(commit);
8462 		spin_unlock(&crtc->commit_lock);
8463 
8464 		if (!commit)
8465 			continue;
8466 
8467 		/*
8468 		 * Make sure all pending HW programming completed and
8469 		 * page flips done
8470 		 */
8471 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8472 
8473 		if (ret > 0)
8474 			ret = wait_for_completion_interruptible_timeout(
8475 					&commit->flip_done, 10*HZ);
8476 
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
8480 
8481 		drm_crtc_commit_put(commit);
8482 	}
8483 
8484 	return ret < 0 ? ret : 0;
8485 }
8486 
8487 static void get_freesync_config_for_crtc(
8488 	struct dm_crtc_state *new_crtc_state,
8489 	struct dm_connector_state *new_con_state)
8490 {
8491 	struct mod_freesync_config config = {0};
8492 	struct amdgpu_dm_connector *aconnector =
8493 			to_amdgpu_dm_connector(new_con_state->base.connector);
8494 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8495 	int vrefresh = drm_mode_vrefresh(mode);
8496 
8497 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8498 					vrefresh >= aconnector->min_vfreq &&
8499 					vrefresh <= aconnector->max_vfreq;
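	/*
	 * Example: a sink advertising a 48-144 Hz FreeSync range supports VRR
	 * for a 60 Hz mode, but not for a 30 Hz one.
	 */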
8500 
8501 	if (new_crtc_state->vrr_supported) {
8502 		new_crtc_state->stream->ignore_msa_timing_param = true;
8503 		config.state = new_crtc_state->base.vrr_enabled ?
8504 				VRR_STATE_ACTIVE_VARIABLE :
8505 				VRR_STATE_INACTIVE;
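		/* aconnector->min/max_vfreq are in Hz; DC expects micro-Hz. */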
8506 		config.min_refresh_in_uhz =
8507 				aconnector->min_vfreq * 1000000;
8508 		config.max_refresh_in_uhz =
8509 				aconnector->max_vfreq * 1000000;
8510 		config.vsif_supported = true;
8511 		config.btr = true;
8512 	}
8513 
8514 	new_crtc_state->freesync_config = config;
8515 }
8516 
8517 static void reset_freesync_config_for_crtc(
8518 	struct dm_crtc_state *new_crtc_state)
8519 {
8520 	new_crtc_state->vrr_supported = false;
8521 
8522 	memset(&new_crtc_state->vrr_infopacket, 0,
8523 	       sizeof(new_crtc_state->vrr_infopacket));
8524 }
8525 
8526 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8527 				struct drm_atomic_state *state,
8528 				struct drm_crtc *crtc,
8529 				struct drm_crtc_state *old_crtc_state,
8530 				struct drm_crtc_state *new_crtc_state,
8531 				bool enable,
8532 				bool *lock_and_validation_needed)
8533 {
8534 	struct dm_atomic_state *dm_state = NULL;
8535 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8536 	struct dc_stream_state *new_stream;
8537 	int ret = 0;
8538 
8539 	/*
8540 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8541 	 * update changed items
8542 	 */
8543 	struct amdgpu_crtc *acrtc = NULL;
8544 	struct amdgpu_dm_connector *aconnector = NULL;
8545 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8546 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8547 
8548 	new_stream = NULL;
8549 
8550 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8551 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8552 	acrtc = to_amdgpu_crtc(crtc);
8553 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8554 
8555 	/* TODO This hack should go away */
8556 	if (aconnector && enable) {
8557 		/* Make sure fake sink is created in plug-in scenario */
8558 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8559 							    &aconnector->base);
8560 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8561 							    &aconnector->base);
8562 
8563 		if (IS_ERR(drm_new_conn_state)) {
8564 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8565 			goto fail;
8566 		}
8567 
8568 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8569 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8570 
8571 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8572 			goto skip_modeset;
8573 
8574 		new_stream = create_validate_stream_for_sink(aconnector,
8575 							     &new_crtc_state->mode,
8576 							     dm_new_conn_state,
8577 							     dm_old_crtc_state->stream);
8578 
8579 		/*
8580 		 * we can have no stream on ACTION_SET if a display
8581 		 * was disconnected during S3, in this case it is not an
8582 		 * error, the OS will be updated after detection, and
8583 		 * will do the right thing on next atomic commit
8584 		 */
8585 
8586 		if (!new_stream) {
8587 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8588 					__func__, acrtc->base.base.id);
8589 			ret = -ENOMEM;
8590 			goto fail;
8591 		}
8592 
8593 		/*
8594 		 * TODO: Check VSDB bits to decide whether this should
8595 		 * be enabled or not.
8596 		 */
8597 		new_stream->triggered_crtc_reset.enabled =
8598 			dm->force_timing_sync;
8599 
8600 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8601 
8602 		ret = fill_hdr_info_packet(drm_new_conn_state,
8603 					   &new_stream->hdr_static_metadata);
8604 		if (ret)
8605 			goto fail;
8606 
8607 		/*
8608 		 * If we already removed the old stream from the context
8609 		 * (and set the new stream to NULL) then we can't reuse
8610 		 * the old stream even if the stream and scaling are unchanged.
8611 		 * We'll hit the BUG_ON and black screen.
8612 		 *
8613 		 * TODO: Refactor this function to allow this check to work
8614 		 * in all conditions.
8615 		 */
8616 		if (dm_new_crtc_state->stream &&
8617 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8618 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8619 			new_crtc_state->mode_changed = false;
8620 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8621 					 new_crtc_state->mode_changed);
8622 		}
8623 	}
8624 
8625 	/* mode_changed flag may get updated above, need to check again */
8626 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8627 		goto skip_modeset;
8628 
	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
8633 		acrtc->crtc_id,
8634 		new_crtc_state->enable,
8635 		new_crtc_state->active,
8636 		new_crtc_state->planes_changed,
8637 		new_crtc_state->mode_changed,
8638 		new_crtc_state->active_changed,
8639 		new_crtc_state->connectors_changed);
8640 
8641 	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {
		if (!dm_old_crtc_state->stream)
8645 			goto skip_modeset;
8646 
8647 		ret = dm_atomic_get_state(state, &dm_state);
8648 		if (ret)
8649 			goto fail;
8650 
8651 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8652 				crtc->base.id);
8653 
8654 		/* i.e. reset mode */
8655 		if (dc_remove_stream_from_ctx(
8656 				dm->dc,
8657 				dm_state->context,
8658 				dm_old_crtc_state->stream) != DC_OK) {
8659 			ret = -EINVAL;
8660 			goto fail;
8661 		}
8662 
8663 		dc_stream_release(dm_old_crtc_state->stream);
8664 		dm_new_crtc_state->stream = NULL;
8665 
8666 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8667 
8668 		*lock_and_validation_needed = true;
8669 
8670 	} else {/* Add stream for any updated/enabled CRTC */
8671 		/*
8672 		 * Quick fix to prevent NULL pointer on new_stream when
8673 		 * added MST connectors not found in existing crtc_state in the chained mode
8674 		 * TODO: need to dig out the root cause of that
8675 		 */
8676 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8677 			goto skip_modeset;
8678 
8679 		if (modereset_required(new_crtc_state))
8680 			goto skip_modeset;
8681 
8682 		if (modeset_required(new_crtc_state, new_stream,
8683 				     dm_old_crtc_state->stream)) {
8684 
8685 			WARN_ON(dm_new_crtc_state->stream);
8686 
8687 			ret = dm_atomic_get_state(state, &dm_state);
8688 			if (ret)
8689 				goto fail;
8690 
8691 			dm_new_crtc_state->stream = new_stream;
8692 
8693 			dc_stream_retain(new_stream);
8694 
8695 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8696 						crtc->base.id);
8697 
8698 			if (dc_add_stream_to_ctx(
8699 					dm->dc,
8700 					dm_state->context,
8701 					dm_new_crtc_state->stream) != DC_OK) {
8702 				ret = -EINVAL;
8703 				goto fail;
8704 			}
8705 
8706 			*lock_and_validation_needed = true;
8707 		}
8708 	}
8709 
8710 skip_modeset:
8711 	/* Release extra reference */
8712 	if (new_stream)
		dc_stream_release(new_stream);
8714 
8715 	/*
8716 	 * We want to do dc stream updates that do not require a
8717 	 * full modeset below.
8718 	 */
8719 	if (!(enable && aconnector && new_crtc_state->active))
8720 		return 0;
8721 	/*
8722 	 * Given above conditions, the dc state cannot be NULL because:
8723 	 * 1. We're in the process of enabling CRTCs (just been added
8724 	 *    to the dc context, or already is on the context)
8725 	 * 2. Has a valid connector attached, and
8726 	 * 3. Is currently active and enabled.
8727 	 * => The dc stream state currently exists.
8728 	 */
8729 	BUG_ON(dm_new_crtc_state->stream == NULL);
8730 
8731 	/* Scaling or underscan settings */
8732 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8733 		update_stream_scaling_settings(
8734 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8735 
8736 	/* ABM settings */
8737 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8738 
8739 	/*
8740 	 * Color management settings. We also update color properties
8741 	 * when a modeset is needed, to ensure it gets reprogrammed.
8742 	 */
8743 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8744 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8745 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8746 		if (ret)
8747 			goto fail;
8748 	}
8749 
8750 	/* Update Freesync settings. */
8751 	get_freesync_config_for_crtc(dm_new_crtc_state,
8752 				     dm_new_conn_state);
8753 
8754 	return ret;
8755 
8756 fail:
8757 	if (new_stream)
8758 		dc_stream_release(new_stream);
8759 	return ret;
8760 }
8761 
8762 static bool should_reset_plane(struct drm_atomic_state *state,
8763 			       struct drm_plane *plane,
8764 			       struct drm_plane_state *old_plane_state,
8765 			       struct drm_plane_state *new_plane_state)
8766 {
8767 	struct drm_plane *other;
8768 	struct drm_plane_state *old_other_state, *new_other_state;
8769 	struct drm_crtc_state *new_crtc_state;
8770 	int i;
8771 
8772 	/*
8773 	 * TODO: Remove this hack once the checks below are sufficient
8774 	 * enough to determine when we need to reset all the planes on
8775 	 * the stream.
8776 	 */
8777 	if (state->allow_modeset)
8778 		return true;
8779 
8780 	/* Exit early if we know that we're adding or removing the plane. */
8781 	if (old_plane_state->crtc != new_plane_state->crtc)
8782 		return true;
8783 
8784 	/* old crtc == new_crtc == NULL, plane not in context. */
8785 	if (!new_plane_state->crtc)
8786 		return false;
8787 
8788 	new_crtc_state =
8789 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8790 
8791 	if (!new_crtc_state)
8792 		return true;
8793 
8794 	/* CRTC Degamma changes currently require us to recreate planes. */
8795 	if (new_crtc_state->color_mgmt_changed)
8796 		return true;
8797 
8798 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8799 		return true;
8800 
8801 	/*
8802 	 * If there are any new primary or overlay planes being added or
8803 	 * removed then the z-order can potentially change. To ensure
8804 	 * correct z-order and pipe acquisition the current DC architecture
8805 	 * requires us to remove and recreate all existing planes.
8806 	 *
8807 	 * TODO: Come up with a more elegant solution for this.
8808 	 */
8809 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
8812 			continue;
8813 
8814 		if (old_other_state->crtc != new_plane_state->crtc &&
8815 		    new_other_state->crtc != new_plane_state->crtc)
8816 			continue;
8817 
8818 		if (old_other_state->crtc != new_other_state->crtc)
8819 			return true;
8820 
8821 		/* Src/dst size and scaling updates. */
8822 		if (old_other_state->src_w != new_other_state->src_w ||
8823 		    old_other_state->src_h != new_other_state->src_h ||
8824 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8825 		    old_other_state->crtc_h != new_other_state->crtc_h)
8826 			return true;
8827 
8828 		/* Rotation / mirroring updates. */
8829 		if (old_other_state->rotation != new_other_state->rotation)
8830 			return true;
8831 
8832 		/* Blending updates. */
8833 		if (old_other_state->pixel_blend_mode !=
8834 		    new_other_state->pixel_blend_mode)
8835 			return true;
8836 
8837 		/* Alpha updates. */
8838 		if (old_other_state->alpha != new_other_state->alpha)
8839 			return true;
8840 
8841 		/* Colorspace changes. */
8842 		if (old_other_state->color_range != new_other_state->color_range ||
8843 		    old_other_state->color_encoding != new_other_state->color_encoding)
8844 			return true;
8845 
8846 		/* Framebuffer checks fall at the end. */
8847 		if (!old_other_state->fb || !new_other_state->fb)
8848 			continue;
8849 
8850 		/* Pixel format changes can require bandwidth updates. */
8851 		if (old_other_state->fb->format != new_other_state->fb->format)
8852 			return true;
8853 
8854 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8855 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8856 
8857 		/* Tiling and DCC changes also require bandwidth updates. */
8858 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
8859 		    old_afb->base.modifier != new_afb->base.modifier)
8860 			return true;
8861 	}
8862 
8863 	return false;
8864 }
8865 
8866 static int dm_update_plane_state(struct dc *dc,
8867 				 struct drm_atomic_state *state,
8868 				 struct drm_plane *plane,
8869 				 struct drm_plane_state *old_plane_state,
8870 				 struct drm_plane_state *new_plane_state,
8871 				 bool enable,
8872 				 bool *lock_and_validation_needed)
8873 {
8874 
8875 	struct dm_atomic_state *dm_state = NULL;
8876 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8877 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8878 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8879 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8880 	struct amdgpu_crtc *new_acrtc;
8881 	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
8886 	old_plane_crtc = old_plane_state->crtc;
8887 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8888 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8889 
	/* TODO: Implement a better atomic check for the cursor plane */
8891 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8892 		if (!enable || !new_plane_crtc ||
8893 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8894 			return 0;
8895 
8896 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8897 
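		/*
		 * max_cursor_width/height come from the ASIC caps (typically
		 * 128 or 256 pixels, depending on the display hardware).
		 */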
8898 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8899 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8900 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8901 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8902 			return -EINVAL;
8903 		}
8904 
8905 		return 0;
8906 	}
8907 
8908 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8909 					 new_plane_state);
8910 
8911 	/* Remove any changed/removed planes */
8912 	if (!enable) {
8913 		if (!needs_reset)
8914 			return 0;
8915 
8916 		if (!old_plane_crtc)
8917 			return 0;
8918 
8919 		old_crtc_state = drm_atomic_get_old_crtc_state(
8920 				state, old_plane_crtc);
8921 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8922 
8923 		if (!dm_old_crtc_state->stream)
8924 			return 0;
8925 
8926 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8927 				plane->base.id, old_plane_crtc->base.id);
8928 
8929 		ret = dm_atomic_get_state(state, &dm_state);
8930 		if (ret)
8931 			return ret;
8932 
8933 		if (!dc_remove_plane_from_context(
8934 				dc,
8935 				dm_old_crtc_state->stream,
8936 				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

8943 		dc_plane_state_release(dm_old_plane_state->dc_state);
8944 		dm_new_plane_state->dc_state = NULL;
8945 
8946 		*lock_and_validation_needed = true;
8947 
8948 	} else { /* Add new planes */
8949 		struct dc_plane_state *dc_new_plane_state;
8950 
8951 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8952 			return 0;
8953 
8954 		if (!new_plane_crtc)
8955 			return 0;
8956 
8957 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8958 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8959 
8960 		if (!dm_new_crtc_state->stream)
8961 			return 0;
8962 
8963 		if (!needs_reset)
8964 			return 0;
8965 
8966 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8967 		if (ret)
8968 			return ret;
8969 
8970 		WARN_ON(dm_new_plane_state->dc_state);
8971 
8972 		dc_new_plane_state = dc_create_plane_state(dc);
8973 		if (!dc_new_plane_state)
8974 			return -ENOMEM;
8975 
8976 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8977 				plane->base.id, new_plane_crtc->base.id);
8978 
8979 		ret = fill_dc_plane_attributes(
8980 			drm_to_adev(new_plane_crtc->dev),
8981 			dc_new_plane_state,
8982 			new_plane_state,
8983 			new_crtc_state);
8984 		if (ret) {
8985 			dc_plane_state_release(dc_new_plane_state);
8986 			return ret;
8987 		}
8988 
8989 		ret = dm_atomic_get_state(state, &dm_state);
8990 		if (ret) {
8991 			dc_plane_state_release(dc_new_plane_state);
8992 			return ret;
8993 		}
8994 
8995 		/*
8996 		 * Any atomic check errors that occur after this will
8997 		 * not need a release. The plane state will be attached
8998 		 * to the stream, and therefore part of the atomic
8999 		 * state. It'll be released when the atomic state is
9000 		 * cleaned.
9001 		 */
9002 		if (!dc_add_plane_to_context(
9003 				dc,
9004 				dm_new_crtc_state->stream,
9005 				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
9009 			return -EINVAL;
9010 		}
9011 
9012 		dm_new_plane_state->dc_state = dc_new_plane_state;
9013 
		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
9017 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9018 
9019 		*lock_and_validation_needed = true;
9020 	}
9021 
9022 
9023 	return ret;
9024 }
9025 
9026 #if defined(CONFIG_DRM_AMD_DC_DCN)
9027 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9028 {
9029 	struct drm_connector *connector;
9030 	struct drm_connector_state *conn_state;
9031 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
9034 		if (conn_state->crtc != crtc)
9035 			continue;
9036 
9037 		aconnector = to_amdgpu_dm_connector(connector);
9038 		if (!aconnector->port || !aconnector->mst_port)
9039 			aconnector = NULL;
9040 		else
9041 			break;
9042 	}
9043 
9044 	if (!aconnector)
9045 		return 0;
9046 
9047 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9048 }
9049 #endif
9050 
9051 /**
9052  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9053  * @dev: The DRM device
 * @state: The atomic state to check
9055  *
9056  * Validate that the given atomic state is programmable by DC into hardware.
9057  * This involves constructing a &struct dc_state reflecting the new hardware
9058  * state we wish to commit, then querying DC to see if it is programmable. It's
9059  * important not to modify the existing DC state. Otherwise, atomic_check
9060  * may unexpectedly commit hardware changes.
9061  *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any
 * outstanding flip using DRM's synchronization events.
9067  *
9068  * Note that DM adds the affected connectors for all CRTCs in state, when that
9069  * might not seem necessary. This is because DC stream creation requires the
9070  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9071  * be possible but non-trivial - a possible TODO item.
9072  *
 * Return: 0 on success, or a negative error code on validation failure.
9074  */
9075 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9076 				  struct drm_atomic_state *state)
9077 {
9078 	struct amdgpu_device *adev = drm_to_adev(dev);
9079 	struct dm_atomic_state *dm_state = NULL;
9080 	struct dc *dc = adev->dm.dc;
9081 	struct drm_connector *connector;
9082 	struct drm_connector_state *old_con_state, *new_con_state;
9083 	struct drm_crtc *crtc;
9084 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9085 	struct drm_plane *plane;
9086 	struct drm_plane_state *old_plane_state, *new_plane_state;
9087 	enum dc_status status;
9088 	int ret, i;
9089 	bool lock_and_validation_needed = false;
9090 	struct dm_crtc_state *dm_old_crtc_state;
9091 
9092 	trace_amdgpu_dm_atomic_check_begin(state);
9093 
9094 	ret = drm_atomic_helper_check_modeset(dev, state);
9095 	if (ret)
9096 		goto fail;
9097 
9098 	/* Check connector changes */
9099 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9100 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9101 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9102 
9103 		/* Skip connectors that are disabled or part of modeset already. */
9104 		if (!old_con_state->crtc && !new_con_state->crtc)
9105 			continue;
9106 
9107 		if (!new_con_state->crtc)
9108 			continue;
9109 
9110 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9111 		if (IS_ERR(new_crtc_state)) {
9112 			ret = PTR_ERR(new_crtc_state);
9113 			goto fail;
9114 		}
9115 
9116 		if (dm_old_con_state->abm_level !=
9117 		    dm_new_con_state->abm_level)
9118 			new_crtc_state->connectors_changed = true;
9119 	}
9120 
9121 #if defined(CONFIG_DRM_AMD_DC_DCN)
9122 	if (adev->asic_type >= CHIP_NAVI10) {
9123 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9124 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9125 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9126 				if (ret)
9127 					goto fail;
9128 			}
9129 		}
9130 	}
9131 #endif
9132 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9133 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9134 
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
			continue;
9140 
9141 		if (!new_crtc_state->enable)
9142 			continue;
9143 
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;
9147 
9148 		ret = drm_atomic_add_affected_planes(state, crtc);
9149 		if (ret)
9150 			goto fail;
9151 	}
9152 
9153 	/*
9154 	 * Add all primary and overlay planes on the CRTC to the state
9155 	 * whenever a plane is enabled to maintain correct z-ordering
9156 	 * and to enable fast surface updates.
9157 	 */
9158 	drm_for_each_crtc(crtc, dev) {
9159 		bool modified = false;
9160 
9161 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9162 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9163 				continue;
9164 
9165 			if (new_plane_state->crtc == crtc ||
9166 			    old_plane_state->crtc == crtc) {
9167 				modified = true;
9168 				break;
9169 			}
9170 		}
9171 
9172 		if (!modified)
9173 			continue;
9174 
9175 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9176 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9177 				continue;
9178 
9179 			new_plane_state =
9180 				drm_atomic_get_plane_state(state, plane);
9181 
9182 			if (IS_ERR(new_plane_state)) {
9183 				ret = PTR_ERR(new_plane_state);
9184 				goto fail;
9185 			}
9186 		}
9187 	}
9188 
	/* Remove existing planes if they are modified */
9190 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9191 		ret = dm_update_plane_state(dc, state, plane,
9192 					    old_plane_state,
9193 					    new_plane_state,
9194 					    false,
9195 					    &lock_and_validation_needed);
9196 		if (ret)
9197 			goto fail;
9198 	}
9199 
9200 	/* Disable all crtcs which require disable */
9201 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9202 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9203 					   old_crtc_state,
9204 					   new_crtc_state,
9205 					   false,
9206 					   &lock_and_validation_needed);
9207 		if (ret)
9208 			goto fail;
9209 	}
9210 
9211 	/* Enable all crtcs which require enable */
9212 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9213 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9214 					   old_crtc_state,
9215 					   new_crtc_state,
9216 					   true,
9217 					   &lock_and_validation_needed);
9218 		if (ret)
9219 			goto fail;
9220 	}
9221 
9222 	/* Add new/modified planes */
9223 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9224 		ret = dm_update_plane_state(dc, state, plane,
9225 					    old_plane_state,
9226 					    new_plane_state,
9227 					    true,
9228 					    &lock_and_validation_needed);
9229 		if (ret)
9230 			goto fail;
9231 	}
9232 
9233 	/* Run this here since we want to validate the streams we created */
9234 	ret = drm_atomic_helper_check_planes(dev, state);
9235 	if (ret)
9236 		goto fail;
9237 
9238 	if (state->legacy_cursor_update) {
9239 		/*
9240 		 * This is a fast cursor update coming from the plane update
9241 		 * helper, check if it can be done asynchronously for better
9242 		 * performance.
9243 		 */
9244 		state->async_update =
9245 			!drm_atomic_helper_async_check(dev, state);
9246 
9247 		/*
9248 		 * Skip the remaining global validation if this is an async
9249 		 * update. Cursor updates can be done without affecting
9250 		 * state or bandwidth calcs and this avoids the performance
9251 		 * penalty of locking the private state object and
9252 		 * allocating a new dc_state.
9253 		 */
9254 		if (state->async_update)
9255 			return 0;
9256 	}
9257 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-changes validation was removed due to the inability to
	 * commit a new stream into the context without causing a full reset.
	 * Need to decide how to handle this.
	 */
9263 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9264 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9265 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9266 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9267 
9268 		/* Skip any modesets/resets */
9269 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9270 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9271 			continue;
9272 
		/* Skip anything that is not a scaling or underscan change */
9274 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9275 			continue;
9276 
9277 		lock_and_validation_needed = true;
9278 	}
9279 
9280 	/**
9281 	 * Streams and planes are reset when there are changes that affect
9282 	 * bandwidth. Anything that affects bandwidth needs to go through
9283 	 * DC global validation to ensure that the configuration can be applied
9284 	 * to hardware.
9285 	 *
9286 	 * We have to currently stall out here in atomic_check for outstanding
9287 	 * commits to finish in this case because our IRQ handlers reference
9288 	 * DRM state directly - we can end up disabling interrupts too early
9289 	 * if we don't.
9290 	 *
9291 	 * TODO: Remove this stall and drop DM state private objects.
9292 	 */
9293 	if (lock_and_validation_needed) {
9294 		ret = dm_atomic_get_state(state, &dm_state);
9295 		if (ret)
9296 			goto fail;
9297 
		ret = do_acquire_global_lock(dev, state);
9299 		if (ret)
9300 			goto fail;
9301 
9302 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
9305 
9306 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9307 		if (ret)
9308 			goto fail;
9309 #endif
9310 
9311 		/*
9312 		 * Perform validation of MST topology in the state:
9313 		 * We need to perform MST atomic check before calling
9314 		 * dc_validate_global_state(), or there is a chance
9315 		 * to get stuck in an infinite loop and hang eventually.
9316 		 */
9317 		ret = drm_dp_mst_atomic_check(state);
9318 		if (ret)
9319 			goto fail;
9320 		status = dc_validate_global_state(dc, dm_state->context, false);
9321 		if (status != DC_OK) {
9322 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
9323 				       dc_status_to_str(status), status);
9324 			ret = -EINVAL;
9325 			goto fail;
9326 		}
9327 	} else {
9328 		/*
9329 		 * The commit is a fast update. Fast updates shouldn't change
9330 		 * the DC context, affect global validation, and can have their
9331 		 * commit work done in parallel with other commits not touching
9332 		 * the same resource. If we have a new DC context as part of
9333 		 * the DM atomic state from validation we need to free it and
9334 		 * retain the existing one instead.
9335 		 *
9336 		 * Furthermore, since the DM atomic state only contains the DC
9337 		 * context and can safely be annulled, we can free the state
9338 		 * and clear the associated private object now to free
9339 		 * some memory and avoid a possible use-after-free later.
9340 		 */
9341 
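		/*
		 * Swap-remove: e.g. with num_private_objs == 4 and the DM
		 * object found at i == 1, the entry at index 3 is moved into
		 * slot 1, the vacated tail slot is cleared, and the count
		 * drops to 3.
		 */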
9342 		for (i = 0; i < state->num_private_objs; i++) {
9343 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9344 
9345 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
9347 
9348 				dm_atomic_destroy_state(obj,
9349 						state->private_objs[i].state);
9350 
9351 				/* If i is not at the end of the array then the
9352 				 * last element needs to be moved to where i was
9353 				 * before the array can safely be truncated.
9354 				 */
9355 				if (i != j)
9356 					state->private_objs[i] =
9357 						state->private_objs[j];
9358 
9359 				state->private_objs[j].ptr = NULL;
9360 				state->private_objs[j].state = NULL;
9361 				state->private_objs[j].old_state = NULL;
9362 				state->private_objs[j].new_state = NULL;
9363 
9364 				state->num_private_objs = j;
9365 				break;
9366 			}
9367 		}
9368 	}
9369 
9370 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9372 		struct dm_crtc_state *dm_new_crtc_state =
9373 			to_dm_crtc_state(new_crtc_state);
9374 
9375 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9376 							 UPDATE_TYPE_FULL :
9377 							 UPDATE_TYPE_FAST;
9378 	}
9379 
9380 	/* Must be success */
9381 	WARN_ON(ret);
9382 
9383 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9384 
9385 	return ret;
9386 
9387 fail:
9388 	if (ret == -EDEADLK)
9389 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9390 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9391 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9392 	else
9393 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9394 
9395 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9396 
9397 	return ret;
9398 }
9399 
9400 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9401 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9402 {
9403 	uint8_t dpcd_data;
9404 	bool capable = false;
9405 
9406 	if (amdgpu_dm_connector->dc_link &&
9407 		dm_helpers_dp_read_dpcd(
9408 				NULL,
9409 				amdgpu_dm_connector->dc_link,
9410 				DP_DOWN_STREAM_PORT_COUNT,
9411 				&dpcd_data,
9412 				sizeof(dpcd_data))) {
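		/*
		 * DP_MSA_TIMING_PAR_IGNORED in the DOWN_STREAM_PORT_COUNT
		 * DPCD register indicates the sink can ignore the MSA video
		 * timing parameters - a prerequisite for variable refresh.
		 */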
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9414 	}
9415 
9416 	return capable;
9417 }
9418 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9419 					struct edid *edid)
9420 {
9421 	int i;
9422 	bool edid_check_required;
9423 	struct detailed_timing *timing;
9424 	struct detailed_non_pixel *data;
9425 	struct detailed_data_monitor_range *range;
9426 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9427 			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct drm_device *dev = connector->dev;
9431 	struct amdgpu_device *adev = drm_to_adev(dev);
9432 	bool freesync_capable = false;
9433 
9434 	if (!connector->state) {
9435 		DRM_ERROR("%s - Connector has no state", __func__);
9436 		goto update;
9437 	}
9438 
9439 	if (!edid) {
9440 		dm_con_state = to_dm_connector_state(connector->state);
9441 
9442 		amdgpu_dm_connector->min_vfreq = 0;
9443 		amdgpu_dm_connector->max_vfreq = 0;
9444 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9445 
9446 		goto update;
9447 	}
9448 
9449 	dm_con_state = to_dm_connector_state(connector->state);
9450 
9451 	edid_check_required = false;
9452 	if (!amdgpu_dm_connector->dc_sink) {
9453 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9454 		goto update;
9455 	}
9456 	if (!adev->dm.freesync_module)
9457 		goto update;
9458 	/*
9459 	 * if edid non zero restrict freesync only for dp and edp
9460 	 */
9461 	if (edid) {
9462 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9463 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9464 			edid_check_required = is_dp_capable_without_timing_msa(
9465 						adev->dm.dc,
9466 						amdgpu_dm_connector);
9467 		}
9468 	}
	if (edid_check_required && (edid->version > 1 ||
	   (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing	= &edid->detailed_timings[i];
9474 			data	= &timing->data.other_data;
9475 			range	= &data->data.range;
9476 			/*
9477 			 * Check if monitor has continuous frequency mode
9478 			 */
9479 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9480 				continue;
9481 			/*
9482 			 * Check for flag range limits only. If flag == 1 then
9483 			 * no additional timing information provided.
9484 			 * Default GTF, GTF Secondary curve and CVT are not
9485 			 * supported
9486 			 */
9487 			if (range->flags != 1)
9488 				continue;
9489 
9490 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9491 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
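			/*
			 * The EDID range descriptor stores the maximum pixel
			 * clock in 10 MHz units, hence the multiply by 10.
			 */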
9492 			amdgpu_dm_connector->pixel_clock_mhz =
9493 				range->pixel_clock_mhz * 10;
9494 			break;
9495 		}
9496 
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
9502 	}
9503 
9504 update:
9505 	if (dm_con_state)
9506 		dm_con_state->freesync_capable = freesync_capable;
9507 
9508 	if (connector->vrr_capable_property)
9509 		drm_connector_set_vrr_capable_property(connector,
9510 						       freesync_capable);
9511 }
9512 
9513 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9514 {
9515 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9516 
9517 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9518 		return;
9519 	if (link->type == dc_connection_none)
9520 		return;
9521 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9522 					dpcd_data, sizeof(dpcd_data))) {
9523 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9524 
9525 		if (dpcd_data[0] == 0) {
9526 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9527 			link->psr_settings.psr_feature_enabled = false;
9528 		} else {
9529 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9530 			link->psr_settings.psr_feature_enabled = true;
9531 		}
9532 
9533 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9534 	}
9535 }
9536 
9537 /*
9538  * amdgpu_dm_link_setup_psr() - configure psr link
9539  * @stream: stream state
9540  *
9541  * Return: true if success
9542  */
9543 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9544 {
9545 	struct dc_link *link = NULL;
9546 	struct psr_config psr_config = {0};
9547 	struct psr_context psr_context = {0};
9548 	bool ret = false;
9549 
	if (!stream)
		return false;
9552 
9553 	link = stream->link;
9554 
9555 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9556 
9557 	if (psr_config.psr_version > 0) {
9558 		psr_config.psr_exit_link_training_required = 0x1;
9559 		psr_config.psr_frame_capture_indication_req = 0;
9560 		psr_config.psr_rfb_setup_time = 0x37;
9561 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9562 		psr_config.allow_smu_optimizations = 0x0;
9563 
		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
9567 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9568 
9569 	return ret;
9570 }
9571 
9572 /*
9573  * amdgpu_dm_psr_enable() - enable psr f/w
9574  * @stream: stream state
9575  *
9576  * Return: true if success
9577  */
9578 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9579 {
9580 	struct dc_link *link = stream->link;
9581 	unsigned int vsync_rate_hz = 0;
9582 	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR. Initialize to a fail-safe of 2 static
	 * frames.
	 */
	unsigned int num_frames_static = 2;
9588 
9589 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9590 
9591 	vsync_rate_hz = div64_u64(div64_u64((
9592 			stream->timing.pix_clk_100hz * 100),
9593 			stream->timing.v_total),
9594 			stream->timing.h_total);
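	/*
	 * Example: pix_clk_100hz = 1485000 (148.5 MHz), v_total = 1125 and
	 * h_total = 2200 gives 148500000 / 1125 / 2200 = 60 Hz.
	 */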
9595 
	/*
	 * Round up: calculate the number of frames such that at least 30 ms
	 * of time has passed.
	 */
9600 	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
9603 	}
9604 
9605 	params.triggers.cursor_update = true;
9606 	params.triggers.overlay_update = true;
9607 	params.triggers.surface_update = true;
9608 	params.num_frames = num_frames_static;
9609 
9610 	dc_stream_set_static_screen_params(link->ctx->dc,
9611 					   &stream, 1,
9612 					   &params);
9613 
9614 	return dc_link_set_psr_allow_active(link, true, false);
9615 }
9616 
9617 /*
9618  * amdgpu_dm_psr_disable() - disable psr f/w
9619  * @stream:  stream state
9620  *
9621  * Return: true if success
9622  */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");
9627 
9628 	return dc_link_set_psr_allow_active(stream->link, false, true);
9629 }
9630 
9631 /*
9632  * amdgpu_dm_psr_disable() - disable psr f/w
9633  * if psr is enabled on any stream
9634  *
9635  * Return: true if success
9636  */
9637 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9638 {
9639 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9640 	return dc_set_psr_allow_active(dm->dc, false);
9641 }
9642 
9643 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9644 {
9645 	struct amdgpu_device *adev = drm_to_adev(dev);
9646 	struct dc *dc = adev->dm.dc;
9647 	int i;
9648 
9649 	mutex_lock(&adev->dm.dc_lock);
9650 	if (dc->current_state) {
9651 		for (i = 0; i < dc->current_state->stream_count; ++i)
9652 			dc->current_state->streams[i]
9653 				->triggered_crtc_reset.enabled =
9654 				adev->dm.force_timing_sync;
9655 
9656 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9657 		dc_trigger_sync(dc, dc->current_state);
9658 	}
9659 	mutex_unlock(&adev->dm.dc_lock);
9660 }
9661 
9662 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9663 		       uint32_t value, const char *func_name)
9664 {
9665 #ifdef DM_CHECK_ADDR_0
9666 	if (address == 0) {
9667 		DC_ERR("invalid register write. address = 0");
9668 		return;
9669 	}
9670 #endif
9671 	cgs_write_register(ctx->cgs_device, address, value);
9672 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9673 }
9674 
9675 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9676 			  const char *func_name)
9677 {
9678 	uint32_t value;
9679 #ifdef DM_CHECK_ADDR_0
9680 	if (address == 0) {
9681 		DC_ERR("invalid register read; address = 0\n");
9682 		return 0;
9683 	}
9684 #endif
9685 
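	/*
	 * Register reads cannot be offloaded to DMUB; a read while write
	 * gathering is in progress indicates a driver bug.
	 */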
9686 	if (ctx->dmub_srv &&
9687 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9688 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9689 		ASSERT(false);
9690 		return 0;
9691 	}
9692 
9693 	value = cgs_read_register(ctx->cgs_device, address);
9694 
9695 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9696 
9697 	return value;
9698 }
9699