1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
38 
39 #include "vid.h"
40 #include "amdgpu.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
43 #include "atom.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
48 #endif
49 #include "amdgpu_pm.h"
50 
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
57 #endif
58 
59 #include "ivsrcid/ivsrcid_vislands30.h"
60 
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/version.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
69 
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79 #include <drm/drm_hdcp.h>
80 
81 #if defined(CONFIG_DRM_AMD_DC_DCN)
82 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 
84 #include "dcn/dcn_1_0_offset.h"
85 #include "dcn/dcn_1_0_sh_mask.h"
86 #include "soc15_hw_ip.h"
87 #include "vega10_ip_offset.h"
88 
89 #include "soc15_common.h"
90 #endif
91 
92 #include "modules/inc/mod_freesync.h"
93 #include "modules/power/power_helpers.h"
94 #include "modules/inc/mod_info_packet.h"
95 
96 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
97 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
98 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
99 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
101 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
103 #endif
104 #if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
105 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
107 #endif
108 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #endif
112 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
113 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
115 #endif
116 
117 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
118 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
119 
#define FIRMWARE_NAVI12_DMCU		"amdgpu/navi12_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122 
123 /* Number of bytes in PSP header for firmware. */
124 #define PSP_HEADER_BYTES 0x100
125 
126 /* Number of bytes in PSP footer for firmware. */
127 #define PSP_FOOTER_BYTES 0x100
128 
129 /**
130  * DOC: overview
131  *
132  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
134  * requests into DC requests, and DC responses into DRM responses.
135  *
136  * The root control structure is &struct amdgpu_display_manager.
137  */
138 
139 /* basic init/fini API */
140 static int amdgpu_dm_init(struct amdgpu_device *adev);
141 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 
143 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
144 {
145 	switch (link->dpcd_caps.dongle_type) {
146 	case DISPLAY_DONGLE_NONE:
147 		return DRM_MODE_SUBCONNECTOR_Native;
148 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
149 		return DRM_MODE_SUBCONNECTOR_VGA;
150 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
151 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
152 		return DRM_MODE_SUBCONNECTOR_DVID;
153 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
154 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
155 		return DRM_MODE_SUBCONNECTOR_HDMIA;
156 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
157 	default:
158 		return DRM_MODE_SUBCONNECTOR_Unknown;
159 	}
160 }
161 
162 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
163 {
164 	struct dc_link *link = aconnector->dc_link;
165 	struct drm_connector *connector = &aconnector->base;
166 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
167 
168 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
169 		return;
170 
171 	if (aconnector->dc_sink)
172 		subconnector = get_subconnector_type(link);
173 
174 	drm_object_property_set_value(&connector->base,
175 			connector->dev->mode_config.dp_subconnector_property,
176 			subconnector);
177 }
178 
179 /*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
183  *
184  * Returns 0 on success
185  */
186 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
187 /* removes and deallocates the drm structures, created by the above function */
188 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
189 
190 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
191 				struct drm_plane *plane,
192 				unsigned long possible_crtcs,
193 				const struct dc_plane_cap *plane_cap);
194 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
195 			       struct drm_plane *plane,
196 			       uint32_t link_index);
197 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
198 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
199 				    uint32_t link_index,
200 				    struct amdgpu_encoder *amdgpu_encoder);
201 static int amdgpu_dm_encoder_init(struct drm_device *dev,
202 				  struct amdgpu_encoder *aencoder,
203 				  uint32_t link_index);
204 
205 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
206 
207 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
208 				   struct drm_atomic_state *state,
209 				   bool nonblock);
210 
211 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
212 
213 static int amdgpu_dm_atomic_check(struct drm_device *dev,
214 				  struct drm_atomic_state *state);
215 
216 static void handle_cursor_update(struct drm_plane *plane,
217 				 struct drm_plane_state *old_plane_state);
218 
219 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
220 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
221 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
222 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
223 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
224 
225 /*
226  * dm_vblank_get_counter
227  *
228  * @brief
229  * Get counter for number of vertical blanks
230  *
231  * @param
232  * struct amdgpu_device *adev - [in] desired amdgpu device
233  * int disp_idx - [in] which CRTC to get the counter from
234  *
235  * @return
236  * Counter for vertical blanks
237  */
238 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
239 {
240 	if (crtc >= adev->mode_info.num_crtc)
241 		return 0;
242 	else {
243 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
244 
245 		if (acrtc->dm_irq_params.stream == NULL) {
246 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
247 				  crtc);
248 			return 0;
249 		}
250 
251 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
252 	}
253 }
254 
255 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
256 				  u32 *vbl, u32 *position)
257 {
258 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
259 
260 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
261 		return -EINVAL;
262 	else {
263 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
264 
265 		if (acrtc->dm_irq_params.stream ==  NULL) {
266 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
267 				  crtc);
268 			return 0;
269 		}
270 
271 		/*
272 		 * TODO rework base driver to use values directly.
273 		 * for now parse it back into reg-format
274 		 */
275 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
276 					 &v_blank_start,
277 					 &v_blank_end,
278 					 &h_position,
279 					 &v_position);
280 
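		/*
		 * Pack into the legacy register layout: vertical position in
		 * the low 16 bits, horizontal in the high 16 bits (and vblank
		 * start/end likewise).
		 */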
281 		*position = v_position | (h_position << 16);
282 		*vbl = v_blank_start | (v_blank_end << 16);
283 	}
284 
285 	return 0;
286 }
287 
288 static bool dm_is_idle(void *handle)
289 {
290 	/* XXX todo */
291 	return true;
292 }
293 
294 static int dm_wait_for_idle(void *handle)
295 {
296 	/* XXX todo */
297 	return 0;
298 }
299 
300 static bool dm_check_soft_reset(void *handle)
301 {
302 	return false;
303 }
304 
305 static int dm_soft_reset(void *handle)
306 {
307 	/* XXX todo */
308 	return 0;
309 }
310 
311 static struct amdgpu_crtc *
312 get_crtc_by_otg_inst(struct amdgpu_device *adev,
313 		     int otg_inst)
314 {
315 	struct drm_device *dev = adev_to_drm(adev);
316 	struct drm_crtc *crtc;
317 	struct amdgpu_crtc *amdgpu_crtc;
318 
319 	if (otg_inst == -1) {
320 		WARN_ON(1);
321 		return adev->mode_info.crtcs[0];
322 	}
323 
324 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
325 		amdgpu_crtc = to_amdgpu_crtc(crtc);
326 
327 		if (amdgpu_crtc->otg_inst == otg_inst)
328 			return amdgpu_crtc;
329 	}
330 
331 	return NULL;
332 }
333 
334 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
335 {
336 	return acrtc->dm_irq_params.freesync_config.state ==
337 		       VRR_STATE_ACTIVE_VARIABLE ||
338 	       acrtc->dm_irq_params.freesync_config.state ==
339 		       VRR_STATE_ACTIVE_FIXED;
340 }
341 
342 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
343 {
344 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
345 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
346 }
347 
348 /**
349  * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, carrying the amdgpu device and
 *                    the IRQ source that fired
351  *
352  * Handles the pageflip interrupt by notifying all interested parties
353  * that the pageflip has been completed.
354  */
355 static void dm_pflip_high_irq(void *interrupt_params)
356 {
357 	struct amdgpu_crtc *amdgpu_crtc;
358 	struct common_irq_params *irq_params = interrupt_params;
359 	struct amdgpu_device *adev = irq_params->adev;
360 	unsigned long flags;
361 	struct drm_pending_vblank_event *e;
362 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
363 	bool vrr_active;
364 
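	/*
	 * Pageflip IRQ sources are laid out consecutively per OTG, so the
	 * offset from IRQ_TYPE_PFLIP recovers the OTG instance that fired.
	 */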
365 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
366 
367 	/* IRQ could occur when in initial stage */
368 	/* TODO work and BO cleanup */
369 	if (amdgpu_crtc == NULL) {
370 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
371 		return;
372 	}
373 
374 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
375 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
378 						 amdgpu_crtc->pflip_status,
379 						 AMDGPU_FLIP_SUBMITTED,
380 						 amdgpu_crtc->crtc_id,
381 						 amdgpu_crtc);
382 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
383 		return;
384 	}
385 
386 	/* page flip completed. */
387 	e = amdgpu_crtc->event;
388 	amdgpu_crtc->event = NULL;
389 
	WARN_ON(!e);
392 
393 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
394 
395 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
396 	if (!vrr_active ||
397 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
398 				      &v_blank_end, &hpos, &vpos) ||
399 	    (vpos < v_blank_start)) {
400 		/* Update to correct count and vblank timestamp if racing with
401 		 * vblank irq. This also updates to the correct vblank timestamp
402 		 * even in VRR mode, as scanout is past the front-porch atm.
403 		 */
404 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
405 
406 		/* Wake up userspace by sending the pageflip event with proper
407 		 * count and timestamp of vblank of flip completion.
408 		 */
409 		if (e) {
410 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
411 
412 			/* Event sent, so done with vblank for this flip */
413 			drm_crtc_vblank_put(&amdgpu_crtc->base);
414 		}
415 	} else if (e) {
416 		/* VRR active and inside front-porch: vblank count and
417 		 * timestamp for pageflip event will only be up to date after
418 		 * drm_crtc_handle_vblank() has been executed from late vblank
419 		 * irq handler after start of back-porch (vline 0). We queue the
420 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
421 		 * updated timestamp and count, once it runs after us.
422 		 *
423 		 * We need to open-code this instead of using the helper
424 		 * drm_crtc_arm_vblank_event(), as that helper would
425 		 * call drm_crtc_accurate_vblank_count(), which we must
426 		 * not call in VRR mode while we are in front-porch!
427 		 */
428 
429 		/* sequence will be replaced by real count during send-out. */
430 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
431 		e->pipe = amdgpu_crtc->crtc_id;
432 
433 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
434 		e = NULL;
435 	}
436 
437 	/* Keep track of vblank of this flip for flip throttling. We use the
438 	 * cooked hw counter, as that one incremented at start of this vblank
439 	 * of pageflip completion, so last_flip_vblank is the forbidden count
440 	 * for queueing new pageflips if vsync + VRR is enabled.
441 	 */
442 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
443 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
444 
445 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
446 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
447 
448 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
449 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
450 			 vrr_active, (int) !e);
451 }
452 
453 static void dm_vupdate_high_irq(void *interrupt_params)
454 {
455 	struct common_irq_params *irq_params = interrupt_params;
456 	struct amdgpu_device *adev = irq_params->adev;
457 	struct amdgpu_crtc *acrtc;
458 	unsigned long flags;
459 	int vrr_active;
460 
461 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
462 
463 	if (acrtc) {
464 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
465 
466 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
467 			      acrtc->crtc_id,
468 			      vrr_active);
469 
470 		/* Core vblank handling is done here after end of front-porch in
471 		 * vrr mode, as vblank timestamping will give valid results
472 		 * while now done after front-porch. This will also deliver
473 		 * page-flip completion events that have been queued to us
474 		 * if a pageflip happened inside front-porch.
475 		 */
476 		if (vrr_active) {
477 			drm_crtc_handle_vblank(&acrtc->base);
478 
479 			/* BTR processing for pre-DCE12 ASICs */
480 			if (acrtc->dm_irq_params.stream &&
481 			    adev->family < AMDGPU_FAMILY_AI) {
482 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
483 				mod_freesync_handle_v_update(
484 				    adev->dm.freesync_module,
485 				    acrtc->dm_irq_params.stream,
486 				    &acrtc->dm_irq_params.vrr_params);
487 
488 				dc_stream_adjust_vmin_vmax(
489 				    adev->dm.dc,
490 				    acrtc->dm_irq_params.stream,
491 				    &acrtc->dm_irq_params.vrr_params.adjust);
492 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
493 			}
494 		}
495 	}
496 }
497 
498 /**
499  * dm_crtc_high_irq() - Handles CRTC interrupt
500  * @interrupt_params: used for determining the CRTC instance
501  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
503  * event handler.
504  */
505 static void dm_crtc_high_irq(void *interrupt_params)
506 {
507 	struct common_irq_params *irq_params = interrupt_params;
508 	struct amdgpu_device *adev = irq_params->adev;
509 	struct amdgpu_crtc *acrtc;
510 	unsigned long flags;
511 	int vrr_active;
512 
513 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
514 	if (!acrtc)
515 		return;
516 
517 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
518 
519 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
520 		      vrr_active, acrtc->dm_irq_params.active_planes);
521 
522 	/**
523 	 * Core vblank handling at start of front-porch is only possible
524 	 * in non-vrr mode, as only there vblank timestamping will give
525 	 * valid results while done in front-porch. Otherwise defer it
526 	 * to dm_vupdate_high_irq after end of front-porch.
527 	 */
528 	if (!vrr_active)
529 		drm_crtc_handle_vblank(&acrtc->base);
530 
531 	/**
532 	 * Following stuff must happen at start of vblank, for crc
533 	 * computation and below-the-range btr support in vrr mode.
534 	 */
535 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
536 
537 	/* BTR updates need to happen before VUPDATE on Vega and above. */
538 	if (adev->family < AMDGPU_FAMILY_AI)
539 		return;
540 
541 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
542 
543 	if (acrtc->dm_irq_params.stream &&
544 	    acrtc->dm_irq_params.vrr_params.supported &&
545 	    acrtc->dm_irq_params.freesync_config.state ==
546 		    VRR_STATE_ACTIVE_VARIABLE) {
547 		mod_freesync_handle_v_update(adev->dm.freesync_module,
548 					     acrtc->dm_irq_params.stream,
549 					     &acrtc->dm_irq_params.vrr_params);
550 
551 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
552 					   &acrtc->dm_irq_params.vrr_params.adjust);
553 	}
554 
555 	/*
556 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
557 	 * In that case, pageflip completion interrupts won't fire and pageflip
558 	 * completion events won't get delivered. Prevent this by sending
559 	 * pending pageflip events from here if a flip is still pending.
560 	 *
561 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
562 	 * avoid race conditions between flip programming and completion,
563 	 * which could cause too early flip completion events.
564 	 */
565 	if (adev->family >= AMDGPU_FAMILY_RV &&
566 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
567 	    acrtc->dm_irq_params.active_planes == 0) {
568 		if (acrtc->event) {
569 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
570 			acrtc->event = NULL;
571 			drm_crtc_vblank_put(&acrtc->base);
572 		}
573 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
574 	}
575 
576 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
577 }
578 
579 static int dm_set_clockgating_state(void *handle,
580 		  enum amd_clockgating_state state)
581 {
582 	return 0;
583 }
584 
585 static int dm_set_powergating_state(void *handle,
586 		  enum amd_powergating_state state)
587 {
588 	return 0;
589 }
590 
591 /* Prototypes of private functions */
static int dm_early_init(void *handle);
593 
/* Allocate memory for FBC compressed data */
595 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
596 {
597 	struct drm_device *dev = connector->dev;
598 	struct amdgpu_device *adev = drm_to_adev(dev);
599 	struct dm_compressor_info *compressor = &adev->dm.compressor;
600 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
601 	struct drm_display_mode *mode;
602 	unsigned long max_size = 0;
603 
604 	if (adev->dm.dc->fbc_compressor == NULL)
605 		return;
606 
607 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
608 		return;
609 
610 	if (compressor->bo_ptr)
611 		return;
612 
613 
614 	list_for_each_entry(mode, &connector->modes, head) {
615 		if (max_size < mode->htotal * mode->vtotal)
616 			max_size = mode->htotal * mode->vtotal;
617 	}
618 
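	/* Size for the worst case of 4 bytes per pixel at the largest mode. */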
619 	if (max_size) {
620 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
621 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
622 			    &compressor->gpu_addr, &compressor->cpu_addr);
623 
		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
630 
631 	}
632 
633 }
634 
635 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
636 					  int pipe, bool *enabled,
637 					  unsigned char *buf, int max_bytes)
638 {
639 	struct drm_device *dev = dev_get_drvdata(kdev);
640 	struct amdgpu_device *adev = drm_to_adev(dev);
641 	struct drm_connector *connector;
642 	struct drm_connector_list_iter conn_iter;
643 	struct amdgpu_dm_connector *aconnector;
644 	int ret = 0;
645 
646 	*enabled = false;
647 
648 	mutex_lock(&adev->dm.audio_lock);
649 
650 	drm_connector_list_iter_begin(dev, &conn_iter);
651 	drm_for_each_connector_iter(connector, &conn_iter) {
652 		aconnector = to_amdgpu_dm_connector(connector);
653 		if (aconnector->audio_inst != port)
654 			continue;
655 
656 		*enabled = true;
657 		ret = drm_eld_size(connector->eld);
658 		memcpy(buf, connector->eld, min(max_bytes, ret));
659 
660 		break;
661 	}
662 	drm_connector_list_iter_end(&conn_iter);
663 
664 	mutex_unlock(&adev->dm.audio_lock);
665 
666 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
667 
668 	return ret;
669 }
670 
671 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
672 	.get_eld = amdgpu_dm_audio_component_get_eld,
673 };
674 
675 static int amdgpu_dm_audio_component_bind(struct device *kdev,
676 				       struct device *hda_kdev, void *data)
677 {
678 	struct drm_device *dev = dev_get_drvdata(kdev);
679 	struct amdgpu_device *adev = drm_to_adev(dev);
680 	struct drm_audio_component *acomp = data;
681 
682 	acomp->ops = &amdgpu_dm_audio_component_ops;
683 	acomp->dev = kdev;
684 	adev->dm.audio_component = acomp;
685 
686 	return 0;
687 }
688 
689 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
690 					  struct device *hda_kdev, void *data)
691 {
692 	struct drm_device *dev = dev_get_drvdata(kdev);
693 	struct amdgpu_device *adev = drm_to_adev(dev);
694 	struct drm_audio_component *acomp = data;
695 
696 	acomp->ops = NULL;
697 	acomp->dev = NULL;
698 	adev->dm.audio_component = NULL;
699 }
700 
701 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
702 	.bind	= amdgpu_dm_audio_component_bind,
703 	.unbind	= amdgpu_dm_audio_component_unbind,
704 };
705 
706 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
707 {
708 	int i, ret;
709 
710 	if (!amdgpu_audio)
711 		return 0;
712 
713 	adev->mode_info.audio.enabled = true;
714 
715 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
716 
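	/* Start every audio pin as idle and disconnected until the state is
	 * updated on monitor detection.
	 */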
717 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
718 		adev->mode_info.audio.pin[i].channels = -1;
719 		adev->mode_info.audio.pin[i].rate = -1;
720 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
721 		adev->mode_info.audio.pin[i].status_bits = 0;
722 		adev->mode_info.audio.pin[i].category_code = 0;
723 		adev->mode_info.audio.pin[i].connected = false;
724 		adev->mode_info.audio.pin[i].id =
725 			adev->dm.dc->res_pool->audios[i]->inst;
726 		adev->mode_info.audio.pin[i].offset = 0;
727 	}
728 
729 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
730 	if (ret < 0)
731 		return ret;
732 
733 	adev->dm.audio_registered = true;
734 
735 	return 0;
736 }
737 
738 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
739 {
740 	if (!amdgpu_audio)
741 		return;
742 
743 	if (!adev->mode_info.audio.enabled)
744 		return;
745 
746 	if (adev->dm.audio_registered) {
747 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
748 		adev->dm.audio_registered = false;
749 	}
750 
751 	/* TODO: Disable audio? */
752 
753 	adev->mode_info.audio.enabled = false;
754 }
755 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
757 {
758 	struct drm_audio_component *acomp = adev->dm.audio_component;
759 
760 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
761 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
762 
763 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
764 						 pin, -1);
765 	}
766 }
767 
768 static int dm_dmub_hw_init(struct amdgpu_device *adev)
769 {
770 	const struct dmcub_firmware_header_v1_0 *hdr;
771 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
772 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
773 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
774 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
775 	struct abm *abm = adev->dm.dc->res_pool->abm;
776 	struct dmub_srv_hw_params hw_params;
777 	enum dmub_status status;
778 	const unsigned char *fw_inst_const, *fw_bss_data;
779 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
780 	bool has_hw_support;
781 
782 	if (!dmub_srv)
783 		/* DMUB isn't supported on the ASIC. */
784 		return 0;
785 
786 	if (!fb_info) {
787 		DRM_ERROR("No framebuffer info for DMUB service.\n");
788 		return -EINVAL;
789 	}
790 
791 	if (!dmub_fw) {
792 		/* Firmware required for DMUB support. */
793 		DRM_ERROR("No firmware provided for DMUB.\n");
794 		return -EINVAL;
795 	}
796 
797 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
798 	if (status != DMUB_STATUS_OK) {
799 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
800 		return -EINVAL;
801 	}
802 
803 	if (!has_hw_support) {
804 		DRM_INFO("DMUB unsupported on ASIC\n");
805 		return 0;
806 	}
807 
808 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
809 
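	/* The DMCUB ucode image layout is: PSP header, inst_const, then
	 * bss/data, with a PSP footer at the end of the inst_const region.
	 */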
810 	fw_inst_const = dmub_fw->data +
811 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
812 			PSP_HEADER_BYTES;
813 
814 	fw_bss_data = dmub_fw->data +
815 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
816 		      le32_to_cpu(hdr->inst_const_bytes);
817 
818 	/* Copy firmware and bios info into FB memory. */
819 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
820 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
821 
822 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
823 
824 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
825 	 * amdgpu_ucode_init_single_fw will load dmub firmware
826 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
827 	 * will be done by dm_dmub_hw_init
828 	 */
829 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
830 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
831 				fw_inst_const_size);
832 	}
833 
834 	if (fw_bss_data_size)
835 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
836 		       fw_bss_data, fw_bss_data_size);
837 
838 	/* Copy firmware bios info into FB memory. */
839 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
840 	       adev->bios_size);
841 
842 	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
845 
846 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
847 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
848 
849 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
850 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
851 
852 	/* Initialize hardware. */
853 	memset(&hw_params, 0, sizeof(hw_params));
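	/* fb_base is the framebuffer start in the GPU's address space;
	 * fb_offset is its physical (PCI aperture) base.
	 */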
854 	hw_params.fb_base = adev->gmc.fb_start;
855 	hw_params.fb_offset = adev->gmc.aper_base;
856 
857 	/* backdoor load firmware and trigger dmub running */
858 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
859 		hw_params.load_inst_const = true;
860 
861 	if (dmcu)
862 		hw_params.psp_version = dmcu->psp_version;
863 
864 	for (i = 0; i < fb_info->num_fb; ++i)
865 		hw_params.fb[i] = &fb_info->fb[i];
866 
867 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
868 	if (status != DMUB_STATUS_OK) {
869 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
870 		return -EINVAL;
871 	}
872 
873 	/* Wait for firmware load to finish. */
874 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
875 	if (status != DMUB_STATUS_OK)
876 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
877 
878 	/* Init DMCU and ABM if available. */
879 	if (dmcu && abm) {
880 		dmcu->funcs->dmcu_init(dmcu);
881 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
882 	}
883 
884 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
885 	if (!adev->dm.dc->ctx->dmub_srv) {
886 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
887 		return -ENOMEM;
888 	}
889 
890 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
891 		 adev->dm.dmcub_fw_version);
892 
893 	return 0;
894 }
895 
896 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
897 {
898 	uint64_t pt_base;
899 	uint32_t logical_addr_low;
900 	uint32_t logical_addr_high;
901 	uint32_t agp_base, agp_bot, agp_top;
902 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
903 
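	/* The system aperture registers hold addresses in 256 KiB (1 << 18)
	 * units, hence the >> 18 conversions below.
	 */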
904 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
905 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
906 
907 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
908 		/*
909 		 * Raven2 has a HW issue that it is unable to use the vram which
910 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
911 		 * workaround that increase system aperture high address (add 1)
912 		 * to get rid of the VM fault and hardware hang.
913 		 */
914 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
915 	else
916 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
917 
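	/* The AGP aperture is programmed with 16 MiB (1 << 24) granularity. */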
918 	agp_base = 0;
919 	agp_bot = adev->gmc.agp_start >> 24;
920 	agp_top = adev->gmc.agp_end >> 24;
921 
922 
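	/* GART page table addresses are 4 KiB aligned; bits [47:12] are
	 * carried, split into a 32-bit low part and a 4-bit high part.
	 */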
923 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
924 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
925 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
926 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
927 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
928 	page_table_base.low_part = lower_32_bits(pt_base);
929 
930 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
931 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
932 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
934 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
935 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
936 
937 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
938 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
939 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
940 
941 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
942 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
943 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
944 
	pa_config->is_hvm_enabled = 0;
}
948 
949 static int amdgpu_dm_init(struct amdgpu_device *adev)
950 {
951 	struct dc_init_data init_data;
952 #ifdef CONFIG_DRM_AMD_DC_HDCP
953 	struct dc_callback_init init_params;
954 #endif
955 	struct dc_phy_addr_space_config pa_config;
956 	int r;
957 
958 	adev->dm.ddev = adev_to_drm(adev);
959 	adev->dm.adev = adev;
960 
961 	/* Zero all the fields */
962 	memset(&init_data, 0, sizeof(init_data));
963 #ifdef CONFIG_DRM_AMD_DC_HDCP
964 	memset(&init_params, 0, sizeof(init_params));
965 #endif
966 
967 	mutex_init(&adev->dm.dc_lock);
968 	mutex_init(&adev->dm.audio_lock);
969 
	if (amdgpu_dm_irq_init(adev)) {
971 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
972 		goto error;
973 	}
974 
975 	init_data.asic_id.chip_family = adev->family;
976 
977 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
978 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
979 
980 	init_data.asic_id.vram_width = adev->gmc.vram_width;
981 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
982 	init_data.asic_id.atombios_base_address =
983 		adev->mode_info.atom_context->bios;
984 
985 	init_data.driver = adev;
986 
987 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
988 
989 	if (!adev->dm.cgs_device) {
990 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
991 		goto error;
992 	}
993 
994 	init_data.cgs_device = adev->dm.cgs_device;
995 
996 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
997 
998 	switch (adev->asic_type) {
999 	case CHIP_CARRIZO:
1000 	case CHIP_STONEY:
1001 	case CHIP_RAVEN:
1002 	case CHIP_RENOIR:
1003 		init_data.flags.gpu_vm_support = true;
1004 #if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
1005 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1006 			init_data.flags.disable_dmcu = true;
1007 #endif
1008 		break;
1009 	default:
1010 		break;
1011 	}
1012 
1013 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1014 		init_data.flags.fbc_support = true;
1015 
1016 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1017 		init_data.flags.multi_mon_pp_mclk_switch = true;
1018 
1019 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1020 		init_data.flags.disable_fractional_pwm = true;
1021 
1022 	init_data.flags.power_down_display_on_boot = true;
1023 
1024 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
1025 
1026 	/* Display Core create. */
1027 	adev->dm.dc = dc_create(&init_data);
1028 
1029 	if (adev->dm.dc) {
1030 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1031 	} else {
1032 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1033 		goto error;
1034 	}
1035 
1036 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1037 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1038 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1039 	}
1040 
1041 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1043 
1044 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1045 		adev->dm.dc->debug.disable_stutter = true;
1046 
1047 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1048 		adev->dm.dc->debug.disable_dsc = true;
1049 
1050 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1051 		adev->dm.dc->debug.disable_clock_gate = true;
1052 
1053 	r = dm_dmub_hw_init(adev);
1054 	if (r) {
1055 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1056 		goto error;
1057 	}
1058 
1059 	dc_hardware_init(adev->dm.dc);
1060 
1061 #if defined(CONFIG_DRM_AMD_DC_DCN)
1062 	if (adev->asic_type == CHIP_RENOIR) {
1063 		mmhub_read_system_context(adev, &pa_config);
1064 
1065 		// Call the DC init_memory func
1066 		dc_setup_system_context(adev->dm.dc, &pa_config);
1067 	}
1068 #endif
1069 
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
1077 
1078 	amdgpu_dm_init_color_mod();
1079 
1080 #ifdef CONFIG_DRM_AMD_DC_HDCP
1081 	if (adev->asic_type >= CHIP_RAVEN) {
1082 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1083 
1084 		if (!adev->dm.hdcp_workqueue)
1085 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1086 		else
1087 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1088 
1089 		dc_init_callbacks(adev->dm.dc, &init_params);
1090 	}
1091 #endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1095 		goto error;
1096 	}
1097 
	/* Update the actual number of CRTCs in use */
1099 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1100 
1101 	/* create fake encoders for MST */
1102 	dm_dp_create_fake_mst_encoders(adev);
1103 
1104 	/* TODO: Add_display_info? */
1105 
1106 	/* TODO use dynamic cursor width */
1107 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1108 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1109 
	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}
1116 
1117 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1118 
1119 	return 0;
1120 error:
1121 	amdgpu_dm_fini(adev);
1122 
1123 	return -EINVAL;
1124 }
1125 
1126 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1127 {
1128 	int i;
1129 
	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1133 
1134 	amdgpu_dm_audio_fini(adev);
1135 
1136 	amdgpu_dm_destroy_drm_device(&adev->dm);
1137 
1138 #ifdef CONFIG_DRM_AMD_DC_HDCP
1139 	if (adev->dm.hdcp_workqueue) {
1140 		hdcp_destroy(adev->dm.hdcp_workqueue);
1141 		adev->dm.hdcp_workqueue = NULL;
1142 	}
1143 
1144 	if (adev->dm.dc)
1145 		dc_deinit_callbacks(adev->dm.dc);
1146 #endif
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1148 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1149 		adev->dm.dc->ctx->dmub_srv = NULL;
1150 	}
1151 
1152 	if (adev->dm.dmub_bo)
1153 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1154 				      &adev->dm.dmub_bo_gpu_addr,
1155 				      &adev->dm.dmub_bo_cpu_addr);
1156 
1157 	/* DC Destroy TODO: Replace destroy DAL */
1158 	if (adev->dm.dc)
1159 		dc_destroy(&adev->dm.dc);
1160 	/*
	 * TODO: pageflip, vblank interrupt
1162 	 *
1163 	 * amdgpu_dm_irq_fini(adev);
1164 	 */
1165 
1166 	if (adev->dm.cgs_device) {
1167 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1168 		adev->dm.cgs_device = NULL;
1169 	}
1170 	if (adev->dm.freesync_module) {
1171 		mod_freesync_destroy(adev->dm.freesync_module);
1172 		adev->dm.freesync_module = NULL;
1173 	}
1174 
1175 	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
1180 
1181 static int load_dmcu_fw(struct amdgpu_device *adev)
1182 {
1183 	const char *fw_name_dmcu = NULL;
1184 	int r;
1185 	const struct dmcu_firmware_header_v1_0 *hdr;
1186 
	switch (adev->asic_type) {
1188 #if defined(CONFIG_DRM_AMD_DC_SI)
1189 	case CHIP_TAHITI:
1190 	case CHIP_PITCAIRN:
1191 	case CHIP_VERDE:
1192 	case CHIP_OLAND:
1193 #endif
1194 	case CHIP_BONAIRE:
1195 	case CHIP_HAWAII:
1196 	case CHIP_KAVERI:
1197 	case CHIP_KABINI:
1198 	case CHIP_MULLINS:
1199 	case CHIP_TONGA:
1200 	case CHIP_FIJI:
1201 	case CHIP_CARRIZO:
1202 	case CHIP_STONEY:
1203 	case CHIP_POLARIS11:
1204 	case CHIP_POLARIS10:
1205 	case CHIP_POLARIS12:
1206 	case CHIP_VEGAM:
1207 	case CHIP_VEGA10:
1208 	case CHIP_VEGA12:
1209 	case CHIP_VEGA20:
1210 	case CHIP_NAVI10:
1211 	case CHIP_NAVI14:
1212 	case CHIP_RENOIR:
1213 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1214 	case CHIP_SIENNA_CICHLID:
1215 	case CHIP_NAVY_FLOUNDER:
1216 #endif
1217 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
1218 	case CHIP_DIMGREY_CAVEFISH:
1219 #endif
1220 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
1221 	case CHIP_VANGOGH:
1222 #endif
1223 		return 0;
1224 	case CHIP_NAVI12:
1225 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1226 		break;
1227 	case CHIP_RAVEN:
1228 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1229 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1230 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1231 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1232 		else
1233 			return 0;
1234 		break;
1235 	default:
1236 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1237 		return -EINVAL;
1238 	}
1239 
1240 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1241 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1242 		return 0;
1243 	}
1244 
1245 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1246 	if (r == -ENOENT) {
1247 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1248 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1249 		adev->dm.fw_dmcu = NULL;
1250 		return 0;
1251 	}
1252 	if (r) {
1253 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1254 			fw_name_dmcu);
1255 		return r;
1256 	}
1257 
1258 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1259 	if (r) {
1260 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1261 			fw_name_dmcu);
1262 		release_firmware(adev->dm.fw_dmcu);
1263 		adev->dm.fw_dmcu = NULL;
1264 		return r;
1265 	}
1266 
1267 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1268 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1269 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1270 	adev->firmware.fw_size +=
1271 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1272 
1273 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1274 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1275 	adev->firmware.fw_size +=
1276 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1277 
1278 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1279 
1280 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1281 
1282 	return 0;
1283 }
1284 
1285 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1286 {
1287 	struct amdgpu_device *adev = ctx;
1288 
1289 	return dm_read_reg(adev->dm.dc->ctx, address);
1290 }
1291 
1292 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1293 				     uint32_t value)
1294 {
1295 	struct amdgpu_device *adev = ctx;
1296 
1297 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1298 }
1299 
1300 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1301 {
1302 	struct dmub_srv_create_params create_params;
1303 	struct dmub_srv_region_params region_params;
1304 	struct dmub_srv_region_info region_info;
1305 	struct dmub_srv_fb_params fb_params;
1306 	struct dmub_srv_fb_info *fb_info;
1307 	struct dmub_srv *dmub_srv;
1308 	const struct dmcub_firmware_header_v1_0 *hdr;
1309 	const char *fw_name_dmub;
1310 	enum dmub_asic dmub_asic;
1311 	enum dmub_status status;
1312 	int r;
1313 
1314 	switch (adev->asic_type) {
1315 	case CHIP_RENOIR:
1316 		dmub_asic = DMUB_ASIC_DCN21;
1317 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1318 #if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
1319 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1320 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1321 #endif
1322 		break;
1323 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1324 	case CHIP_SIENNA_CICHLID:
1325 		dmub_asic = DMUB_ASIC_DCN30;
1326 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1327 		break;
1328 	case CHIP_NAVY_FLOUNDER:
1329 		dmub_asic = DMUB_ASIC_DCN30;
1330 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1331 		break;
1332 #endif
1333 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
1334 	case CHIP_VANGOGH:
1335 		dmub_asic = DMUB_ASIC_DCN301;
1336 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1337 		break;
1338 #endif
1339 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
1340 	case CHIP_DIMGREY_CAVEFISH:
1341 		dmub_asic = DMUB_ASIC_DCN302;
1342 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1343 		break;
#endif
	default:
1347 		/* ASIC doesn't support DMUB. */
1348 		return 0;
1349 	}
1350 
1351 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1352 	if (r) {
1353 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1354 		return 0;
1355 	}
1356 
1357 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1358 	if (r) {
1359 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1360 		return 0;
1361 	}
1362 
1363 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1364 
1365 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1366 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1367 			AMDGPU_UCODE_ID_DMCUB;
1368 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1369 			adev->dm.dmub_fw;
1370 		adev->firmware.fw_size +=
1371 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1372 
1373 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1374 			 adev->dm.dmcub_fw_version);
1375 	}
1376 
1377 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1378 
1379 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1380 	dmub_srv = adev->dm.dmub_srv;
1381 
1382 	if (!dmub_srv) {
1383 		DRM_ERROR("Failed to allocate DMUB service!\n");
1384 		return -ENOMEM;
1385 	}
1386 
1387 	memset(&create_params, 0, sizeof(create_params));
1388 	create_params.user_ctx = adev;
1389 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1390 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1391 	create_params.asic = dmub_asic;
1392 
1393 	/* Create the DMUB service. */
1394 	status = dmub_srv_create(dmub_srv, &create_params);
1395 	if (status != DMUB_STATUS_OK) {
1396 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1397 		return -EINVAL;
1398 	}
1399 
1400 	/* Calculate the size of all the regions for the DMUB service. */
1401 	memset(&region_params, 0, sizeof(region_params));
1402 
1403 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1404 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1405 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1406 	region_params.vbios_size = adev->bios_size;
1407 	region_params.fw_bss_data = region_params.bss_data_size ?
1408 		adev->dm.dmub_fw->data +
1409 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1410 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1411 	region_params.fw_inst_const =
1412 		adev->dm.dmub_fw->data +
1413 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1414 		PSP_HEADER_BYTES;
1415 
1416 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1417 					   &region_info);
1418 
1419 	if (status != DMUB_STATUS_OK) {
1420 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1421 		return -EINVAL;
1422 	}
1423 
1424 	/*
1425 	 * Allocate a framebuffer based on the total size of all the regions.
1426 	 * TODO: Move this into GART.
1427 	 */
1428 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1429 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1430 				    &adev->dm.dmub_bo_gpu_addr,
1431 				    &adev->dm.dmub_bo_cpu_addr);
1432 	if (r)
1433 		return r;
1434 
1435 	/* Rebase the regions on the framebuffer address. */
1436 	memset(&fb_params, 0, sizeof(fb_params));
1437 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1438 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1439 	fb_params.region_info = &region_info;
1440 
1441 	adev->dm.dmub_fb_info =
1442 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1443 	fb_info = adev->dm.dmub_fb_info;
1444 
1445 	if (!fb_info) {
		DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
1448 		return -ENOMEM;
1449 	}
1450 
1451 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1452 	if (status != DMUB_STATUS_OK) {
1453 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1454 		return -EINVAL;
1455 	}
1456 
1457 	return 0;
1458 }
1459 
1460 static int dm_sw_init(void *handle)
1461 {
1462 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1463 	int r;
1464 
1465 	r = dm_dmub_sw_init(adev);
1466 	if (r)
1467 		return r;
1468 
1469 	return load_dmcu_fw(adev);
1470 }
1471 
1472 static int dm_sw_fini(void *handle)
1473 {
1474 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1475 
1476 	kfree(adev->dm.dmub_fb_info);
1477 	adev->dm.dmub_fb_info = NULL;
1478 
1479 	if (adev->dm.dmub_srv) {
1480 		dmub_srv_destroy(adev->dm.dmub_srv);
1481 		adev->dm.dmub_srv = NULL;
1482 	}
1483 
1484 	release_firmware(adev->dm.dmub_fw);
1485 	adev->dm.dmub_fw = NULL;
1486 
1487 	release_firmware(adev->dm.fw_dmcu);
1488 	adev->dm.fw_dmcu = NULL;
1489 
1490 	return 0;
1491 }
1492 
1493 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1494 {
1495 	struct amdgpu_dm_connector *aconnector;
1496 	struct drm_connector *connector;
1497 	struct drm_connector_list_iter iter;
1498 	int ret = 0;
1499 
1500 	drm_connector_list_iter_begin(dev, &iter);
1501 	drm_for_each_connector_iter(connector, &iter) {
1502 		aconnector = to_amdgpu_dm_connector(connector);
1503 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1504 		    aconnector->mst_mgr.aux) {
1505 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1506 					 aconnector,
1507 					 aconnector->base.base.id);
1508 
1509 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1510 			if (ret < 0) {
1511 				DRM_ERROR("DM_MST: Failed to start MST\n");
1512 				aconnector->dc_link->type =
1513 					dc_connection_single;
1514 				break;
1515 			}
1516 		}
1517 	}
1518 	drm_connector_list_iter_end(&iter);
1519 
1520 	return ret;
1521 }
1522 
1523 static int dm_late_init(void *handle)
1524 {
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
1528 	unsigned int linear_lut[16];
1529 	int i;
1530 	struct dmcu *dmcu = NULL;
1531 	bool ret = true;
1532 
1533 	dmcu = adev->dm.dc->res_pool->dmcu;
1534 
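	/* Build a 16-entry linear (identity) backlight LUT spanning 0..0xFFFF. */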
1535 	for (i = 0; i < 16; i++)
1536 		linear_lut[i] = 0xFFFF * i / 15;
1537 
1538 	params.set = 0;
1539 	params.backlight_ramping_start = 0xCCCC;
1540 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1541 	params.backlight_lut_array_size = 16;
1542 	params.backlight_lut_array = linear_lut;
1543 
	/* Min backlight level after ABM reduction; don't allow below 1%
	 * (0xFFFF * 0.01 = 0x28F).
	 */
1547 	params.min_abm_backlight = 0x28F;
1548 
1549 	/* In the case where abm is implemented on dmcub,
1550 	 * dmcu object will be null.
1551 	 * ABM 2.4 and up are implemented on dmcub.
1552 	 */
1553 	if (dmcu)
1554 		ret = dmcu_load_iram(dmcu, params);
1555 	else if (adev->dm.dc->ctx->dmub_srv)
1556 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1557 
1558 	if (!ret)
1559 		return -EINVAL;
1560 
1561 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1562 }
1563 
1564 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1565 {
1566 	struct amdgpu_dm_connector *aconnector;
1567 	struct drm_connector *connector;
1568 	struct drm_connector_list_iter iter;
1569 	struct drm_dp_mst_topology_mgr *mgr;
1570 	int ret;
1571 	bool need_hotplug = false;
1572 
1573 	drm_connector_list_iter_begin(dev, &iter);
1574 	drm_for_each_connector_iter(connector, &iter) {
1575 		aconnector = to_amdgpu_dm_connector(connector);
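		/* Only the root MST branch device owns the topology manager;
		 * skip downstream MST ports and non-MST links.
		 */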
1576 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1577 		    aconnector->mst_port)
1578 			continue;
1579 
1580 		mgr = &aconnector->mst_mgr;
1581 
1582 		if (suspend) {
1583 			drm_dp_mst_topology_mgr_suspend(mgr);
1584 		} else {
1585 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1586 			if (ret < 0) {
1587 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1588 				need_hotplug = true;
1589 			}
1590 		}
1591 	}
1592 	drm_connector_list_iter_end(&iter);
1593 
1594 	if (need_hotplug)
1595 		drm_kms_helper_hotplug_event(dev);
1596 }
1597 
1598 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1599 {
1600 	struct smu_context *smu = &adev->smu;
1601 	int ret = 0;
1602 
1603 	if (!is_support_sw_smu(adev))
1604 		return 0;
1605 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The
	 * settings should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, then calls the pplib functions
	 * below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented different flows for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
1637 	case CHIP_NAVI10:
1638 	case CHIP_NAVI14:
1639 	case CHIP_NAVI12:
1640 		break;
1641 	default:
1642 		return 0;
1643 	}
1644 
1645 	ret = smu_write_watermarks_table(smu);
1646 	if (ret) {
1647 		DRM_ERROR("Failed to update WMTABLE!\n");
1648 		return ret;
1649 	}
1650 
1651 	return 0;
1652 }
1653 
1654 /**
1655  * dm_hw_init() - Initialize DC device
1656  * @handle: The base driver device containing the amdgpu_dm device.
1657  *
1658  * Initialize the &struct amdgpu_display_manager device. This involves calling
1659  * the initializers of each DM component, then populating the struct with them.
1660  *
1661  * Although the function implies hardware initialization, both hardware and
1662  * software are initialized here. Splitting them out to their relevant init
1663  * hooks is a future TODO item.
1664  *
1665  * Some notable things that are initialized here:
1666  *
1667  * - Display Core, both software and hardware
1668  * - DC modules that we need (freesync and color management)
1669  * - DRM software states
1670  * - Interrupt sources and handlers
1671  * - Vblank support
1672  * - Debug FS entries, if enabled
1673  */
1674 static int dm_hw_init(void *handle)
1675 {
1676 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1677 	/* Create DAL display manager */
1678 	amdgpu_dm_init(adev);
1679 	amdgpu_dm_hpd_init(adev);
1680 
1681 	return 0;
1682 }
1683 
1684 /**
1685  * dm_hw_fini() - Teardown DC device
1686  * @handle: The base driver device containing the amdgpu_dm device.
1687  *
1688  * Teardown components within &struct amdgpu_display_manager that require
1689  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1690  * were loaded. Also flush IRQ workqueues and disable them.
1691  */
1692 static int dm_hw_fini(void *handle)
1693 {
1694 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1695 
1696 	amdgpu_dm_hpd_fini(adev);
1697 
1698 	amdgpu_dm_irq_fini(adev);
1699 	amdgpu_dm_fini(adev);
1700 	return 0;
1701 }
1702 
1703 
1704 static int dm_enable_vblank(struct drm_crtc *crtc);
1705 static void dm_disable_vblank(struct drm_crtc *crtc);
1706 
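/*
 * Enable or disable pageflip and vblank interrupts for every CRTC that has
 * active planes in @state. Used to quiesce and later restore display IRQs
 * around GPU reset.
 */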
1707 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1708 				 struct dc_state *state, bool enable)
1709 {
1710 	enum dc_irq_source irq_source;
1711 	struct amdgpu_crtc *acrtc;
1712 	int rc = -EBUSY;
1713 	int i = 0;
1714 
1715 	for (i = 0; i < state->stream_count; i++) {
1716 		acrtc = get_crtc_by_otg_inst(
1717 				adev, state->stream_status[i].primary_otg_inst);
1718 
1719 		if (acrtc && state->stream_status[i].plane_count != 0) {
1720 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1721 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1722 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1723 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1724 			if (rc)
1725 				DRM_WARN("Failed to %s pflip interrupts\n",
1726 					 enable ? "enable" : "disable");
1727 
1728 			if (enable) {
1729 				rc = dm_enable_vblank(&acrtc->base);
1730 				if (rc)
1731 					DRM_WARN("Failed to enable vblank interrupts\n");
1732 			} else {
1733 				dm_disable_vblank(&acrtc->base);
1734 			}
1735 
1736 		}
1737 	}
1738 
1739 }
1740 
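/*
 * Build a copy of the current DC state with all streams and their planes
 * removed, then commit it, blanking the display pipeline. Used on the GPU
 * reset path in dm_suspend().
 */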
1741 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1742 {
1743 	struct dc_state *context = NULL;
1744 	enum dc_status res = DC_ERROR_UNEXPECTED;
1745 	int i;
1746 	struct dc_stream_state *del_streams[MAX_PIPES];
1747 	int del_streams_count = 0;
1748 
1749 	memset(del_streams, 0, sizeof(del_streams));
1750 
1751 	context = dc_create_state(dc);
1752 	if (context == NULL)
1753 		goto context_alloc_fail;
1754 
1755 	dc_resource_state_copy_construct_current(dc, context);
1756 
1757 	/* First remove from context all streams */
1758 	for (i = 0; i < context->stream_count; i++) {
1759 		struct dc_stream_state *stream = context->streams[i];
1760 
1761 		del_streams[del_streams_count++] = stream;
1762 	}
1763 
1764 	/* Remove all planes for removed streams and then remove the streams */
1765 	for (i = 0; i < del_streams_count; i++) {
1766 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1767 			res = DC_FAIL_DETACH_SURFACES;
1768 			goto fail;
1769 		}
1770 
1771 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1772 		if (res != DC_OK)
1773 			goto fail;
1774 	}
1775 
1776 
1777 	res = dc_validate_global_state(dc, context, false);
1778 
1779 	if (res != DC_OK) {
1780 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1781 		goto fail;
1782 	}
1783 
1784 	res = dc_commit_state(dc, context);
1785 
1786 fail:
1787 	dc_release_state(context);
1788 
1789 context_alloc_fail:
1790 	return res;
1791 }
1792 
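/*
 * Suspend hook. On the GPU reset path, cache the current DC state and commit
 * zero streams; note that dm->dc_lock is taken here and only released on the
 * matching reset path in dm_resume(). For regular S3, suspend the atomic
 * state, MST and IRQs, then put DC into D3.
 */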
1793 static int dm_suspend(void *handle)
1794 {
1795 	struct amdgpu_device *adev = handle;
1796 	struct amdgpu_display_manager *dm = &adev->dm;
1797 	int ret = 0;
1798 
1799 	if (amdgpu_in_reset(adev)) {
1800 		mutex_lock(&dm->dc_lock);
1801 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1802 
1803 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1804 
1805 		amdgpu_dm_commit_zero_streams(dm->dc);
1806 
1807 		amdgpu_dm_irq_suspend(adev);
1808 
1809 		return ret;
1810 	}
1811 
1812 	WARN_ON(adev->dm.cached_state);
1813 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1814 
1815 	s3_handle_mst(adev_to_drm(adev), true);
1816 
1817 	amdgpu_dm_irq_suspend(adev);
1818 
1819 
1820 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1821 
1822 	return 0;
1823 }
1824 
1825 static struct amdgpu_dm_connector *
1826 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1827 					     struct drm_crtc *crtc)
1828 {
1829 	uint32_t i;
1830 	struct drm_connector_state *new_con_state;
1831 	struct drm_connector *connector;
1832 	struct drm_crtc *crtc_from_state;
1833 
1834 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1835 		crtc_from_state = new_con_state->crtc;
1836 
1837 		if (crtc_from_state == crtc)
1838 			return to_amdgpu_dm_connector(connector);
1839 	}
1840 
1841 	return NULL;
1842 }
1843 
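/*
 * Emulate a link detection for connectors that are forced on while no sink is
 * physically attached: create a local sink matching the connector signal and
 * read the (forced) EDID into it.
 */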
1844 static void emulated_link_detect(struct dc_link *link)
1845 {
1846 	struct dc_sink_init_data sink_init_data = { 0 };
1847 	struct display_sink_capability sink_caps = { 0 };
1848 	enum dc_edid_status edid_status;
1849 	struct dc_context *dc_ctx = link->ctx;
1850 	struct dc_sink *sink = NULL;
1851 	struct dc_sink *prev_sink = NULL;
1852 
1853 	link->type = dc_connection_none;
1854 	prev_sink = link->local_sink;
1855 
1856 	if (prev_sink != NULL)
1857 		dc_sink_retain(prev_sink);
1858 
1859 	switch (link->connector_signal) {
1860 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1861 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1862 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1863 		break;
1864 	}
1865 
1866 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1867 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1868 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1869 		break;
1870 	}
1871 
1872 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1873 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1874 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1875 		break;
1876 	}
1877 
1878 	case SIGNAL_TYPE_LVDS: {
1879 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1880 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1881 		break;
1882 	}
1883 
1884 	case SIGNAL_TYPE_EDP: {
1885 		sink_caps.transaction_type =
1886 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1887 		sink_caps.signal = SIGNAL_TYPE_EDP;
1888 		break;
1889 	}
1890 
1891 	case SIGNAL_TYPE_DISPLAY_PORT: {
1892 		sink_caps.transaction_type =
1893 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1894 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1895 		break;
1896 	}
1897 
1898 	default:
1899 		DC_ERROR("Invalid connector type! signal:%d\n",
1900 			link->connector_signal);
1901 		return;
1902 	}
1903 
1904 	sink_init_data.link = link;
1905 	sink_init_data.sink_signal = sink_caps.signal;
1906 
1907 	sink = dc_sink_create(&sink_init_data);
1908 	if (!sink) {
1909 		DC_ERROR("Failed to create sink!\n");
1910 		return;
1911 	}
1912 
1913 	/* dc_sink_create returns a new reference */
1914 	link->local_sink = sink;
1915 
1916 	edid_status = dm_helpers_read_local_edid(
1917 			link->ctx,
1918 			link,
1919 			sink);
1920 
1921 	if (edid_status != EDID_OK)
1922 		DC_ERROR("Failed to read EDID");
1923 
1924 }
1925 
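/*
 * Re-commit every stream of @dc_state with full surface updates forced, so
 * the hardware is fully reprogrammed from the cached state after a GPU reset.
 */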
1926 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1927 				     struct amdgpu_display_manager *dm)
1928 {
1929 	struct {
1930 		struct dc_surface_update surface_updates[MAX_SURFACES];
1931 		struct dc_plane_info plane_infos[MAX_SURFACES];
1932 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1933 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1934 		struct dc_stream_update stream_update;
	} *bundle;
1936 	int k, m;
1937 
1938 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1939 
1940 	if (!bundle) {
1941 		dm_error("Failed to allocate update bundle\n");
1942 		goto cleanup;
1943 	}
1944 
1945 	for (k = 0; k < dc_state->stream_count; k++) {
1946 		bundle->stream_update.stream = dc_state->streams[k];
1947 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
1951 			bundle->surface_updates[m].surface->force_full_update =
1952 				true;
1953 		}
1954 		dc_commit_updates_for_stream(
1955 			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
1957 			dc_state->streams[k], &bundle->stream_update, dc_state);
1958 	}
1959 
1960 cleanup:
1961 	kfree(bundle);
1962 
1963 	return;
1964 }
1965 
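/*
 * Resume hook. On the GPU reset path, replay the DC state cached by
 * dm_suspend(). For regular S3, re-initialize DMUB, power DC back up to D0,
 * re-run sink detection and restore the cached atomic state.
 */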
1966 static int dm_resume(void *handle)
1967 {
1968 	struct amdgpu_device *adev = handle;
1969 	struct drm_device *ddev = adev_to_drm(adev);
1970 	struct amdgpu_display_manager *dm = &adev->dm;
1971 	struct amdgpu_dm_connector *aconnector;
1972 	struct drm_connector *connector;
1973 	struct drm_connector_list_iter iter;
1974 	struct drm_crtc *crtc;
1975 	struct drm_crtc_state *new_crtc_state;
1976 	struct dm_crtc_state *dm_new_crtc_state;
1977 	struct drm_plane *plane;
1978 	struct drm_plane_state *new_plane_state;
1979 	struct dm_plane_state *dm_new_plane_state;
1980 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1981 	enum dc_connection_type new_connection_type = dc_connection_none;
1982 	struct dc_state *dc_state;
1983 	int i, r, j;
1984 
1985 	if (amdgpu_in_reset(adev)) {
1986 		dc_state = dm->cached_dc_state;
1987 
1988 		r = dm_dmub_hw_init(adev);
1989 		if (r)
1990 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1991 
1992 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1993 		dc_resume(dm->dc);
1994 
1995 		amdgpu_dm_irq_resume_early(adev);
1996 
1997 		for (i = 0; i < dc_state->stream_count; i++) {
1998 			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
2002 			}
2003 		}
2004 
2005 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2006 
2007 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2008 
2009 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2010 
2011 		dc_release_state(dm->cached_dc_state);
2012 		dm->cached_dc_state = NULL;
2013 
2014 		amdgpu_dm_irq_resume_late(adev);
2015 
2016 		mutex_unlock(&dm->dc_lock);
2017 
2018 		return 0;
2019 	}
2020 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2021 	dc_release_state(dm_state->context);
2022 	dm_state->context = dc_create_state(dm->dc);
2023 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2024 	dc_resource_state_construct(dm->dc, dm_state->context);
2025 
2026 	/* Before powering on DC we need to re-initialize DMUB. */
2027 	r = dm_dmub_hw_init(adev);
2028 	if (r)
2029 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2030 
2031 	/* power on hardware */
2032 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2033 
2034 	/* program HPD filter */
2035 	dc_resume(dm->dc);
2036 
2037 	/*
2038 	 * early enable HPD Rx IRQ, should be done before set mode as short
2039 	 * pulse interrupts are used for MST
2040 	 */
2041 	amdgpu_dm_irq_resume_early(adev);
2042 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2044 	s3_handle_mst(ddev, false);
2045 
	/* Do detection */
2047 	drm_connector_list_iter_begin(ddev, &iter);
2048 	drm_for_each_connector_iter(connector, &iter) {
2049 		aconnector = to_amdgpu_dm_connector(connector);
2050 
2051 		/*
2052 		 * this is the case when traversing through already created
2053 		 * MST connectors, should be skipped
2054 		 */
2055 		if (aconnector->mst_port)
2056 			continue;
2057 
2058 		mutex_lock(&aconnector->hpd_lock);
2059 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2060 			DRM_ERROR("KMS: Failed to detect connector\n");
2061 
2062 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2063 			emulated_link_detect(aconnector->dc_link);
2064 		else
2065 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2066 
2067 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2068 			aconnector->fake_enable = false;
2069 
2070 		if (aconnector->dc_sink)
2071 			dc_sink_release(aconnector->dc_sink);
2072 		aconnector->dc_sink = NULL;
2073 		amdgpu_dm_update_connector_after_detect(aconnector);
2074 		mutex_unlock(&aconnector->hpd_lock);
2075 	}
2076 	drm_connector_list_iter_end(&iter);
2077 
2078 	/* Force mode set in atomic commit */
2079 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2080 		new_crtc_state->active_changed = true;
2081 
2082 	/*
2083 	 * atomic_check is expected to create the dc states. We need to release
2084 	 * them here, since they were duplicated as part of the suspend
2085 	 * procedure.
2086 	 */
2087 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2088 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2089 		if (dm_new_crtc_state->stream) {
2090 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2091 			dc_stream_release(dm_new_crtc_state->stream);
2092 			dm_new_crtc_state->stream = NULL;
2093 		}
2094 	}
2095 
2096 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2097 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2098 		if (dm_new_plane_state->dc_state) {
2099 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2100 			dc_plane_state_release(dm_new_plane_state->dc_state);
2101 			dm_new_plane_state->dc_state = NULL;
2102 		}
2103 	}
2104 
2105 	drm_atomic_helper_resume(ddev, dm->cached_state);
2106 
2107 	dm->cached_state = NULL;
2108 
2109 	amdgpu_dm_irq_resume_late(adev);
2110 
2111 	amdgpu_dm_smu_write_watermarks_table(adev);
2112 
2113 	return 0;
2114 }
2115 
2116 /**
2117  * DOC: DM Lifecycle
2118  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2120  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2121  * the base driver's device list to be initialized and torn down accordingly.
2122  *
2123  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2124  */
2125 
2126 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2127 	.name = "dm",
2128 	.early_init = dm_early_init,
2129 	.late_init = dm_late_init,
2130 	.sw_init = dm_sw_init,
2131 	.sw_fini = dm_sw_fini,
2132 	.hw_init = dm_hw_init,
2133 	.hw_fini = dm_hw_fini,
2134 	.suspend = dm_suspend,
2135 	.resume = dm_resume,
2136 	.is_idle = dm_is_idle,
2137 	.wait_for_idle = dm_wait_for_idle,
2138 	.check_soft_reset = dm_check_soft_reset,
2139 	.soft_reset = dm_soft_reset,
2140 	.set_clockgating_state = dm_set_clockgating_state,
2141 	.set_powergating_state = dm_set_powergating_state,
2142 };
2143 
const struct amdgpu_ip_block_version dm_ip_block = {
2146 	.type = AMD_IP_BLOCK_TYPE_DCE,
2147 	.major = 1,
2148 	.minor = 0,
2149 	.rev = 0,
2150 	.funcs = &amdgpu_dm_funcs,
2151 };
2152 
2153 
2154 /**
2155  * DOC: atomic
2156  *
2157  * *WIP*
2158  */
2159 
2160 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2161 	.fb_create = amdgpu_display_user_framebuffer_create,
2162 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2163 	.atomic_check = amdgpu_dm_atomic_check,
2164 	.atomic_commit = amdgpu_dm_atomic_commit,
2165 };
2166 
2167 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2168 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2169 };
2170 
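/*
 * For eDP links, read the sink's extended backlight capabilities and derive
 * the AUX min/max input signal from the HDR metadata, following the
 * CTA-861-G luminance formula described below.
 */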
2171 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2172 {
2173 	u32 max_cll, min_cll, max, min, q, r;
2174 	struct amdgpu_dm_backlight_caps *caps;
2175 	struct amdgpu_display_manager *dm;
2176 	struct drm_connector *conn_base;
2177 	struct amdgpu_device *adev;
2178 	struct dc_link *link = NULL;
2179 	static const u8 pre_computed_values[] = {
2180 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2181 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2182 
2183 	if (!aconnector || !aconnector->dc_link)
2184 		return;
2185 
2186 	link = aconnector->dc_link;
2187 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2188 		return;
2189 
2190 	conn_base = &aconnector->base;
2191 	adev = drm_to_adev(conn_base->dev);
2192 	dm = &adev->dm;
2193 	caps = &dm->backlight_caps;
2194 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2195 	caps->aux_support = false;
2196 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2197 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2198 
2199 	if (caps->ext_caps->bits.oled == 1 ||
2200 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2201 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2202 		caps->aux_support = true;
2203 
2204 	/* From the specification (CTA-861-G), for calculating the maximum
2205 	 * luminance we need to use:
2206 	 *	Luminance = 50*2**(CV/32)
2207 	 * Where CV is a one-byte value.
	 * For calculating this expression we may need floating-point
	 * precision; to avoid this complexity level, we take advantage of the
	 * fact that CV is divided by a constant. From Euclid's division
	 * algorithm, we know that CV can be written as: CV = 32*q + r. Next,
	 * we replace CV in the Luminance expression and get
	 * 50*(2**q)*(2**(r/32)), hence we just need to pre-compute the value
	 * of 50*2**(r/32). For pre-computing the values we used the following
	 * Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified in
	 * pre_computed_values.
2218 	 */
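	/*
	 * Worked example (hypothetical sink value): max_cll = 100 gives
	 * q = 100 >> 5 = 3 and r = 100 % 32 = 4, so
	 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440 nits.
	 */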
2219 	q = max_cll >> 5;
2220 	r = max_cll % 32;
2221 	max = (1 << q) * pre_computed_values[r];
2222 
2223 	// min luminance: maxLum * (CV/255)^2 / 100
2224 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2225 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2226 
2227 	caps->aux_max_input_signal = max;
2228 	caps->aux_min_input_signal = min;
2229 }
2230 
2231 void amdgpu_dm_update_connector_after_detect(
2232 		struct amdgpu_dm_connector *aconnector)
2233 {
2234 	struct drm_connector *connector = &aconnector->base;
2235 	struct drm_device *dev = connector->dev;
2236 	struct dc_sink *sink;
2237 
2238 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2240 		return;
2241 
2242 	sink = aconnector->dc_link->local_sink;
2243 	if (sink)
2244 		dc_sink_retain(sink);
2245 
2246 	/*
2247 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2248 	 * the connector sink is set to either fake or physical sink depends on link status.
2249 	 * Skip if already done during boot.
2250 	 */
2251 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2252 			&& aconnector->dc_em_sink) {
2253 
2254 		/*
2255 		 * For S3 resume with headless use eml_sink to fake stream
2256 		 * because on resume connector->sink is set to NULL
2257 		 */
2258 		mutex_lock(&dev->mode_config.mutex);
2259 
2260 		if (sink) {
2261 			if (aconnector->dc_sink) {
2262 				amdgpu_dm_update_freesync_caps(connector, NULL);
2263 				/*
2264 				 * retain and release below are used to
2265 				 * bump up refcount for sink because the link doesn't point
2266 				 * to it anymore after disconnect, so on next crtc to connector
2267 				 * reshuffle by UMD we will get into unwanted dc_sink release
2268 				 */
2269 				dc_sink_release(aconnector->dc_sink);
2270 			}
2271 			aconnector->dc_sink = sink;
2272 			dc_sink_retain(aconnector->dc_sink);
2273 			amdgpu_dm_update_freesync_caps(connector,
2274 					aconnector->edid);
2275 		} else {
2276 			amdgpu_dm_update_freesync_caps(connector, NULL);
2277 			if (!aconnector->dc_sink) {
2278 				aconnector->dc_sink = aconnector->dc_em_sink;
2279 				dc_sink_retain(aconnector->dc_sink);
2280 			}
2281 		}
2282 
2283 		mutex_unlock(&dev->mode_config.mutex);
2284 
2285 		if (sink)
2286 			dc_sink_release(sink);
2287 		return;
2288 	}
2289 
2290 	/*
2291 	 * TODO: temporary guard to look for proper fix
2292 	 * if this sink is MST sink, we should not do anything
2293 	 */
2294 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2295 		dc_sink_release(sink);
2296 		return;
2297 	}
2298 
2299 	if (aconnector->dc_sink == sink) {
2300 		/*
2301 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2302 		 * Do nothing!!
2303 		 */
2304 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2305 				aconnector->connector_id);
2306 		if (sink)
2307 			dc_sink_release(sink);
2308 		return;
2309 	}
2310 
2311 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2312 		aconnector->connector_id, aconnector->dc_sink, sink);
2313 
2314 	mutex_lock(&dev->mode_config.mutex);
2315 
2316 	/*
2317 	 * 1. Update status of the drm connector
2318 	 * 2. Send an event and let userspace tell us what to do
2319 	 */
2320 	if (sink) {
2321 		/*
2322 		 * TODO: check if we still need the S3 mode update workaround.
2323 		 * If yes, put it here.
2324 		 */
2325 		if (aconnector->dc_sink)
2326 			amdgpu_dm_update_freesync_caps(connector, NULL);
2327 
2328 		aconnector->dc_sink = sink;
2329 		dc_sink_retain(aconnector->dc_sink);
2330 		if (sink->dc_edid.length == 0) {
2331 			aconnector->edid = NULL;
2332 			if (aconnector->dc_link->aux_mode) {
2333 				drm_dp_cec_unset_edid(
2334 					&aconnector->dm_dp_aux.aux);
2335 			}
2336 		} else {
2337 			aconnector->edid =
2338 				(struct edid *)sink->dc_edid.raw_edid;
2339 
2340 			drm_connector_update_edid_property(connector,
2341 							   aconnector->edid);
2342 			drm_add_edid_modes(connector, aconnector->edid);
2343 
2344 			if (aconnector->dc_link->aux_mode)
2345 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2346 						    aconnector->edid);
2347 		}
2348 
2349 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2350 		update_connector_ext_caps(aconnector);
2351 	} else {
2352 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2353 		amdgpu_dm_update_freesync_caps(connector, NULL);
2354 		drm_connector_update_edid_property(connector, NULL);
2355 		aconnector->num_modes = 0;
2356 		dc_sink_release(aconnector->dc_sink);
2357 		aconnector->dc_sink = NULL;
2358 		aconnector->edid = NULL;
2359 #ifdef CONFIG_DRM_AMD_DC_HDCP
2360 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2361 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2362 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2363 #endif
2364 	}
2365 
2366 	mutex_unlock(&dev->mode_config.mutex);
2367 
2368 	update_subconnector_property(aconnector);
2369 
2370 	if (sink)
2371 		dc_sink_release(sink);
2372 }
2373 
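/*
 * Low-context handler for HPD long pulses: re-run sink detection for the
 * connector and, on a change, restore the DRM connector state and send a
 * hotplug event to userspace.
 */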
2374 static void handle_hpd_irq(void *param)
2375 {
2376 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2377 	struct drm_connector *connector = &aconnector->base;
2378 	struct drm_device *dev = connector->dev;
2379 	enum dc_connection_type new_connection_type = dc_connection_none;
2380 #ifdef CONFIG_DRM_AMD_DC_HDCP
2381 	struct amdgpu_device *adev = drm_to_adev(dev);
2382 #endif
2383 
2384 	/*
2385 	 * In case of failure or MST no need to update connector status or notify the OS
2386 	 * since (for MST case) MST does this in its own context.
2387 	 */
2388 	mutex_lock(&aconnector->hpd_lock);
2389 
2390 #ifdef CONFIG_DRM_AMD_DC_HDCP
2391 	if (adev->dm.hdcp_workqueue)
2392 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2393 #endif
2394 	if (aconnector->fake_enable)
2395 		aconnector->fake_enable = false;
2396 
2397 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2398 		DRM_ERROR("KMS: Failed to detect connector\n");
2399 
2400 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2401 		emulated_link_detect(aconnector->dc_link);
2402 
2403 
2404 		drm_modeset_lock_all(dev);
2405 		dm_restore_drm_connector_state(dev, connector);
2406 		drm_modeset_unlock_all(dev);
2407 
2408 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2409 			drm_kms_helper_hotplug_event(dev);
2410 
2411 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2412 		amdgpu_dm_update_connector_after_detect(aconnector);
2413 
2414 
2415 		drm_modeset_lock_all(dev);
2416 		dm_restore_drm_connector_state(dev, connector);
2417 		drm_modeset_unlock_all(dev);
2418 
2419 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2420 			drm_kms_helper_hotplug_event(dev);
2421 	}
2422 	mutex_unlock(&aconnector->hpd_lock);
2423 
2424 }
2425 
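/*
 * Service DP short-pulse (hpd_rx) events: read the sink's DPCD IRQ vector
 * (ESI registers on DPCD 1.2+), hand MST events to the DRM MST manager and
 * ACK them back to the sink until no new IRQ is pending or the retry budget
 * runs out.
 */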
2426 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2427 {
2428 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2429 	uint8_t dret;
2430 	bool new_irq_handled = false;
2431 	int dpcd_addr;
2432 	int dpcd_bytes_to_read;
2433 
2434 	const int max_process_count = 30;
2435 	int process_count = 0;
2436 
2437 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2438 
2439 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2440 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2441 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2442 		dpcd_addr = DP_SINK_COUNT;
2443 	} else {
2444 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2445 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2446 		dpcd_addr = DP_SINK_COUNT_ESI;
2447 	}
2448 
2449 	dret = drm_dp_dpcd_read(
2450 		&aconnector->dm_dp_aux.aux,
2451 		dpcd_addr,
2452 		esi,
2453 		dpcd_bytes_to_read);
2454 
2455 	while (dret == dpcd_bytes_to_read &&
2456 		process_count < max_process_count) {
2457 		uint8_t retry;
2458 		dret = 0;
2459 
2460 		process_count++;
2461 
2462 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2463 		/* handle HPD short pulse irq */
2464 		if (aconnector->mst_mgr.mst_state)
2465 			drm_dp_mst_hpd_irq(
2466 				&aconnector->mst_mgr,
2467 				esi,
2468 				&new_irq_handled);
2469 
2470 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
2472 			const int ack_dpcd_bytes_to_write =
2473 				dpcd_bytes_to_read - 1;
2474 
2475 			for (retry = 0; retry < 3; retry++) {
2476 				uint8_t wret;
2477 
2478 				wret = drm_dp_dpcd_write(
2479 					&aconnector->dm_dp_aux.aux,
2480 					dpcd_addr + 1,
2481 					&esi[1],
2482 					ack_dpcd_bytes_to_write);
2483 				if (wret == ack_dpcd_bytes_to_write)
2484 					break;
2485 			}
2486 
2487 			/* check if there is new irq to be handled */
2488 			dret = drm_dp_dpcd_read(
2489 				&aconnector->dm_dp_aux.aux,
2490 				dpcd_addr,
2491 				esi,
2492 				dpcd_bytes_to_read);
2493 
2494 			new_irq_handled = false;
2495 		} else {
2496 			break;
2497 		}
2498 	}
2499 
2500 	if (process_count == max_process_count)
2501 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2502 }
2503 
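/*
 * Low-context handler for HPD short pulses: let DC process the RX IRQ,
 * re-run detection if the downstream port status changed, and dispatch MST,
 * CEC and HDCP (CP_IRQ) events as needed.
 */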
2504 static void handle_hpd_rx_irq(void *param)
2505 {
2506 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2507 	struct drm_connector *connector = &aconnector->base;
2508 	struct drm_device *dev = connector->dev;
2509 	struct dc_link *dc_link = aconnector->dc_link;
2510 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2511 	enum dc_connection_type new_connection_type = dc_connection_none;
2512 #ifdef CONFIG_DRM_AMD_DC_HDCP
2513 	union hpd_irq_data hpd_irq_data;
2514 	struct amdgpu_device *adev = drm_to_adev(dev);
2515 
2516 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2517 #endif
2518 
2519 	/*
2520 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2521 	 * conflict, after implement i2c helper, this mutex should be
2522 	 * retired.
2523 	 */
2524 	if (dc_link->type != dc_connection_mst_branch)
2525 		mutex_lock(&aconnector->hpd_lock);
2526 
2527 
2528 #ifdef CONFIG_DRM_AMD_DC_HDCP
2529 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2530 #else
2531 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2532 #endif
2533 			!is_mst_root_connector) {
2534 		/* Downstream Port status changed. */
2535 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2536 			DRM_ERROR("KMS: Failed to detect connector\n");
2537 
2538 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2539 			emulated_link_detect(dc_link);
2540 
2541 			if (aconnector->fake_enable)
2542 				aconnector->fake_enable = false;
2543 
2544 			amdgpu_dm_update_connector_after_detect(aconnector);
2545 
2546 
2547 			drm_modeset_lock_all(dev);
2548 			dm_restore_drm_connector_state(dev, connector);
2549 			drm_modeset_unlock_all(dev);
2550 
2551 			drm_kms_helper_hotplug_event(dev);
2552 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2553 
2554 			if (aconnector->fake_enable)
2555 				aconnector->fake_enable = false;
2556 
2557 			amdgpu_dm_update_connector_after_detect(aconnector);
2558 
2559 
2560 			drm_modeset_lock_all(dev);
2561 			dm_restore_drm_connector_state(dev, connector);
2562 			drm_modeset_unlock_all(dev);
2563 
2564 			drm_kms_helper_hotplug_event(dev);
2565 		}
2566 	}
2567 #ifdef CONFIG_DRM_AMD_DC_HDCP
2568 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2569 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2571 	}
2572 #endif
2573 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2574 	    (dc_link->type == dc_connection_mst_branch))
2575 		dm_handle_hpd_rx_irq(aconnector);
2576 
2577 	if (dc_link->type != dc_connection_mst_branch) {
2578 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2579 		mutex_unlock(&aconnector->hpd_lock);
2580 	}
2581 }
2582 
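/*
 * Walk the connector list and register handle_hpd_irq() / handle_hpd_rx_irq()
 * as low-context handlers for each link's HPD and HPD-RX interrupt sources.
 */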
2583 static void register_hpd_handlers(struct amdgpu_device *adev)
2584 {
2585 	struct drm_device *dev = adev_to_drm(adev);
2586 	struct drm_connector *connector;
2587 	struct amdgpu_dm_connector *aconnector;
2588 	const struct dc_link *dc_link;
2589 	struct dc_interrupt_params int_params = {0};
2590 
2591 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2592 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2593 
2594 	list_for_each_entry(connector,
2595 			&dev->mode_config.connector_list, head)	{
2596 
2597 		aconnector = to_amdgpu_dm_connector(connector);
2598 		dc_link = aconnector->dc_link;
2599 
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2601 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2602 			int_params.irq_source = dc_link->irq_source_hpd;
2603 
2604 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2605 					handle_hpd_irq,
2606 					(void *) aconnector);
2607 		}
2608 
		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
2614 
2615 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2616 					handle_hpd_rx_irq,
2617 					(void *) aconnector);
2618 		}
2619 	}
2620 }
2621 
2622 #if defined(CONFIG_DRM_AMD_DC_SI)
2623 /* Register IRQ sources and initialize IRQ callbacks */
2624 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2625 {
2626 	struct dc *dc = adev->dm.dc;
2627 	struct common_irq_params *c_irq_params;
2628 	struct dc_interrupt_params int_params = {0};
2629 	int r;
2630 	int i;
2631 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2632 
2633 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2634 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2635 
2636 	/*
2637 	 * Actions of amdgpu_irq_add_id():
2638 	 * 1. Register a set() function with base driver.
2639 	 *    Base driver will call set() function to enable/disable an
2640 	 *    interrupt in DC hardware.
2641 	 * 2. Register amdgpu_dm_irq_handler().
2642 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2643 	 *    coming from DC hardware.
2644 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2646 
2647 	/* Use VBLANK interrupt */
2648 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2650 		if (r) {
2651 			DRM_ERROR("Failed to add crtc irq id!\n");
2652 			return r;
2653 		}
2654 
2655 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2656 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
2658 
2659 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2660 
2661 		c_irq_params->adev = adev;
2662 		c_irq_params->irq_src = int_params.irq_source;
2663 
2664 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2665 				dm_crtc_high_irq, c_irq_params);
2666 	}
2667 
2668 	/* Use GRPH_PFLIP interrupt */
2669 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2670 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2671 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2672 		if (r) {
2673 			DRM_ERROR("Failed to add page flip irq id!\n");
2674 			return r;
2675 		}
2676 
2677 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2678 		int_params.irq_source =
2679 			dc_interrupt_to_irq_source(dc, i, 0);
2680 
2681 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2682 
2683 		c_irq_params->adev = adev;
2684 		c_irq_params->irq_src = int_params.irq_source;
2685 
2686 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2687 				dm_pflip_high_irq, c_irq_params);
2688 
2689 	}
2690 
2691 	/* HPD */
2692 	r = amdgpu_irq_add_id(adev, client_id,
2693 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2694 	if (r) {
2695 		DRM_ERROR("Failed to add hpd irq id!\n");
2696 		return r;
2697 	}
2698 
2699 	register_hpd_handlers(adev);
2700 
2701 	return 0;
2702 }
2703 #endif
2704 
2705 /* Register IRQ sources and initialize IRQ callbacks */
2706 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2707 {
2708 	struct dc *dc = adev->dm.dc;
2709 	struct common_irq_params *c_irq_params;
2710 	struct dc_interrupt_params int_params = {0};
2711 	int r;
2712 	int i;
2713 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2714 
2715 	if (adev->asic_type >= CHIP_VEGA10)
2716 		client_id = SOC15_IH_CLIENTID_DCE;
2717 
2718 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2719 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2720 
2721 	/*
2722 	 * Actions of amdgpu_irq_add_id():
2723 	 * 1. Register a set() function with base driver.
2724 	 *    Base driver will call set() function to enable/disable an
2725 	 *    interrupt in DC hardware.
2726 	 * 2. Register amdgpu_dm_irq_handler().
2727 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2728 	 *    coming from DC hardware.
2729 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2731 
2732 	/* Use VBLANK interrupt */
2733 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2734 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2735 		if (r) {
2736 			DRM_ERROR("Failed to add crtc irq id!\n");
2737 			return r;
2738 		}
2739 
2740 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2741 		int_params.irq_source =
2742 			dc_interrupt_to_irq_source(dc, i, 0);
2743 
2744 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2745 
2746 		c_irq_params->adev = adev;
2747 		c_irq_params->irq_src = int_params.irq_source;
2748 
2749 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2750 				dm_crtc_high_irq, c_irq_params);
2751 	}
2752 
2753 	/* Use VUPDATE interrupt */
2754 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2755 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2756 		if (r) {
2757 			DRM_ERROR("Failed to add vupdate irq id!\n");
2758 			return r;
2759 		}
2760 
2761 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2762 		int_params.irq_source =
2763 			dc_interrupt_to_irq_source(dc, i, 0);
2764 
2765 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2766 
2767 		c_irq_params->adev = adev;
2768 		c_irq_params->irq_src = int_params.irq_source;
2769 
2770 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2771 				dm_vupdate_high_irq, c_irq_params);
2772 	}
2773 
2774 	/* Use GRPH_PFLIP interrupt */
2775 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2776 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2777 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2778 		if (r) {
2779 			DRM_ERROR("Failed to add page flip irq id!\n");
2780 			return r;
2781 		}
2782 
2783 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2784 		int_params.irq_source =
2785 			dc_interrupt_to_irq_source(dc, i, 0);
2786 
2787 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2788 
2789 		c_irq_params->adev = adev;
2790 		c_irq_params->irq_src = int_params.irq_source;
2791 
2792 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2793 				dm_pflip_high_irq, c_irq_params);
2794 
2795 	}
2796 
2797 	/* HPD */
2798 	r = amdgpu_irq_add_id(adev, client_id,
2799 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2800 	if (r) {
2801 		DRM_ERROR("Failed to add hpd irq id!\n");
2802 		return r;
2803 	}
2804 
2805 	register_hpd_handlers(adev);
2806 
2807 	return 0;
2808 }
2809 
2810 #if defined(CONFIG_DRM_AMD_DC_DCN)
2811 /* Register IRQ sources and initialize IRQ callbacks */
2812 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2813 {
2814 	struct dc *dc = adev->dm.dc;
2815 	struct common_irq_params *c_irq_params;
2816 	struct dc_interrupt_params int_params = {0};
2817 	int r;
2818 	int i;
2819 
2820 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2821 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2822 
2823 	/*
2824 	 * Actions of amdgpu_irq_add_id():
2825 	 * 1. Register a set() function with base driver.
2826 	 *    Base driver will call set() function to enable/disable an
2827 	 *    interrupt in DC hardware.
2828 	 * 2. Register amdgpu_dm_irq_handler().
2829 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2830 	 *    coming from DC hardware.
2831 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2832 	 *    for acknowledging and handling.
2833 	 */
2834 
2835 	/* Use VSTARTUP interrupt */
2836 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2837 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2838 			i++) {
2839 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2840 
2841 		if (r) {
2842 			DRM_ERROR("Failed to add crtc irq id!\n");
2843 			return r;
2844 		}
2845 
2846 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2847 		int_params.irq_source =
2848 			dc_interrupt_to_irq_source(dc, i, 0);
2849 
2850 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2851 
2852 		c_irq_params->adev = adev;
2853 		c_irq_params->irq_src = int_params.irq_source;
2854 
2855 		amdgpu_dm_irq_register_interrupt(
2856 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2857 	}
2858 
2859 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2860 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2861 	 * to trigger at end of each vblank, regardless of state of the lock,
2862 	 * matching DCE behaviour.
2863 	 */
2864 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2865 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2866 	     i++) {
2867 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2868 
2869 		if (r) {
2870 			DRM_ERROR("Failed to add vupdate irq id!\n");
2871 			return r;
2872 		}
2873 
2874 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2875 		int_params.irq_source =
2876 			dc_interrupt_to_irq_source(dc, i, 0);
2877 
2878 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2879 
2880 		c_irq_params->adev = adev;
2881 		c_irq_params->irq_src = int_params.irq_source;
2882 
2883 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2884 				dm_vupdate_high_irq, c_irq_params);
2885 	}
2886 
2887 	/* Use GRPH_PFLIP interrupt */
2888 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2889 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2890 			i++) {
2891 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2892 		if (r) {
2893 			DRM_ERROR("Failed to add page flip irq id!\n");
2894 			return r;
2895 		}
2896 
2897 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2898 		int_params.irq_source =
2899 			dc_interrupt_to_irq_source(dc, i, 0);
2900 
2901 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2902 
2903 		c_irq_params->adev = adev;
2904 		c_irq_params->irq_src = int_params.irq_source;
2905 
2906 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2907 				dm_pflip_high_irq, c_irq_params);
2908 
2909 	}
2910 
2911 	/* HPD */
2912 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2913 			&adev->hpd_irq);
2914 	if (r) {
2915 		DRM_ERROR("Failed to add hpd irq id!\n");
2916 		return r;
2917 	}
2918 
2919 	register_hpd_handlers(adev);
2920 
2921 	return 0;
2922 }
2923 #endif
2924 
2925 /*
2926  * Acquires the lock for the atomic state object and returns
2927  * the new atomic state.
2928  *
2929  * This should only be called during atomic check.
2930  */
2931 static int dm_atomic_get_state(struct drm_atomic_state *state,
2932 			       struct dm_atomic_state **dm_state)
2933 {
2934 	struct drm_device *dev = state->dev;
2935 	struct amdgpu_device *adev = drm_to_adev(dev);
2936 	struct amdgpu_display_manager *dm = &adev->dm;
2937 	struct drm_private_state *priv_state;
2938 
2939 	if (*dm_state)
2940 		return 0;
2941 
2942 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2943 	if (IS_ERR(priv_state))
2944 		return PTR_ERR(priv_state);
2945 
2946 	*dm_state = to_dm_atomic_state(priv_state);
2947 
2948 	return 0;
2949 }
2950 
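/*
 * Return the new DM atomic state for the given commit, or NULL if the DM
 * private object is not part of @state.
 */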
2951 static struct dm_atomic_state *
2952 dm_atomic_get_new_state(struct drm_atomic_state *state)
2953 {
2954 	struct drm_device *dev = state->dev;
2955 	struct amdgpu_device *adev = drm_to_adev(dev);
2956 	struct amdgpu_display_manager *dm = &adev->dm;
2957 	struct drm_private_obj *obj;
2958 	struct drm_private_state *new_obj_state;
2959 	int i;
2960 
2961 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2962 		if (obj->funcs == dm->atomic_obj.funcs)
2963 			return to_dm_atomic_state(new_obj_state);
2964 	}
2965 
2966 	return NULL;
2967 }
2968 
2969 static struct drm_private_state *
2970 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2971 {
2972 	struct dm_atomic_state *old_state, *new_state;
2973 
2974 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2975 	if (!new_state)
2976 		return NULL;
2977 
2978 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2979 
2980 	old_state = to_dm_atomic_state(obj->state);
2981 
2982 	if (old_state && old_state->context)
2983 		new_state->context = dc_copy_state(old_state->context);
2984 
2985 	if (!new_state->context) {
2986 		kfree(new_state);
2987 		return NULL;
2988 	}
2989 
2990 	return &new_state->base;
2991 }
2992 
2993 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2994 				    struct drm_private_state *state)
2995 {
2996 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2997 
2998 	if (dm_state && dm_state->context)
2999 		dc_release_state(dm_state->context);
3000 
3001 	kfree(dm_state);
3002 }
3003 
3004 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3005 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3006 	.atomic_destroy_state = dm_atomic_destroy_state,
3007 };
3008 
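/*
 * Initialize the DRM mode_config for the device (size limits, function
 * tables, properties) and create the private atomic object that carries the
 * global DC state.
 */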
3009 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3010 {
3011 	struct dm_atomic_state *state;
3012 	int r;
3013 
3014 	adev->mode_info.mode_config_initialized = true;
3015 
3016 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3017 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3018 
3019 	adev_to_drm(adev)->mode_config.max_width = 16384;
3020 	adev_to_drm(adev)->mode_config.max_height = 16384;
3021 
3022 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3023 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3024 	/* indicates support for immediate flip */
3025 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3026 
3027 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3028 
3029 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3030 	if (!state)
3031 		return -ENOMEM;
3032 
3033 	state->context = dc_create_state(adev->dm.dc);
3034 	if (!state->context) {
3035 		kfree(state);
3036 		return -ENOMEM;
3037 	}
3038 
3039 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3040 
3041 	drm_atomic_private_obj_init(adev_to_drm(adev),
3042 				    &adev->dm.atomic_obj,
3043 				    &state->base,
3044 				    &dm_atomic_state_funcs);
3045 
3046 	r = amdgpu_display_modeset_create_props(adev);
3047 	if (r) {
3048 		dc_release_state(state->context);
3049 		kfree(state);
3050 		return r;
3051 	}
3052 
3053 	r = amdgpu_dm_audio_init(adev);
3054 	if (r) {
3055 		dc_release_state(state->context);
3056 		kfree(state);
3057 		return r;
3058 	}
3059 
3060 	return 0;
3061 }
3062 
3063 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3064 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3065 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3066 
3067 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3068 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3069 
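/*
 * Populate dm->backlight_caps from ACPI when available, falling back to the
 * default min/max input-signal range otherwise.
 */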
3070 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3071 {
3072 #if defined(CONFIG_ACPI)
3073 	struct amdgpu_dm_backlight_caps caps;
3074 
3075 	memset(&caps, 0, sizeof(caps));
3076 
3077 	if (dm->backlight_caps.caps_valid)
3078 		return;
3079 
3080 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3081 	if (caps.caps_valid) {
3082 		dm->backlight_caps.caps_valid = true;
3083 		if (caps.aux_support)
3084 			return;
3085 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3086 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3087 	} else {
3088 		dm->backlight_caps.min_input_signal =
3089 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3090 		dm->backlight_caps.max_input_signal =
3091 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3092 	}
3093 #else
3094 	if (dm->backlight_caps.aux_support)
3095 		return;
3096 
3097 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3098 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3099 #endif
3100 }
3101 
3102 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3103 {
3104 	bool rc;
3105 
3106 	if (!link)
3107 		return 1;
3108 
3109 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3110 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3111 
3112 	return rc ? 0 : 1;
3113 }
3114 
3115 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3116 				unsigned *min, unsigned *max)
3117 {
3118 	if (!caps)
3119 		return 0;
3120 
3121 	if (caps->aux_support) {
3122 		// Firmware limits are in nits, DC API wants millinits.
3123 		*max = 1000 * caps->aux_max_input_signal;
3124 		*min = 1000 * caps->aux_min_input_signal;
3125 	} else {
3126 		// Firmware limits are 8-bit, PWM control is 16-bit.
3127 		*max = 0x101 * caps->max_input_signal;
3128 		*min = 0x101 * caps->min_input_signal;
3129 	}
3130 	return 1;
3131 }
3132 
3133 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3134 					uint32_t brightness)
3135 {
3136 	unsigned min, max;
3137 
3138 	if (!get_brightness_range(caps, &min, &max))
3139 		return brightness;
3140 
3141 	// Rescale 0..255 to min..max
3142 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3143 				       AMDGPU_MAX_BL_LEVEL);
3144 }
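
/*
 * Worked example for convert_brightness_from_user(): with the default caps
 * (min_input_signal = 12, max_input_signal = 255), get_brightness_range()
 * yields min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34433.
 */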
3145 
3146 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3147 				      uint32_t brightness)
3148 {
3149 	unsigned min, max;
3150 
3151 	if (!get_brightness_range(caps, &min, &max))
3152 		return brightness;
3153 
3154 	if (brightness < min)
3155 		return 0;
3156 	// Rescale min..max to 0..255
3157 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3158 				 max - min);
3159 }
3160 
3161 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3162 {
3163 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3164 	struct amdgpu_dm_backlight_caps caps;
3165 	struct dc_link *link = NULL;
3166 	u32 brightness;
3167 	bool rc;
3168 
3169 	amdgpu_dm_update_backlight_caps(dm);
3170 	caps = dm->backlight_caps;
3171 
3172 	link = (struct dc_link *)dm->backlight_link;
3173 
3174 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3175 	// Change brightness based on AUX property
3176 	if (caps.aux_support)
3177 		return set_backlight_via_aux(link, brightness);
3178 
3179 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3180 
3181 	return rc ? 0 : 1;
3182 }
3183 
3184 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3185 {
3186 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3187 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3188 
3189 	if (ret == DC_ERROR_UNEXPECTED)
3190 		return bd->props.brightness;
3191 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3192 }
3193 
3194 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3195 	.options = BL_CORE_SUSPENDRESUME,
3196 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3197 	.update_status	= amdgpu_dm_backlight_update_status,
3198 };
3199 
3200 static void
3201 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3202 {
3203 	char bl_name[16];
3204 	struct backlight_properties props = { 0 };
3205 
3206 	amdgpu_dm_update_backlight_caps(dm);
3207 
3208 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3209 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3210 	props.type = BACKLIGHT_RAW;
3211 
3212 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3213 		 adev_to_drm(dm->adev)->primary->index);
3214 
3215 	dm->backlight_dev = backlight_device_register(bl_name,
3216 						      adev_to_drm(dm->adev)->dev,
3217 						      dm,
3218 						      &amdgpu_dm_backlight_ops,
3219 						      &props);
3220 
3221 	if (IS_ERR(dm->backlight_dev))
3222 		DRM_ERROR("DM: Backlight registration failed!\n");
3223 	else
3224 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3225 }
3226 
3227 #endif
3228 
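/*
 * Allocate and initialize a DRM plane of @plane_type for @plane_id, choosing
 * its possible CRTCs (see the IGT note below) and, when @mode_info is given,
 * storing the plane there.
 */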
3229 static int initialize_plane(struct amdgpu_display_manager *dm,
3230 			    struct amdgpu_mode_info *mode_info, int plane_id,
3231 			    enum drm_plane_type plane_type,
3232 			    const struct dc_plane_cap *plane_cap)
3233 {
3234 	struct drm_plane *plane;
3235 	unsigned long possible_crtcs;
3236 	int ret = 0;
3237 
3238 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3239 	if (!plane) {
3240 		DRM_ERROR("KMS: Failed to allocate plane\n");
3241 		return -ENOMEM;
3242 	}
3243 	plane->type = plane_type;
3244 
3245 	/*
3246 	 * HACK: IGT tests expect that the primary plane for a CRTC
3247 	 * can only have one possible CRTC. Only expose support for
3248 	 * any CRTC if they're not going to be used as a primary plane
3249 	 * for a CRTC - like overlay or underlay planes.
3250 	 */
3251 	possible_crtcs = 1 << plane_id;
3252 	if (plane_id >= dm->dc->caps.max_streams)
3253 		possible_crtcs = 0xff;
3254 
3255 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3256 
3257 	if (ret) {
3258 		DRM_ERROR("KMS: Failed to initialize plane\n");
3259 		kfree(plane);
3260 		return ret;
3261 	}
3262 
3263 	if (mode_info)
3264 		mode_info->planes[plane_id] = plane;
3265 
3266 	return ret;
3267 }
3268 
3269 
3270 static void register_backlight_device(struct amdgpu_display_manager *dm,
3271 				      struct dc_link *link)
3272 {
3273 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3274 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3275 
3276 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3277 	    link->type != dc_connection_none) {
3278 		/*
3279 		 * Event if registration failed, we should continue with
3280 		 * DM initialization because not having a backlight control
3281 		 * is better then a black screen.
3282 		 */
3283 		amdgpu_dm_register_backlight_device(dm);
3284 
3285 		if (dm->backlight_dev)
3286 			dm->backlight_link = link;
3287 	}
3288 #endif
3289 }
3290 
3291 
3292 /*
3293  * In this architecture, the association
3294  * connector -> encoder -> crtc
3295  * id not really requried. The crtc and connector will hold the
3296  * display_index as an abstraction to use with DAL component
3297  *
3298  * Returns 0 on success
3299  */
3300 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3301 {
3302 	struct amdgpu_display_manager *dm = &adev->dm;
3303 	int32_t i;
3304 	struct amdgpu_dm_connector *aconnector = NULL;
3305 	struct amdgpu_encoder *aencoder = NULL;
3306 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3307 	uint32_t link_cnt;
3308 	int32_t primary_planes;
3309 	enum dc_connection_type new_connection_type = dc_connection_none;
3310 	const struct dc_plane_cap *plane;
3311 
3312 	link_cnt = dm->dc->caps.max_links;
3313 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3314 		DRM_ERROR("DM: Failed to initialize mode config\n");
3315 		return -EINVAL;
3316 	}
3317 
3318 	/* There is one primary plane per CRTC */
3319 	primary_planes = dm->dc->caps.max_streams;
3320 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3321 
3322 	/*
3323 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3324 	 * Order is reversed to match iteration order in atomic check.
3325 	 */
3326 	for (i = (primary_planes - 1); i >= 0; i--) {
3327 		plane = &dm->dc->caps.planes[i];
3328 
3329 		if (initialize_plane(dm, mode_info, i,
3330 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3331 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3332 			goto fail;
3333 		}
3334 	}
3335 
3336 	/*
3337 	 * Initialize overlay planes, index starting after primary planes.
3338 	 * These planes have a higher DRM index than the primary planes since
3339 	 * they should be considered as having a higher z-order.
3340 	 * Order is reversed to match iteration order in atomic check.
3341 	 *
3342 	 * Only support DCN for now, and only expose one so we don't encourage
3343 	 * userspace to use up all the pipes.
3344 	 */
3345 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3346 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3347 
3348 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3349 			continue;
3350 
3351 		if (!plane->blends_with_above || !plane->blends_with_below)
3352 			continue;
3353 
3354 		if (!plane->pixel_format_support.argb8888)
3355 			continue;
3356 
3357 		if (initialize_plane(dm, NULL, primary_planes + i,
3358 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3359 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3360 			goto fail;
3361 		}
3362 
3363 		/* Only create one overlay plane. */
3364 		break;
3365 	}
3366 
3367 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3368 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3369 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3370 			goto fail;
3371 		}
3372 
3373 	dm->display_indexes_num = dm->dc->caps.max_streams;
3374 
	/* Loop over all connectors on the board */
3376 	for (i = 0; i < link_cnt; i++) {
3377 		struct dc_link *link = NULL;
3378 
3379 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3380 			DRM_ERROR(
3381 				"KMS: Cannot support more than %d display indexes\n",
3382 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3383 			continue;
3384 		}
3385 
3386 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3387 		if (!aconnector)
3388 			goto fail;
3389 
3390 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3391 		if (!aencoder)
3392 			goto fail;
3393 
3394 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3395 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3396 			goto fail;
3397 		}
3398 
3399 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3400 			DRM_ERROR("KMS: Failed to initialize connector\n");
3401 			goto fail;
3402 		}
3403 
3404 		link = dc_get_link_at_index(dm->dc, i);
3405 
3406 		if (!dc_link_detect_sink(link, &new_connection_type))
3407 			DRM_ERROR("KMS: Failed to detect connector\n");
3408 
3409 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3410 			emulated_link_detect(link);
3411 			amdgpu_dm_update_connector_after_detect(aconnector);
3412 
3413 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3414 			amdgpu_dm_update_connector_after_detect(aconnector);
3415 			register_backlight_device(dm, link);
3416 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3417 				amdgpu_dm_set_psr_caps(link);
3418 		}
3419 
3420 
3421 	}
3422 
3423 	/* Software is initialized. Now we can register interrupt handlers. */
3424 	switch (adev->asic_type) {
3425 #if defined(CONFIG_DRM_AMD_DC_SI)
3426 	case CHIP_TAHITI:
3427 	case CHIP_PITCAIRN:
3428 	case CHIP_VERDE:
3429 	case CHIP_OLAND:
3430 		if (dce60_register_irq_handlers(dm->adev)) {
3431 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3432 			goto fail;
3433 		}
3434 		break;
3435 #endif
3436 	case CHIP_BONAIRE:
3437 	case CHIP_HAWAII:
3438 	case CHIP_KAVERI:
3439 	case CHIP_KABINI:
3440 	case CHIP_MULLINS:
3441 	case CHIP_TONGA:
3442 	case CHIP_FIJI:
3443 	case CHIP_CARRIZO:
3444 	case CHIP_STONEY:
3445 	case CHIP_POLARIS11:
3446 	case CHIP_POLARIS10:
3447 	case CHIP_POLARIS12:
3448 	case CHIP_VEGAM:
3449 	case CHIP_VEGA10:
3450 	case CHIP_VEGA12:
3451 	case CHIP_VEGA20:
3452 		if (dce110_register_irq_handlers(dm->adev)) {
3453 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3454 			goto fail;
3455 		}
3456 		break;
3457 #if defined(CONFIG_DRM_AMD_DC_DCN)
3458 	case CHIP_RAVEN:
3459 	case CHIP_NAVI12:
3460 	case CHIP_NAVI10:
3461 	case CHIP_NAVI14:
3462 	case CHIP_RENOIR:
3463 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3464 	case CHIP_SIENNA_CICHLID:
3465 	case CHIP_NAVY_FLOUNDER:
3466 #endif
3467 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
3468 	case CHIP_DIMGREY_CAVEFISH:
3469 #endif
3470 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
3471 	case CHIP_VANGOGH:
3472 #endif
3473 		if (dcn10_register_irq_handlers(dm->adev)) {
3474 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3475 			goto fail;
3476 		}
3477 		break;
3478 #endif
3479 	default:
3480 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3481 		goto fail;
3482 	}
3483 
3484 	return 0;
3485 fail:
3486 	kfree(aencoder);
3487 	kfree(aconnector);
3488 
3489 	return -EINVAL;
3490 }
3491 
3492 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3493 {
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
3497 }
3498 
3499 /******************************************************************************
3500  * amdgpu_display_funcs functions
3501  *****************************************************************************/
3502 
3503 /*
3504  * dm_bandwidth_update - program display watermarks
3505  *
3506  * @adev: amdgpu_device pointer
3507  *
3508  * Calculate and program the display watermarks and line buffer allocation.
3509  */
3510 static void dm_bandwidth_update(struct amdgpu_device *adev)
3511 {
3512 	/* TODO: implement later */
3513 }
3514 
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
3528 
3529 #if defined(CONFIG_DEBUG_KERNEL_DC)
3530 
3531 static ssize_t s3_debug_store(struct device *device,
3532 			      struct device_attribute *attr,
3533 			      const char *buf,
3534 			      size_t count)
3535 {
3536 	int ret;
3537 	int s3_state;
3538 	struct drm_device *drm_dev = dev_get_drvdata(device);
3539 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3540 
3541 	ret = kstrtoint(buf, 0, &s3_state);
3542 
	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else {
			dm_suspend(adev);
		}
	}
3550 
	return ret == 0 ? count : ret;
3552 }
3553 
3554 DEVICE_ATTR_WO(s3_debug);
3555 
3556 #endif
3557 
3558 static int dm_early_init(void *handle)
3559 {
3560 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3561 
3562 	switch (adev->asic_type) {
3563 #if defined(CONFIG_DRM_AMD_DC_SI)
3564 	case CHIP_TAHITI:
3565 	case CHIP_PITCAIRN:
3566 	case CHIP_VERDE:
3567 		adev->mode_info.num_crtc = 6;
3568 		adev->mode_info.num_hpd = 6;
3569 		adev->mode_info.num_dig = 6;
3570 		break;
3571 	case CHIP_OLAND:
3572 		adev->mode_info.num_crtc = 2;
3573 		adev->mode_info.num_hpd = 2;
3574 		adev->mode_info.num_dig = 2;
3575 		break;
3576 #endif
3577 	case CHIP_BONAIRE:
3578 	case CHIP_HAWAII:
3579 		adev->mode_info.num_crtc = 6;
3580 		adev->mode_info.num_hpd = 6;
3581 		adev->mode_info.num_dig = 6;
3582 		break;
3583 	case CHIP_KAVERI:
3584 		adev->mode_info.num_crtc = 4;
3585 		adev->mode_info.num_hpd = 6;
3586 		adev->mode_info.num_dig = 7;
3587 		break;
3588 	case CHIP_KABINI:
3589 	case CHIP_MULLINS:
3590 		adev->mode_info.num_crtc = 2;
3591 		adev->mode_info.num_hpd = 6;
3592 		adev->mode_info.num_dig = 6;
3593 		break;
3594 	case CHIP_FIJI:
3595 	case CHIP_TONGA:
3596 		adev->mode_info.num_crtc = 6;
3597 		adev->mode_info.num_hpd = 6;
3598 		adev->mode_info.num_dig = 7;
3599 		break;
3600 	case CHIP_CARRIZO:
3601 		adev->mode_info.num_crtc = 3;
3602 		adev->mode_info.num_hpd = 6;
3603 		adev->mode_info.num_dig = 9;
3604 		break;
3605 	case CHIP_STONEY:
3606 		adev->mode_info.num_crtc = 2;
3607 		adev->mode_info.num_hpd = 6;
3608 		adev->mode_info.num_dig = 9;
3609 		break;
3610 	case CHIP_POLARIS11:
3611 	case CHIP_POLARIS12:
3612 		adev->mode_info.num_crtc = 5;
3613 		adev->mode_info.num_hpd = 5;
3614 		adev->mode_info.num_dig = 5;
3615 		break;
3616 	case CHIP_POLARIS10:
3617 	case CHIP_VEGAM:
3618 		adev->mode_info.num_crtc = 6;
3619 		adev->mode_info.num_hpd = 6;
3620 		adev->mode_info.num_dig = 6;
3621 		break;
3622 	case CHIP_VEGA10:
3623 	case CHIP_VEGA12:
3624 	case CHIP_VEGA20:
3625 		adev->mode_info.num_crtc = 6;
3626 		adev->mode_info.num_hpd = 6;
3627 		adev->mode_info.num_dig = 6;
3628 		break;
3629 #if defined(CONFIG_DRM_AMD_DC_DCN)
3630 	case CHIP_RAVEN:
3631 		adev->mode_info.num_crtc = 4;
3632 		adev->mode_info.num_hpd = 4;
3633 		adev->mode_info.num_dig = 4;
3634 		break;
3635 #endif
3636 	case CHIP_NAVI10:
3637 	case CHIP_NAVI12:
3638 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3639 	case CHIP_SIENNA_CICHLID:
3640 	case CHIP_NAVY_FLOUNDER:
3641 #endif
3642 		adev->mode_info.num_crtc = 6;
3643 		adev->mode_info.num_hpd = 6;
3644 		adev->mode_info.num_dig = 6;
3645 		break;
3646 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
3647 	case CHIP_VANGOGH:
3648 		adev->mode_info.num_crtc = 4;
3649 		adev->mode_info.num_hpd = 4;
3650 		adev->mode_info.num_dig = 4;
3651 		break;
3652 #endif
3653 	case CHIP_NAVI14:
3654 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
3655 	case CHIP_DIMGREY_CAVEFISH:
3656 #endif
3657 		adev->mode_info.num_crtc = 5;
3658 		adev->mode_info.num_hpd = 5;
3659 		adev->mode_info.num_dig = 5;
3660 		break;
3661 	case CHIP_RENOIR:
3662 		adev->mode_info.num_crtc = 4;
3663 		adev->mode_info.num_hpd = 4;
3664 		adev->mode_info.num_dig = 4;
3665 		break;
3666 	default:
3667 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3668 		return -EINVAL;
3669 	}
3670 
3671 	amdgpu_dm_set_irq_funcs(adev);
3672 
3673 	if (adev->mode_info.funcs == NULL)
3674 		adev->mode_info.funcs = &dm_display_funcs;
3675 
3676 	/*
3677 	 * Note: Do NOT change adev->audio_endpt_rreg and
3678 	 * adev->audio_endpt_wreg because they are initialised in
3679 	 * amdgpu_device_init()
3680 	 */
3681 #if defined(CONFIG_DEBUG_KERNEL_DC)
3682 	device_create_file(
3683 		adev_to_drm(adev)->dev,
3684 		&dev_attr_s3_debug);
3685 #endif
3686 
3687 	return 0;
3688 }
3689 
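/*
 * Note: new_stream and old_stream are currently unused here; a modeset is
 * required whenever the CRTC is active and DRM flags a modeset on it.
 */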
3690 static bool modeset_required(struct drm_crtc_state *crtc_state,
3691 			     struct dc_stream_state *new_stream,
3692 			     struct dc_stream_state *old_stream)
3693 {
3694 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3695 }
3696 
3697 static bool modereset_required(struct drm_crtc_state *crtc_state)
3698 {
3699 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3700 }
3701 
3702 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3703 {
3704 	drm_encoder_cleanup(encoder);
3705 	kfree(encoder);
3706 }
3707 
3708 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3709 	.destroy = amdgpu_dm_encoder_destroy,
3710 };
3712 
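/*
 * Translate the DRM plane state into DC scaling info: convert the 16.16
 * fixed-point source rectangle to integers, copy the destination rectangle,
 * and reject scaling ratios outside the 0.25x-16x range.
 */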
3713 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3714 				struct dc_scaling_info *scaling_info)
3715 {
3716 	int scale_w, scale_h;
3717 
3718 	memset(scaling_info, 0, sizeof(*scaling_info));
3719 
	/* Source is in 16.16 fixed point; ignore the fractional part for now. */
3721 	scaling_info->src_rect.x = state->src_x >> 16;
3722 	scaling_info->src_rect.y = state->src_y >> 16;
3723 
3724 	scaling_info->src_rect.width = state->src_w >> 16;
3725 	if (scaling_info->src_rect.width == 0)
3726 		return -EINVAL;
3727 
3728 	scaling_info->src_rect.height = state->src_h >> 16;
3729 	if (scaling_info->src_rect.height == 0)
3730 		return -EINVAL;
3731 
3732 	scaling_info->dst_rect.x = state->crtc_x;
3733 	scaling_info->dst_rect.y = state->crtc_y;
3734 
3735 	if (state->crtc_w == 0)
3736 		return -EINVAL;
3737 
3738 	scaling_info->dst_rect.width = state->crtc_w;
3739 
3740 	if (state->crtc_h == 0)
3741 		return -EINVAL;
3742 
3743 	scaling_info->dst_rect.height = state->crtc_h;
3744 
3745 	/* DRM doesn't specify clipping on destination output. */
3746 	scaling_info->clip_rect = scaling_info->dst_rect;
3747 
3748 	/* TODO: Validate scaling per-format with DC plane caps */
3749 	scale_w = scaling_info->dst_rect.width * 1000 /
3750 		  scaling_info->src_rect.width;
3751 
3752 	if (scale_w < 250 || scale_w > 16000)
3753 		return -EINVAL;
3754 
3755 	scale_h = scaling_info->dst_rect.height * 1000 /
3756 		  scaling_info->src_rect.height;
3757 
3758 	if (scale_h < 250 || scale_h > 16000)
3759 		return -EINVAL;
3760 
3761 	/*
3762 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3763 	 * assume reasonable defaults based on the format.
3764 	 */
3765 
3766 	return 0;
3767 }
3768 
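/*
 * Reserve the framebuffer's backing BO just long enough to read its tiling
 * flags and TMZ (encryption) status.
 */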
3769 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3770 		       uint64_t *tiling_flags, bool *tmz_surface)
3771 {
3772 	struct amdgpu_bo *rbo;
3773 	int r;
3774 
3775 	if (!amdgpu_fb) {
3776 		*tiling_flags = 0;
3777 		*tmz_surface = false;
3778 		return 0;
3779 	}
3780 
3781 	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3782 	r = amdgpu_bo_reserve(rbo, false);
3783 
3784 	if (unlikely(r)) {
3785 		/* Don't show error message when returning -ERESTARTSYS */
3786 		if (r != -ERESTARTSYS)
3787 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3788 		return r;
3789 	}
3790 
3791 	if (tiling_flags)
3792 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3793 
3794 	if (tmz_surface)
3795 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3796 
3797 	amdgpu_bo_unreserve(rbo);
3798 
3799 	return r;
3800 }
3801 
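/*
 * The DCC metadata offset is stored in the tiling flags in units of 256
 * bytes; a zero offset means the surface carries no DCC metadata.
 */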
3802 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3803 {
3804 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3805 
3806 	return offset ? (address + offset * 256) : 0;
3807 }
3808 
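/*
 * Ask DC whether DCC is supported for this surface and, if so, fill in the
 * DCC parameters and metadata address. Returns 0 with DCC left disabled
 * when DCC is forced off, absent from the tiling flags, or the format is a
 * video format.
 */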
3809 static int
3810 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3811 			  const struct amdgpu_framebuffer *afb,
3812 			  const enum surface_pixel_format format,
3813 			  const enum dc_rotation_angle rotation,
3814 			  const struct plane_size *plane_size,
3815 			  const union dc_tiling_info *tiling_info,
3816 			  const uint64_t info,
3817 			  struct dc_plane_dcc_param *dcc,
3818 			  struct dc_plane_address *address,
3819 			  bool force_disable_dcc)
3820 {
3821 	struct dc *dc = adev->dm.dc;
3822 	struct dc_dcc_surface_param input;
3823 	struct dc_surface_dcc_cap output;
3824 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3825 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3826 	uint64_t dcc_address;
3827 
3828 	memset(&input, 0, sizeof(input));
3829 	memset(&output, 0, sizeof(output));
3830 
3831 	if (force_disable_dcc)
3832 		return 0;
3833 
3834 	if (!offset)
3835 		return 0;
3836 
3837 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3838 		return 0;
3839 
3840 	if (!dc->cap_funcs.get_dcc_compression_cap)
3841 		return -EINVAL;
3842 
3843 	input.format = format;
3844 	input.surface_size.width = plane_size->surface_size.width;
3845 	input.surface_size.height = plane_size->surface_size.height;
3846 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3847 
3848 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3849 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3850 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3851 		input.scan = SCAN_DIRECTION_VERTICAL;
3852 
3853 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3854 		return -EINVAL;
3855 
3856 	if (!output.capable)
3857 		return -EINVAL;
3858 
3859 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3860 		return -EINVAL;
3861 
3862 	dcc->enable = 1;
3863 	dcc->meta_pitch =
3864 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3865 	dcc->independent_64b_blks = i64b;
3866 
3867 	dcc_address = get_dcc_address(afb->address, info);
3868 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3869 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3870 
3871 	return 0;
3872 }
3873 
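/*
 * Fill the DC tiling info, plane size, DCC parameters and surface address
 * from the framebuffer and tiling flags. Handles packed RGB as well as
 * semi-planar video surfaces, and both GFX8 and GFX9+ tiling schemes.
 */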
3874 static int
3875 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3876 			     const struct amdgpu_framebuffer *afb,
3877 			     const enum surface_pixel_format format,
3878 			     const enum dc_rotation_angle rotation,
3879 			     const uint64_t tiling_flags,
3880 			     union dc_tiling_info *tiling_info,
3881 			     struct plane_size *plane_size,
3882 			     struct dc_plane_dcc_param *dcc,
3883 			     struct dc_plane_address *address,
3884 			     bool tmz_surface,
3885 			     bool force_disable_dcc)
3886 {
3887 	const struct drm_framebuffer *fb = &afb->base;
3888 	int ret;
3889 
3890 	memset(tiling_info, 0, sizeof(*tiling_info));
3891 	memset(plane_size, 0, sizeof(*plane_size));
3892 	memset(dcc, 0, sizeof(*dcc));
3893 	memset(address, 0, sizeof(*address));
3894 
3895 	address->tmz_surface = tmz_surface;
3896 
3897 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3898 		plane_size->surface_size.x = 0;
3899 		plane_size->surface_size.y = 0;
3900 		plane_size->surface_size.width = fb->width;
3901 		plane_size->surface_size.height = fb->height;
3902 		plane_size->surface_pitch =
3903 			fb->pitches[0] / fb->format->cpp[0];
3904 
3905 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3906 		address->grph.addr.low_part = lower_32_bits(afb->address);
3907 		address->grph.addr.high_part = upper_32_bits(afb->address);
3908 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3909 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3910 
3911 		plane_size->surface_size.x = 0;
3912 		plane_size->surface_size.y = 0;
3913 		plane_size->surface_size.width = fb->width;
3914 		plane_size->surface_size.height = fb->height;
3915 		plane_size->surface_pitch =
3916 			fb->pitches[0] / fb->format->cpp[0];
3917 
3918 		plane_size->chroma_size.x = 0;
3919 		plane_size->chroma_size.y = 0;
3920 		/* TODO: set these based on surface format */
3921 		plane_size->chroma_size.width = fb->width / 2;
3922 		plane_size->chroma_size.height = fb->height / 2;
3923 
3924 		plane_size->chroma_pitch =
3925 			fb->pitches[1] / fb->format->cpp[1];
3926 
3927 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3928 		address->video_progressive.luma_addr.low_part =
3929 			lower_32_bits(afb->address);
3930 		address->video_progressive.luma_addr.high_part =
3931 			upper_32_bits(afb->address);
3932 		address->video_progressive.chroma_addr.low_part =
3933 			lower_32_bits(chroma_addr);
3934 		address->video_progressive.chroma_addr.high_part =
3935 			upper_32_bits(chroma_addr);
3936 	}
3937 
3938 	/* Fill GFX8 params */
3939 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3940 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3941 
3942 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3943 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3944 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3945 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3946 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3947 
3948 		/* XXX fix me for VI */
3949 		tiling_info->gfx8.num_banks = num_banks;
3950 		tiling_info->gfx8.array_mode =
3951 				DC_ARRAY_2D_TILED_THIN1;
3952 		tiling_info->gfx8.tile_split = tile_split;
3953 		tiling_info->gfx8.bank_width = bankw;
3954 		tiling_info->gfx8.bank_height = bankh;
3955 		tiling_info->gfx8.tile_aspect = mtaspect;
3956 		tiling_info->gfx8.tile_mode =
3957 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3958 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3959 			== DC_ARRAY_1D_TILED_THIN1) {
3960 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3961 	}
3962 
3963 	tiling_info->gfx8.pipe_config =
3964 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3965 
3966 	if (adev->asic_type == CHIP_VEGA10 ||
3967 	    adev->asic_type == CHIP_VEGA12 ||
3968 	    adev->asic_type == CHIP_VEGA20 ||
3969 	    adev->asic_type == CHIP_NAVI10 ||
3970 	    adev->asic_type == CHIP_NAVI14 ||
3971 	    adev->asic_type == CHIP_NAVI12 ||
3972 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3973 		adev->asic_type == CHIP_SIENNA_CICHLID ||
3974 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
3975 #endif
3976 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
3977 		adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3978 #endif
3979 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
3980 		adev->asic_type == CHIP_VANGOGH ||
3981 #endif
3982 	    adev->asic_type == CHIP_RENOIR ||
3983 	    adev->asic_type == CHIP_RAVEN) {
3984 		/* Fill GFX9 params */
3985 		tiling_info->gfx9.num_pipes =
3986 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3987 		tiling_info->gfx9.num_banks =
3988 			adev->gfx.config.gb_addr_config_fields.num_banks;
3989 		tiling_info->gfx9.pipe_interleave =
3990 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3991 		tiling_info->gfx9.num_shader_engines =
3992 			adev->gfx.config.gb_addr_config_fields.num_se;
3993 		tiling_info->gfx9.max_compressed_frags =
3994 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3995 		tiling_info->gfx9.num_rb_per_se =
3996 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3997 		tiling_info->gfx9.swizzle =
3998 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3999 		tiling_info->gfx9.shaderEnable = 1;
4000 
4001 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
4002 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4003 		    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4004 		    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4005 		    adev->asic_type == CHIP_VANGOGH)
4006 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4007 #endif
4008 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4009 						plane_size, tiling_info,
4010 						tiling_flags, dcc, address,
4011 						force_disable_dcc);
4012 		if (ret)
4013 			return ret;
4014 	}
4015 
4016 	return 0;
4017 }
4018 
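/*
 * Derive per-pixel and global alpha blending settings from the DRM plane
 * state. Only overlay planes participate in blending here.
 */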
4019 static void
4020 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4021 			       bool *per_pixel_alpha, bool *global_alpha,
4022 			       int *global_alpha_value)
4023 {
4024 	*per_pixel_alpha = false;
4025 	*global_alpha = false;
4026 	*global_alpha_value = 0xff;
4027 
4028 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4029 		return;
4030 
4031 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4032 		static const uint32_t alpha_formats[] = {
4033 			DRM_FORMAT_ARGB8888,
4034 			DRM_FORMAT_RGBA8888,
4035 			DRM_FORMAT_ABGR8888,
4036 		};
4037 		uint32_t format = plane_state->fb->format->format;
4038 		unsigned int i;
4039 
4040 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4041 			if (format == alpha_formats[i]) {
4042 				*per_pixel_alpha = true;
4043 				break;
4044 			}
4045 		}
4046 	}
4047 
4048 	if (plane_state->alpha < 0xffff) {
4049 		*global_alpha = true;
4050 		*global_alpha_value = plane_state->alpha >> 8;
4051 	}
4052 }
4053 
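/*
 * Map the DRM color encoding and range properties to a DC color space.
 * RGB formats always use sRGB; YCbCr formats select BT.601/709/2020 in
 * full or limited range (limited-range BT.2020 is rejected).
 */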
4054 static int
4055 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4056 			    const enum surface_pixel_format format,
4057 			    enum dc_color_space *color_space)
4058 {
4059 	bool full_range;
4060 
4061 	*color_space = COLOR_SPACE_SRGB;
4062 
4063 	/* DRM color properties only affect non-RGB formats. */
4064 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4065 		return 0;
4066 
4067 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4068 
4069 	switch (plane_state->color_encoding) {
4070 	case DRM_COLOR_YCBCR_BT601:
4071 		if (full_range)
4072 			*color_space = COLOR_SPACE_YCBCR601;
4073 		else
4074 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4075 		break;
4076 
4077 	case DRM_COLOR_YCBCR_BT709:
4078 		if (full_range)
4079 			*color_space = COLOR_SPACE_YCBCR709;
4080 		else
4081 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4082 		break;
4083 
4084 	case DRM_COLOR_YCBCR_BT2020:
4085 		if (full_range)
4086 			*color_space = COLOR_SPACE_2020_YCBCR;
4087 		else
4088 			return -EINVAL;
4089 		break;
4090 
4091 	default:
4092 		return -EINVAL;
4093 	}
4094 
4095 	return 0;
4096 }
4097 
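/*
 * Build the complete DC plane info (format, rotation, tiling, color space,
 * DCC, blending) and the surface address from a DRM plane state.
 */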
4098 static int
4099 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4100 			    const struct drm_plane_state *plane_state,
4101 			    const uint64_t tiling_flags,
4102 			    struct dc_plane_info *plane_info,
4103 			    struct dc_plane_address *address,
4104 			    bool tmz_surface,
4105 			    bool force_disable_dcc)
4106 {
4107 	const struct drm_framebuffer *fb = plane_state->fb;
4108 	const struct amdgpu_framebuffer *afb =
4109 		to_amdgpu_framebuffer(plane_state->fb);
4110 	struct drm_format_name_buf format_name;
4111 	int ret;
4112 
4113 	memset(plane_info, 0, sizeof(*plane_info));
4114 
4115 	switch (fb->format->format) {
4116 	case DRM_FORMAT_C8:
4117 		plane_info->format =
4118 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4119 		break;
4120 	case DRM_FORMAT_RGB565:
4121 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4122 		break;
4123 	case DRM_FORMAT_XRGB8888:
4124 	case DRM_FORMAT_ARGB8888:
4125 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4126 		break;
4127 	case DRM_FORMAT_XRGB2101010:
4128 	case DRM_FORMAT_ARGB2101010:
4129 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4130 		break;
4131 	case DRM_FORMAT_XBGR2101010:
4132 	case DRM_FORMAT_ABGR2101010:
4133 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4134 		break;
4135 	case DRM_FORMAT_XBGR8888:
4136 	case DRM_FORMAT_ABGR8888:
4137 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4138 		break;
4139 	case DRM_FORMAT_NV21:
4140 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4141 		break;
4142 	case DRM_FORMAT_NV12:
4143 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4144 		break;
4145 	case DRM_FORMAT_P010:
4146 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4147 		break;
4148 	case DRM_FORMAT_XRGB16161616F:
4149 	case DRM_FORMAT_ARGB16161616F:
4150 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4151 		break;
4152 	case DRM_FORMAT_XBGR16161616F:
4153 	case DRM_FORMAT_ABGR16161616F:
4154 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4155 		break;
4156 	default:
4157 		DRM_ERROR(
4158 			"Unsupported screen format %s\n",
4159 			drm_get_format_name(fb->format->format, &format_name));
4160 		return -EINVAL;
4161 	}
4162 
4163 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4164 	case DRM_MODE_ROTATE_0:
4165 		plane_info->rotation = ROTATION_ANGLE_0;
4166 		break;
4167 	case DRM_MODE_ROTATE_90:
4168 		plane_info->rotation = ROTATION_ANGLE_90;
4169 		break;
4170 	case DRM_MODE_ROTATE_180:
4171 		plane_info->rotation = ROTATION_ANGLE_180;
4172 		break;
4173 	case DRM_MODE_ROTATE_270:
4174 		plane_info->rotation = ROTATION_ANGLE_270;
4175 		break;
4176 	default:
4177 		plane_info->rotation = ROTATION_ANGLE_0;
4178 		break;
4179 	}
4180 
4181 	plane_info->visible = true;
4182 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4183 
4184 	plane_info->layer_index = 0;
4185 
4186 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4187 					  &plane_info->color_space);
4188 	if (ret)
4189 		return ret;
4190 
4191 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4192 					   plane_info->rotation, tiling_flags,
4193 					   &plane_info->tiling_info,
4194 					   &plane_info->plane_size,
4195 					   &plane_info->dcc, address, tmz_surface,
4196 					   force_disable_dcc);
4197 	if (ret)
4198 		return ret;
4199 
4200 	fill_blending_from_plane_state(
4201 		plane_state, &plane_info->per_pixel_alpha,
4202 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4203 
4204 	return 0;
4205 }
4206 
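/*
 * Fill a dc_plane_state from the DRM plane and CRTC state: scaling, buffer
 * attributes, blending, and the input color management settings.
 */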
4207 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4208 				    struct dc_plane_state *dc_plane_state,
4209 				    struct drm_plane_state *plane_state,
4210 				    struct drm_crtc_state *crtc_state)
4211 {
4212 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4213 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4214 	struct dc_scaling_info scaling_info;
4215 	struct dc_plane_info plane_info;
4216 	int ret;
4217 	bool force_disable_dcc = false;
4218 
4219 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4220 	if (ret)
4221 		return ret;
4222 
4223 	dc_plane_state->src_rect = scaling_info.src_rect;
4224 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4225 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4226 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4227 
4228 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4229 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4230 					  dm_plane_state->tiling_flags,
4231 					  &plane_info,
4232 					  &dc_plane_state->address,
4233 					  dm_plane_state->tmz_surface,
4234 					  force_disable_dcc);
4235 	if (ret)
4236 		return ret;
4237 
	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
4241 	dc_plane_state->plane_size = plane_info.plane_size;
4242 	dc_plane_state->rotation = plane_info.rotation;
4243 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4244 	dc_plane_state->stereo_format = plane_info.stereo_format;
4245 	dc_plane_state->tiling_info = plane_info.tiling_info;
4246 	dc_plane_state->visible = plane_info.visible;
4247 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4248 	dc_plane_state->global_alpha = plane_info.global_alpha;
4249 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4250 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4252 
4253 	/*
4254 	 * Always set input transfer function, since plane state is refreshed
4255 	 * every time.
4256 	 */
4257 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4258 	if (ret)
4259 		return ret;
4260 
4261 	return 0;
4262 }
4263 
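/*
 * Compute the stream src (viewport) and dst (addressable area) rectangles
 * for the requested scaling mode and underscan borders.
 */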
4264 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4265 					   const struct dm_connector_state *dm_state,
4266 					   struct dc_stream_state *stream)
4267 {
4268 	enum amdgpu_rmx_type rmx_type;
4269 
	struct rect src = { 0 }; /* viewport in composition space */
4271 	struct rect dst = { 0 }; /* stream addressable area */
4272 
	/* No mode; nothing to be done */
4274 	if (!mode)
4275 		return;
4276 
4277 	/* Full screen scaling by default */
4278 	src.width = mode->hdisplay;
4279 	src.height = mode->vdisplay;
4280 	dst.width = stream->timing.h_addressable;
4281 	dst.height = stream->timing.v_addressable;
4282 
4283 	if (dm_state) {
4284 		rmx_type = dm_state->scaling;
4285 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4286 			if (src.width * dst.height <
4287 					src.height * dst.width) {
4288 				/* height needs less upscaling/more downscaling */
4289 				dst.width = src.width *
4290 						dst.height / src.height;
4291 			} else {
4292 				/* width needs less upscaling/more downscaling */
4293 				dst.height = src.height *
4294 						dst.width / src.width;
4295 			}
4296 		} else if (rmx_type == RMX_CENTER) {
4297 			dst = src;
4298 		}
4299 
4300 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4301 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4302 
4303 		if (dm_state->underscan_enable) {
4304 			dst.x += dm_state->underscan_hborder / 2;
4305 			dst.y += dm_state->underscan_vborder / 2;
4306 			dst.width -= dm_state->underscan_hborder;
4307 			dst.height -= dm_state->underscan_vborder;
4308 		}
4309 	}
4310 
4311 	stream->src = src;
4312 	stream->dst = dst;
4313 
4314 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4315 			dst.x, dst.y, dst.width, dst.height);
4316 
4317 }
4318 
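/*
 * Derive the DC colour depth from the sink's EDID-reported bpc, capped by
 * the user-requested bpc and, for YCbCr 4:2:0, by the HDMI deep colour
 * modes the sink advertises.
 */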
4319 static enum dc_color_depth
4320 convert_color_depth_from_display_info(const struct drm_connector *connector,
4321 				      bool is_y420, int requested_bpc)
4322 {
4323 	uint8_t bpc;
4324 
4325 	if (is_y420) {
4326 		bpc = 8;
4327 
4328 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4329 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4330 			bpc = 16;
4331 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4332 			bpc = 12;
4333 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4334 			bpc = 10;
4335 	} else {
4336 		bpc = (uint8_t)connector->display_info.bpc;
4337 		/* Assume 8 bpc by default if no bpc is specified. */
4338 		bpc = bpc ? bpc : 8;
4339 	}
4340 
4341 	if (requested_bpc > 0) {
4342 		/*
4343 		 * Cap display bpc based on the user requested value.
4344 		 *
4345 		 * The value for state->max_bpc may not correctly updated
4346 		 * depending on when the connector gets added to the state
4347 		 * or if this was called outside of atomic check, so it
4348 		 * can't be used directly.
4349 		 */
4350 		bpc = min_t(u8, bpc, requested_bpc);
4351 
4352 		/* Round down to the nearest even number. */
4353 		bpc = bpc - (bpc & 1);
4354 	}
4355 
4356 	switch (bpc) {
4357 	case 0:
4358 		/*
4359 		 * Temporary Work around, DRM doesn't parse color depth for
4360 		 * EDID revision before 1.4
4361 		 * TODO: Fix edid parsing
4362 		 */
4363 		return COLOR_DEPTH_888;
4364 	case 6:
4365 		return COLOR_DEPTH_666;
4366 	case 8:
4367 		return COLOR_DEPTH_888;
4368 	case 10:
4369 		return COLOR_DEPTH_101010;
4370 	case 12:
4371 		return COLOR_DEPTH_121212;
4372 	case 14:
4373 		return COLOR_DEPTH_141414;
4374 	case 16:
4375 		return COLOR_DEPTH_161616;
4376 	default:
4377 		return COLOR_DEPTH_UNDEFINED;
4378 	}
4379 }
4380 
4381 static enum dc_aspect_ratio
4382 get_aspect_ratio(const struct drm_display_mode *mode_in)
4383 {
4384 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4385 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4386 }
4387 
4388 static enum dc_color_space
4389 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4390 {
4391 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4392 
	switch (dc_crtc_timing->pixel_encoding) {
4394 	case PIXEL_ENCODING_YCBCR422:
4395 	case PIXEL_ENCODING_YCBCR444:
4396 	case PIXEL_ENCODING_YCBCR420:
4397 	{
4398 		/*
4399 		 * 27030khz is the separation point between HDTV and SDTV
4400 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4401 		 * respectively
4402 		 */
4403 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4404 			if (dc_crtc_timing->flags.Y_ONLY)
4405 				color_space =
4406 					COLOR_SPACE_YCBCR709_LIMITED;
4407 			else
4408 				color_space = COLOR_SPACE_YCBCR709;
4409 		} else {
4410 			if (dc_crtc_timing->flags.Y_ONLY)
4411 				color_space =
4412 					COLOR_SPACE_YCBCR601_LIMITED;
4413 			else
4414 				color_space = COLOR_SPACE_YCBCR601;
4415 		}
4416 
4417 	}
4418 	break;
4419 	case PIXEL_ENCODING_RGB:
4420 		color_space = COLOR_SPACE_SRGB;
4421 		break;
4422 
4423 	default:
4424 		WARN_ON(1);
4425 		break;
4426 	}
4427 
4428 	return color_space;
4429 }
4430 
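/*
 * Walk down from the current colour depth until the depth-adjusted pixel
 * clock fits within the sink's maximum TMDS clock. Returns false when no
 * HDMI-valid depth fits.
 */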
4431 static bool adjust_colour_depth_from_display_info(
4432 	struct dc_crtc_timing *timing_out,
4433 	const struct drm_display_info *info)
4434 {
4435 	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
4438 		normalized_clk = timing_out->pix_clk_100hz / 10;
4439 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4440 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4441 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec, based on the colour depth */
4443 		switch (depth) {
4444 		case COLOR_DEPTH_888:
4445 			break;
4446 		case COLOR_DEPTH_101010:
4447 			normalized_clk = (normalized_clk * 30) / 24;
4448 			break;
4449 		case COLOR_DEPTH_121212:
4450 			normalized_clk = (normalized_clk * 36) / 24;
4451 			break;
4452 		case COLOR_DEPTH_161616:
4453 			normalized_clk = (normalized_clk * 48) / 24;
4454 			break;
4455 		default:
4456 			/* The above depths are the only ones valid for HDMI. */
4457 			return false;
4458 		}
4459 		if (normalized_clk <= info->max_tmds_clock) {
4460 			timing_out->display_color_depth = depth;
4461 			return true;
4462 		}
4463 	} while (--depth > COLOR_DEPTH_666);
4464 	return false;
4465 }
4466 
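/*
 * Translate a DRM display mode plus connector info into DC stream timing:
 * pixel encoding, colour depth, VIC, sync polarities, and the porch/sync
 * geometry.
 */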
4467 static void fill_stream_properties_from_drm_display_mode(
4468 	struct dc_stream_state *stream,
4469 	const struct drm_display_mode *mode_in,
4470 	const struct drm_connector *connector,
4471 	const struct drm_connector_state *connector_state,
4472 	const struct dc_stream_state *old_stream,
4473 	int requested_bpc)
4474 {
4475 	struct dc_crtc_timing *timing_out = &stream->timing;
4476 	const struct drm_display_info *info = &connector->display_info;
4477 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4478 	struct hdmi_vendor_infoframe hv_frame;
4479 	struct hdmi_avi_infoframe avi_frame;
4480 
4481 	memset(&hv_frame, 0, sizeof(hv_frame));
4482 	memset(&avi_frame, 0, sizeof(avi_frame));
4483 
4484 	timing_out->h_border_left = 0;
4485 	timing_out->h_border_right = 0;
4486 	timing_out->v_border_top = 0;
4487 	timing_out->v_border_bottom = 0;
4488 	/* TODO: un-hardcode */
4489 	if (drm_mode_is_420_only(info, mode_in)
4490 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4491 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4492 	else if (drm_mode_is_420_also(info, mode_in)
4493 			&& aconnector->force_yuv420_output)
4494 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4495 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4496 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4497 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4498 	else
4499 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4500 
4501 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4502 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4503 		connector,
4504 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4505 		requested_bpc);
4506 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4507 	timing_out->hdmi_vic = 0;
4508 
	if (old_stream) {
4510 		timing_out->vic = old_stream->timing.vic;
4511 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4512 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4513 	} else {
4514 		timing_out->vic = drm_match_cea_mode(mode_in);
4515 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4516 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4517 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4518 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4519 	}
4520 
4521 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4522 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4523 		timing_out->vic = avi_frame.video_code;
4524 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4525 		timing_out->hdmi_vic = hv_frame.vic;
4526 	}
4527 
4528 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4529 	timing_out->h_total = mode_in->crtc_htotal;
4530 	timing_out->h_sync_width =
4531 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4532 	timing_out->h_front_porch =
4533 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4534 	timing_out->v_total = mode_in->crtc_vtotal;
4535 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4536 	timing_out->v_front_porch =
4537 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4538 	timing_out->v_sync_width =
4539 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4540 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4541 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4542 
4543 	stream->output_color_space = get_output_color_space(timing_out);
4544 
4545 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4546 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4547 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4548 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4549 		    drm_mode_is_420_also(info, mode_in) &&
4550 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4551 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4552 			adjust_colour_depth_from_display_info(timing_out, info);
4553 		}
4554 	}
4555 }
4556 
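/*
 * Copy the audio capabilities parsed from the sink's EDID into the DC
 * audio_info, including the CEA-861 audio modes when the CEA revision is
 * at least 3.
 */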
4557 static void fill_audio_info(struct audio_info *audio_info,
4558 			    const struct drm_connector *drm_connector,
4559 			    const struct dc_sink *dc_sink)
4560 {
4561 	int i = 0;
4562 	int cea_revision = 0;
4563 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4564 
4565 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4566 	audio_info->product_id = edid_caps->product_id;
4567 
4568 	cea_revision = drm_connector->display_info.cea_rev;
4569 
4570 	strscpy(audio_info->display_name,
4571 		edid_caps->display_name,
4572 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4573 
4574 	if (cea_revision >= 3) {
4575 		audio_info->mode_count = edid_caps->audio_mode_count;
4576 
4577 		for (i = 0; i < audio_info->mode_count; ++i) {
4578 			audio_info->modes[i].format_code =
4579 					(enum audio_format_code)
4580 					(edid_caps->audio_modes[i].format_code);
4581 			audio_info->modes[i].channel_count =
4582 					edid_caps->audio_modes[i].channel_count;
4583 			audio_info->modes[i].sample_rates.all =
4584 					edid_caps->audio_modes[i].sample_rate;
4585 			audio_info->modes[i].sample_size =
4586 					edid_caps->audio_modes[i].sample_size;
4587 		}
4588 	}
4589 
4590 	audio_info->flags.all = edid_caps->speaker_flags;
4591 
	/* TODO: We only check for progressive mode; check for interlaced mode too */
4593 	if (drm_connector->latency_present[0]) {
4594 		audio_info->video_latency = drm_connector->video_latency[0];
4595 		audio_info->audio_latency = drm_connector->audio_latency[0];
4596 	}
4597 
4598 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4599 
4600 }
4601 
4602 static void
4603 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4604 				      struct drm_display_mode *dst_mode)
4605 {
4606 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4607 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4608 	dst_mode->crtc_clock = src_mode->crtc_clock;
4609 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4610 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4611 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4612 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4613 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4614 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4615 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4616 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4617 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4618 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4619 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4620 }
4621 
4622 static void
4623 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4624 					const struct drm_display_mode *native_mode,
4625 					bool scale_enabled)
4626 {
4627 	if (scale_enabled) {
4628 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4629 	} else if (native_mode->clock == drm_mode->clock &&
4630 			native_mode->htotal == drm_mode->htotal &&
4631 			native_mode->vtotal == drm_mode->vtotal) {
4632 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4633 	} else {
		/* No scaling and no amdgpu-inserted mode: nothing to patch */
4635 	}
4636 }
4637 
4638 static struct dc_sink *
4639 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4640 {
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
4644 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4645 
4646 	sink = dc_sink_create(&sink_init_data);
4647 	if (!sink) {
4648 		DRM_ERROR("Failed to create sink!\n");
4649 		return NULL;
4650 	}
4651 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4652 
4653 	return sink;
4654 }
4655 
4656 static void set_multisync_trigger_params(
4657 		struct dc_stream_state *stream)
4658 {
4659 	if (stream->triggered_crtc_reset.enabled) {
4660 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4661 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4662 	}
4663 }
4664 
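/*
 * Pick the multisync master stream: among the reset-enabled streams, the
 * one with the highest refresh rate drives the trigger event for all
 * others.
 */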
4665 static void set_master_stream(struct dc_stream_state *stream_set[],
4666 			      int stream_count)
4667 {
4668 	int j, highest_rfr = 0, master_stream = 0;
4669 
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
4676 			if (refresh_rate > highest_rfr) {
4677 				highest_rfr = refresh_rate;
4678 				master_stream = j;
4679 			}
4680 		}
4681 	}
	for (j = 0; j < stream_count; j++) {
4683 		if (stream_set[j])
4684 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4685 	}
4686 }
4687 
4688 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4689 {
4690 	int i = 0;
4691 
4692 	if (context->stream_count < 2)
4693 		return;
	for (i = 0; i < context->stream_count; i++) {
4695 		if (!context->streams[i])
4696 			continue;
4697 		/*
4698 		 * TODO: add a function to read AMD VSDB bits and set
4699 		 * crtc_sync_master.multi_sync_enabled flag
4700 		 * For now it's set to false
4701 		 */
4702 		set_multisync_trigger_params(context->streams[i]);
4703 	}
4704 	set_master_stream(context->streams, context->stream_count);
4705 }
4706 
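/*
 * Create a dc_stream_state for the connector's sink (or a fake sink when
 * none is attached), fill in its timing, audio and colorimetry properties,
 * and configure DSC from the DPCD caps and any debugfs overrides.
 */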
4707 static struct dc_stream_state *
4708 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4709 		       const struct drm_display_mode *drm_mode,
4710 		       const struct dm_connector_state *dm_state,
4711 		       const struct dc_stream_state *old_stream,
4712 		       int requested_bpc)
4713 {
4714 	struct drm_display_mode *preferred_mode = NULL;
4715 	struct drm_connector *drm_connector;
4716 	const struct drm_connector_state *con_state =
4717 		dm_state ? &dm_state->base : NULL;
4718 	struct dc_stream_state *stream = NULL;
4719 	struct drm_display_mode mode = *drm_mode;
4720 	bool native_mode_found = false;
4721 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4722 	int mode_refresh;
4723 	int preferred_refresh = 0;
4724 #if defined(CONFIG_DRM_AMD_DC_DCN)
4725 	struct dsc_dec_dpcd_caps dsc_caps;
4726 #endif
4727 	uint32_t link_bandwidth_kbps;
4728 
	struct dc_sink *sink = NULL;

	if (!aconnector) {
4731 		DRM_ERROR("aconnector is NULL!\n");
4732 		return stream;
4733 	}
4734 
4735 	drm_connector = &aconnector->base;
4736 
4737 	if (!aconnector->dc_sink) {
4738 		sink = create_fake_sink(aconnector);
4739 		if (!sink)
4740 			return stream;
4741 	} else {
4742 		sink = aconnector->dc_sink;
4743 		dc_sink_retain(sink);
4744 	}
4745 
4746 	stream = dc_create_stream_for_sink(sink);
4747 
4748 	if (stream == NULL) {
4749 		DRM_ERROR("Failed to create stream for sink!\n");
4750 		goto finish;
4751 	}
4752 
4753 	stream->dm_stream_context = aconnector;
4754 
4755 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4756 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4757 
4758 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4759 		/* Search for preferred mode */
4760 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4761 			native_mode_found = true;
4762 			break;
4763 		}
4764 	}
4765 	if (!native_mode_found)
4766 		preferred_mode = list_first_entry_or_null(
4767 				&aconnector->base.modes,
4768 				struct drm_display_mode,
4769 				head);
4770 
4771 	mode_refresh = drm_mode_vrefresh(&mode);
4772 
4773 	if (preferred_mode == NULL) {
4774 		/*
4775 		 * This may not be an error, the use case is when we have no
4776 		 * usermode calls to reset and set mode upon hotplug. In this
4777 		 * case, we call set mode ourselves to restore the previous mode
4778 		 * and the modelist may not be filled in in time.
4779 		 */
4780 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4781 	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode, scale);
4785 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4786 	}
4787 
4788 	if (!dm_state)
4789 		drm_mode_set_crtcinfo(&mode, 0);
4790 
4791 	/*
4792 	* If scaling is enabled and refresh rate didn't change
4793 	* we copy the vic and polarities of the old timings
4794 	*/
4795 	if (!scale || mode_refresh != preferred_refresh)
4796 		fill_stream_properties_from_drm_display_mode(stream,
4797 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4798 	else
4799 		fill_stream_properties_from_drm_display_mode(stream,
4800 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4801 
4802 	stream->timing.flags.DSC = 0;
4803 
4804 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4805 #if defined(CONFIG_DRM_AMD_DC_DCN)
4806 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4807 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4808 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4809 				      &dsc_caps);
4810 #endif
4811 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4812 							     dc_link_get_link_cap(aconnector->dc_link));
4813 
4814 #if defined(CONFIG_DRM_AMD_DC_DCN)
4815 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4816 			/* Set DSC policy according to dsc_clock_en */
4817 			dc_dsc_policy_set_enable_dsc_when_not_needed(
4818 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4819 
4820 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4821 						  &dsc_caps,
4822 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4823 						  0,
4824 						  link_bandwidth_kbps,
4825 						  &stream->timing,
4826 						  &stream->timing.dsc_cfg))
4827 				stream->timing.flags.DSC = 1;
4828 			/* Overwrite the stream flag if DSC is enabled through debugfs */
4829 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4830 				stream->timing.flags.DSC = 1;
4831 
4832 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4833 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4834 
4835 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4836 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4837 
4838 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4839 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4840 		}
4841 #endif
4842 	}
4843 
4844 	update_stream_scaling_settings(&mode, dm_state, stream);
4845 
4846 	fill_audio_info(
4847 		&stream->audio_info,
4848 		drm_connector,
4849 		sink);
4850 
4851 	update_stream_signal(stream, sink);
4852 
4853 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4854 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4855 
4856 	if (stream->link->psr_settings.psr_feature_enabled) {
4857 		//
4858 		// should decide stream support vsc sdp colorimetry capability
4859 		// before building vsc info packet
4860 		//
4861 		stream->use_vsc_sdp_for_colorimetry = false;
4862 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4863 			stream->use_vsc_sdp_for_colorimetry =
4864 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4865 		} else {
4866 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4867 				stream->use_vsc_sdp_for_colorimetry = true;
4868 		}
4869 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4870 	}
4871 finish:
4872 	dc_sink_release(sink);
4873 
4874 	return stream;
4875 }
4876 
4877 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4878 {
4879 	drm_crtc_cleanup(crtc);
4880 	kfree(crtc);
4881 }
4882 
4883 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4884 				  struct drm_crtc_state *state)
4885 {
4886 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4887 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4889 	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
4897 }
4898 
4899 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4900 {
4901 	struct dm_crtc_state *state;
4902 
4903 	if (crtc->state)
4904 		dm_crtc_destroy_state(crtc, crtc->state);
4905 
4906 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4907 	if (WARN_ON(!state))
4908 		return;
4909 
4910 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4911 }
4912 
4913 static struct drm_crtc_state *
4914 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4915 {
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
4922 
4923 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4924 	if (!state)
4925 		return NULL;
4926 
4927 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4928 
4929 	if (cur->stream) {
4930 		state->stream = cur->stream;
4931 		dc_stream_retain(state->stream);
4932 	}
4933 
4934 	state->active_planes = cur->active_planes;
4935 	state->vrr_infopacket = cur->vrr_infopacket;
4936 	state->abm_level = cur->abm_level;
4937 	state->vrr_supported = cur->vrr_supported;
4938 	state->freesync_config = cur->freesync_config;
4939 	state->crc_src = cur->crc_src;
4940 	state->cm_has_degamma = cur->cm_has_degamma;
4941 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4942 
	/* TODO: Duplicate the dc_stream once the stream object is flattened */
4944 
4945 	return &state->base;
4946 }
4947 
4948 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4949 {
4950 	enum dc_irq_source irq_source;
4951 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4952 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4953 	int rc;
4954 
4955 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4956 
4957 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4958 
4959 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4960 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4961 	return rc;
4962 }
4963 
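/*
 * VBLANK on also needs the VUPDATE interrupt, but only in VRR mode;
 * VBLANK off always turns VUPDATE off.
 */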
4964 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4965 {
4966 	enum dc_irq_source irq_source;
4967 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4968 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4969 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4970 	int rc = 0;
4971 
4972 	if (enable) {
4973 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4974 		if (amdgpu_dm_vrr_active(acrtc_state))
4975 			rc = dm_set_vupdate_irq(crtc, true);
4976 	} else {
4977 		/* vblank irq off -> vupdate irq off */
4978 		rc = dm_set_vupdate_irq(crtc, false);
4979 	}
4980 
4981 	if (rc)
4982 		return rc;
4983 
4984 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4985 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4986 }
4987 
4988 static int dm_enable_vblank(struct drm_crtc *crtc)
4989 {
4990 	return dm_set_vblank(crtc, true);
4991 }
4992 
4993 static void dm_disable_vblank(struct drm_crtc *crtc)
4994 {
4995 	dm_set_vblank(crtc, false);
4996 }
4997 
/* Implements only the options currently available for the driver */
4999 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5000 	.reset = dm_crtc_reset_state,
5001 	.destroy = amdgpu_dm_crtc_destroy,
5002 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
5003 	.set_config = drm_atomic_helper_set_config,
5004 	.page_flip = drm_atomic_helper_page_flip,
5005 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5006 	.atomic_destroy_state = dm_crtc_destroy_state,
5007 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5008 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5009 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5010 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5011 	.enable_vblank = dm_enable_vblank,
5012 	.disable_vblank = dm_disable_vblank,
5013 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5014 };
5015 
5016 static enum drm_connector_status
5017 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5018 {
5019 	bool connected;
5020 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5021 
5022 	/*
5023 	 * Notes:
5024 	 * 1. This interface is NOT called in context of HPD irq.
5025 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5026 	 * makes it a bad place for *any* MST-related activity.
5027 	 */
5028 
5029 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5030 	    !aconnector->fake_enable)
5031 		connected = (aconnector->dc_sink != NULL);
5032 	else
5033 		connected = (aconnector->base.force == DRM_FORCE_ON);
5034 
5035 	update_subconnector_property(aconnector);
5036 
5037 	return (connected ? connector_status_connected :
5038 			connector_status_disconnected);
5039 }
5040 
5041 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5042 					    struct drm_connector_state *connector_state,
5043 					    struct drm_property *property,
5044 					    uint64_t val)
5045 {
5046 	struct drm_device *dev = connector->dev;
5047 	struct amdgpu_device *adev = drm_to_adev(dev);
5048 	struct dm_connector_state *dm_old_state =
5049 		to_dm_connector_state(connector->state);
5050 	struct dm_connector_state *dm_new_state =
5051 		to_dm_connector_state(connector_state);
5052 
5053 	int ret = -EINVAL;
5054 
5055 	if (property == dev->mode_config.scaling_mode_property) {
5056 		enum amdgpu_rmx_type rmx_type;
5057 
5058 		switch (val) {
5059 		case DRM_MODE_SCALE_CENTER:
5060 			rmx_type = RMX_CENTER;
5061 			break;
5062 		case DRM_MODE_SCALE_ASPECT:
5063 			rmx_type = RMX_ASPECT;
5064 			break;
5065 		case DRM_MODE_SCALE_FULLSCREEN:
5066 			rmx_type = RMX_FULL;
5067 			break;
5068 		case DRM_MODE_SCALE_NONE:
5069 		default:
5070 			rmx_type = RMX_OFF;
5071 			break;
5072 		}
5073 
5074 		if (dm_old_state->scaling == rmx_type)
5075 			return 0;
5076 
5077 		dm_new_state->scaling = rmx_type;
5078 		ret = 0;
5079 	} else if (property == adev->mode_info.underscan_hborder_property) {
5080 		dm_new_state->underscan_hborder = val;
5081 		ret = 0;
5082 	} else if (property == adev->mode_info.underscan_vborder_property) {
5083 		dm_new_state->underscan_vborder = val;
5084 		ret = 0;
5085 	} else if (property == adev->mode_info.underscan_property) {
5086 		dm_new_state->underscan_enable = val;
5087 		ret = 0;
5088 	} else if (property == adev->mode_info.abm_level_property) {
5089 		dm_new_state->abm_level = val;
5090 		ret = 0;
5091 	}
5092 
5093 	return ret;
5094 }
5095 
5096 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5097 					    const struct drm_connector_state *state,
5098 					    struct drm_property *property,
5099 					    uint64_t *val)
5100 {
5101 	struct drm_device *dev = connector->dev;
5102 	struct amdgpu_device *adev = drm_to_adev(dev);
5103 	struct dm_connector_state *dm_state =
5104 		to_dm_connector_state(state);
5105 	int ret = -EINVAL;
5106 
5107 	if (property == dev->mode_config.scaling_mode_property) {
5108 		switch (dm_state->scaling) {
5109 		case RMX_CENTER:
5110 			*val = DRM_MODE_SCALE_CENTER;
5111 			break;
5112 		case RMX_ASPECT:
5113 			*val = DRM_MODE_SCALE_ASPECT;
5114 			break;
5115 		case RMX_FULL:
5116 			*val = DRM_MODE_SCALE_FULLSCREEN;
5117 			break;
5118 		case RMX_OFF:
5119 		default:
5120 			*val = DRM_MODE_SCALE_NONE;
5121 			break;
5122 		}
5123 		ret = 0;
5124 	} else if (property == adev->mode_info.underscan_hborder_property) {
5125 		*val = dm_state->underscan_hborder;
5126 		ret = 0;
5127 	} else if (property == adev->mode_info.underscan_vborder_property) {
5128 		*val = dm_state->underscan_vborder;
5129 		ret = 0;
5130 	} else if (property == adev->mode_info.underscan_property) {
5131 		*val = dm_state->underscan_enable;
5132 		ret = 0;
5133 	} else if (property == adev->mode_info.abm_level_property) {
5134 		*val = dm_state->abm_level;
5135 		ret = 0;
5136 	}
5137 
5138 	return ret;
5139 }
5140 
5141 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5142 {
5143 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5144 
5145 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5146 }
5147 
5148 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5149 {
5150 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5151 	const struct dc_link *link = aconnector->dc_link;
5152 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5153 	struct amdgpu_display_manager *dm = &adev->dm;
5154 
5155 	/*
	 * Call only if the mst_mgr was initialized before, since it's not
	 * done for all connector types.
5158 	 */
5159 	if (aconnector->mst_mgr.dev)
5160 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5161 
5162 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5163 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5164 
5165 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5166 	    link->type != dc_connection_none &&
5167 	    dm->backlight_dev) {
5168 		backlight_device_unregister(dm->backlight_dev);
5169 		dm->backlight_dev = NULL;
5170 	}
5171 #endif
5172 
5173 	if (aconnector->dc_em_sink)
5174 		dc_sink_release(aconnector->dc_em_sink);
5175 	aconnector->dc_em_sink = NULL;
5176 	if (aconnector->dc_sink)
5177 		dc_sink_release(aconnector->dc_sink);
5178 	aconnector->dc_sink = NULL;
5179 
5180 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5181 	drm_connector_unregister(connector);
5182 	drm_connector_cleanup(connector);
5183 	if (aconnector->i2c) {
5184 		i2c_del_adapter(&aconnector->i2c->base);
5185 		kfree(aconnector->i2c);
5186 	}
5187 	kfree(aconnector->dm_dp_aux.aux.name);
5188 
5189 	kfree(connector);
5190 }
5191 
5192 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5193 {
5194 	struct dm_connector_state *state =
5195 		to_dm_connector_state(connector->state);
5196 
5197 	if (connector->state)
5198 		__drm_atomic_helper_connector_destroy_state(connector->state);
5199 
5200 	kfree(state);
5201 
5202 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5203 
5204 	if (state) {
5205 		state->scaling = RMX_OFF;
5206 		state->underscan_enable = false;
5207 		state->underscan_hborder = 0;
5208 		state->underscan_vborder = 0;
5209 		state->base.max_requested_bpc = 8;
5210 		state->vcpi_slots = 0;
5211 		state->pbn = 0;
5212 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5213 			state->abm_level = amdgpu_dm_abm_level;
5214 
5215 		__drm_atomic_helper_connector_reset(connector, &state->base);
5216 	}
5217 }
5218 
5219 struct drm_connector_state *
5220 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5221 {
5222 	struct dm_connector_state *state =
5223 		to_dm_connector_state(connector->state);
5224 
5225 	struct dm_connector_state *new_state =
5226 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5227 
5228 	if (!new_state)
5229 		return NULL;
5230 
5231 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5232 
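	/*
	 * kmemdup() above already copied the dm-specific fields wholesale;
	 * the explicit assignments below just keep the duplicated state
	 * obvious at a glance.
	 */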
5233 	new_state->freesync_capable = state->freesync_capable;
5234 	new_state->abm_level = state->abm_level;
5235 	new_state->scaling = state->scaling;
5236 	new_state->underscan_enable = state->underscan_enable;
5237 	new_state->underscan_hborder = state->underscan_hborder;
5238 	new_state->underscan_vborder = state->underscan_vborder;
5239 	new_state->vcpi_slots = state->vcpi_slots;
5240 	new_state->pbn = state->pbn;
5241 	return &new_state->base;
5242 }
5243 
5244 static int
5245 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5246 {
5247 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5248 		to_amdgpu_dm_connector(connector);
5249 	int r;
5250 
5251 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5252 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5253 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5254 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5255 		if (r)
5256 			return r;
5257 	}
5258 
5259 #if defined(CONFIG_DEBUG_FS)
5260 	connector_debugfs_init(amdgpu_dm_connector);
5261 #endif
5262 
5263 	return 0;
5264 }
5265 
5266 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5267 	.reset = amdgpu_dm_connector_funcs_reset,
5268 	.detect = amdgpu_dm_connector_detect,
5269 	.fill_modes = drm_helper_probe_single_connector_modes,
5270 	.destroy = amdgpu_dm_connector_destroy,
5271 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5272 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5273 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5274 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5275 	.late_register = amdgpu_dm_connector_late_register,
5276 	.early_unregister = amdgpu_dm_connector_unregister
5277 };
5278 
5279 static int get_modes(struct drm_connector *connector)
5280 {
5281 	return amdgpu_dm_connector_get_modes(connector);
5282 }
5283 
5284 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5285 {
5286 	struct dc_sink_init_data init_params = {
5287 			.link = aconnector->dc_link,
5288 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5289 	};
5290 	struct edid *edid;
5291 
5292 	if (!aconnector->base.edid_blob_ptr) {
5293 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5294 				aconnector->base.name);
5295 
5296 		aconnector->base.force = DRM_FORCE_OFF;
5297 		aconnector->base.override_edid = false;
5298 		return;
5299 	}
5300 
5301 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5302 
5303 	aconnector->edid = edid;
5304 
5305 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5306 		aconnector->dc_link,
5307 		(uint8_t *)edid,
5308 		(edid->extensions + 1) * EDID_LENGTH,
5309 		&init_params);
5310 
5311 	if (aconnector->base.force == DRM_FORCE_ON) {
5312 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5313 		aconnector->dc_link->local_sink :
5314 		aconnector->dc_em_sink;
5315 		dc_sink_retain(aconnector->dc_sink);
5316 	}
5317 }
5318 
5319 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5320 {
5321 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5322 
5323 	/*
	 * In case of a headless boot with force on for a DP-managed connector,
	 * these settings have to be != 0 to get an initial modeset.
5326 	 */
5327 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5328 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5329 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5330 	}
5331 
5332 
5333 	aconnector->base.override_edid = true;
5334 	create_eml_sink(aconnector);
5335 }
5336 
5337 static struct dc_stream_state *
5338 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5339 				const struct drm_display_mode *drm_mode,
5340 				const struct dm_connector_state *dm_state,
5341 				const struct dc_stream_state *old_stream)
5342 {
5343 	struct drm_connector *connector = &aconnector->base;
5344 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5345 	struct dc_stream_state *stream;
5346 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5347 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5348 	enum dc_status dc_result = DC_OK;
5349 
5350 	do {
5351 		stream = create_stream_for_sink(aconnector, drm_mode,
5352 						dm_state, old_stream,
5353 						requested_bpc);
5354 		if (stream == NULL) {
5355 			DRM_ERROR("Failed to create stream for sink!\n");
5356 			break;
5357 		}
5358 
5359 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5360 
5361 		if (dc_result != DC_OK) {
5362 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5363 				      drm_mode->hdisplay,
5364 				      drm_mode->vdisplay,
5365 				      drm_mode->clock,
5366 				      dc_result,
5367 				      dc_status_to_str(dc_result));
5368 
5369 			dc_stream_release(stream);
5370 			stream = NULL;
5371 			requested_bpc -= 2; /* lower bpc to retry validation */
5372 		}
5373 
5374 	} while (stream == NULL && requested_bpc >= 6);
5375 
5376 	return stream;
5377 }
5378 
5379 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5380 				   struct drm_display_mode *mode)
5381 {
5382 	int result = MODE_ERROR;
5383 	struct dc_sink *dc_sink;
5384 	/* TODO: Unhardcode stream count */
5385 	struct dc_stream_state *stream;
5386 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5387 
5388 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5389 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5390 		return result;
5391 
5392 	/*
	 * Only run this the first time mode_valid is called, to initialize
	 * EDID mgmt.
5395 	 */
5396 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5397 		!aconnector->dc_em_sink)
5398 		handle_edid_mgmt(aconnector);
5399 
	dc_sink = aconnector->dc_sink;
5401 
5402 	if (dc_sink == NULL) {
5403 		DRM_ERROR("dc_sink is NULL!\n");
5404 		goto fail;
5405 	}
5406 
5407 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5408 	if (stream) {
5409 		dc_stream_release(stream);
5410 		result = MODE_OK;
5411 	}
5412 
5413 fail:
	/* TODO: error handling */
5415 	return result;
5416 }
5417 
5418 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5419 				struct dc_info_packet *out)
5420 {
5421 	struct hdmi_drm_infoframe frame;
5422 	unsigned char buf[30]; /* 26 + 4 */
5423 	ssize_t len;
5424 	int ret, i;
5425 
5426 	memset(out, 0, sizeof(*out));
5427 
5428 	if (!state->hdr_output_metadata)
5429 		return 0;
5430 
5431 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5432 	if (ret)
5433 		return ret;
5434 
5435 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5436 	if (len < 0)
5437 		return (int)len;
5438 
5439 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5440 	if (len != 30)
5441 		return -EINVAL;
5442 
5443 	/* Prepare the infopacket for DC. */
5444 	switch (state->connector->connector_type) {
5445 	case DRM_MODE_CONNECTOR_HDMIA:
5446 		out->hb0 = 0x87; /* type */
5447 		out->hb1 = 0x01; /* version */
5448 		out->hb2 = 0x1A; /* length */
5449 		out->sb[0] = buf[3]; /* checksum */
5450 		i = 1;
5451 		break;
5452 
5453 	case DRM_MODE_CONNECTOR_DisplayPort:
5454 	case DRM_MODE_CONNECTOR_eDP:
5455 		out->hb0 = 0x00; /* sdp id, zero */
5456 		out->hb1 = 0x87; /* type */
5457 		out->hb2 = 0x1D; /* payload len - 1 */
5458 		out->hb3 = (0x13 << 2); /* sdp version */
5459 		out->sb[0] = 0x01; /* version */
5460 		out->sb[1] = 0x1A; /* length */
5461 		i = 2;
5462 		break;
5463 
5464 	default:
5465 		return -EINVAL;
5466 	}
5467 
5468 	memcpy(&out->sb[i], &buf[4], 26);
5469 	out->valid = true;
5470 
5471 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5472 		       sizeof(out->sb), false);
5473 
5474 	return 0;
5475 }
5476 
5477 static bool
5478 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5479 			  const struct drm_connector_state *new_state)
5480 {
5481 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5482 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5483 
5484 	if (old_blob != new_blob) {
5485 		if (old_blob && new_blob &&
5486 		    old_blob->length == new_blob->length)
5487 			return memcmp(old_blob->data, new_blob->data,
5488 				      old_blob->length);
5489 
5490 		return true;
5491 	}
5492 
5493 	return false;
5494 }
5495 
5496 static int
5497 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5498 				 struct drm_atomic_state *state)
5499 {
5500 	struct drm_connector_state *new_con_state =
5501 		drm_atomic_get_new_connector_state(state, conn);
5502 	struct drm_connector_state *old_con_state =
5503 		drm_atomic_get_old_connector_state(state, conn);
5504 	struct drm_crtc *crtc = new_con_state->crtc;
5505 	struct drm_crtc_state *new_crtc_state;
5506 	int ret;
5507 
5508 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
5509 
5510 	if (!crtc)
5511 		return 0;
5512 
5513 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5514 		struct dc_info_packet hdr_infopacket;
5515 
5516 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5517 		if (ret)
5518 			return ret;
5519 
5520 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5521 		if (IS_ERR(new_crtc_state))
5522 			return PTR_ERR(new_crtc_state);
5523 
5524 		/*
5525 		 * DC considers the stream backends changed if the
5526 		 * static metadata changes. Forcing the modeset also
5527 		 * gives a simple way for userspace to switch from
5528 		 * 8bpc to 10bpc when setting the metadata to enter
5529 		 * or exit HDR.
5530 		 *
5531 		 * Changing the static metadata after it's been
5532 		 * set is permissible, however. So only force a
5533 		 * modeset if we're entering or exiting HDR.
5534 		 */
5535 		new_crtc_state->mode_changed =
5536 			!old_con_state->hdr_output_metadata ||
5537 			!new_con_state->hdr_output_metadata;
5538 	}
5539 
5540 	return 0;
5541 }
5542 
5543 static const struct drm_connector_helper_funcs
5544 amdgpu_dm_connector_helper_funcs = {
5545 	/*
	 * When hotplugging a second, bigger display in FB console mode, the
	 * bigger resolution modes will be filtered out by
	 * drm_mode_validate_size(), and those modes are then missing after
	 * the user starts lightdm. So we need to renew the modes list in the
	 * get_modes callback, not just return the modes count.
5550 	 */
5551 	.get_modes = get_modes,
5552 	.mode_valid = amdgpu_dm_connector_mode_valid,
5553 	.atomic_check = amdgpu_dm_connector_atomic_check,
5554 };
5555 
5556 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5557 {
5558 }
5559 
5560 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5561 {
5562 	struct drm_atomic_state *state = new_crtc_state->state;
5563 	struct drm_plane *plane;
5564 	int num_active = 0;
5565 
5566 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5567 		struct drm_plane_state *new_plane_state;
5568 
5569 		/* Cursor planes are "fake". */
5570 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5571 			continue;
5572 
5573 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5574 
5575 		if (!new_plane_state) {
5576 			/*
			 * The plane is enabled on the CRTC and hasn't changed
5578 			 * state. This means that it previously passed
5579 			 * validation and is therefore enabled.
5580 			 */
5581 			num_active += 1;
5582 			continue;
5583 		}
5584 
5585 		/* We need a framebuffer to be considered enabled. */
5586 		num_active += (new_plane_state->fb != NULL);
5587 	}
5588 
5589 	return num_active;
5590 }
5591 
5592 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5593 					 struct drm_crtc_state *new_crtc_state)
5594 {
5595 	struct dm_crtc_state *dm_new_crtc_state =
5596 		to_dm_crtc_state(new_crtc_state);
5597 
5598 	dm_new_crtc_state->active_planes = 0;
5599 
5600 	if (!dm_new_crtc_state->stream)
5601 		return;
5602 
5603 	dm_new_crtc_state->active_planes =
5604 		count_crtc_active_planes(new_crtc_state);
5605 }
5606 
5607 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5608 				       struct drm_crtc_state *state)
5609 {
5610 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5611 	struct dc *dc = adev->dm.dc;
5612 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5613 	int ret = -EINVAL;
5614 
5615 	trace_amdgpu_dm_crtc_atomic_check(state);
5616 
5617 	dm_update_crtc_active_planes(crtc, state);
5618 
5619 	if (unlikely(!dm_crtc_state->stream &&
5620 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5621 		WARN_ON(1);
5622 		return ret;
5623 	}
5624 
5625 	/*
5626 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5627 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5628 	 * planes are disabled, which is not supported by the hardware. And there is legacy
5629 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5630 	 */
5631 	if (state->enable &&
5632 	    !(state->plane_mask & drm_plane_mask(crtc->primary)))
5633 		return -EINVAL;
5634 
5635 	/* In some use cases, like reset, no stream is attached */
5636 	if (!dm_crtc_state->stream)
5637 		return 0;
5638 
5639 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5640 		return 0;
5641 
5642 	return ret;
5643 }
5644 
5645 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5646 				      const struct drm_display_mode *mode,
5647 				      struct drm_display_mode *adjusted_mode)
5648 {
5649 	return true;
5650 }
5651 
5652 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5653 	.disable = dm_crtc_helper_disable,
5654 	.atomic_check = dm_crtc_helper_atomic_check,
5655 	.mode_fixup = dm_crtc_helper_mode_fixup,
5656 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5657 };
5658 
5659 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5660 {
5661 
5662 }
5663 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}

	return 0;
}
5684 
5685 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5686 					  struct drm_crtc_state *crtc_state,
5687 					  struct drm_connector_state *conn_state)
5688 {
5689 	struct drm_atomic_state *state = crtc_state->state;
5690 	struct drm_connector *connector = conn_state->connector;
5691 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5692 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5693 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5694 	struct drm_dp_mst_topology_mgr *mst_mgr;
5695 	struct drm_dp_mst_port *mst_port;
5696 	enum dc_color_depth color_depth;
5697 	int clock, bpp = 0;
5698 	bool is_y420 = false;
5699 
5700 	if (!aconnector->port || !aconnector->dc_sink)
5701 		return 0;
5702 
5703 	mst_port = aconnector->port;
5704 	mst_mgr = &aconnector->mst_port->mst_mgr;
5705 
5706 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5707 		return 0;
5708 
5709 	if (!state->duplicated) {
5710 		int max_bpc = conn_state->max_requested_bpc;
5711 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5712 				aconnector->force_yuv420_output;
5713 		color_depth = convert_color_depth_from_display_info(connector,
5714 								    is_y420,
5715 								    max_bpc);
5716 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5717 		clock = adjusted_mode->clock;
5718 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5719 	}
5720 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5721 									   mst_mgr,
5722 									   mst_port,
5723 									   dm_new_connector_state->pbn,
5724 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5725 	if (dm_new_connector_state->vcpi_slots < 0) {
5726 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5727 		return dm_new_connector_state->vcpi_slots;
5728 	}
5729 	return 0;
5730 }
5731 
5732 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5733 	.disable = dm_encoder_helper_disable,
5734 	.atomic_check = dm_encoder_helper_atomic_check
5735 };
5736 
5737 #if defined(CONFIG_DRM_AMD_DC_DCN)
5738 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5739 					    struct dc_state *dc_state)
5740 {
5741 	struct dc_stream_state *stream = NULL;
5742 	struct drm_connector *connector;
5743 	struct drm_connector_state *new_con_state, *old_con_state;
5744 	struct amdgpu_dm_connector *aconnector;
5745 	struct dm_connector_state *dm_conn_state;
5746 	int i, j, clock, bpp;
5747 	int vcpi, pbn_div, pbn = 0;
5748 
5749 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5750 
5751 		aconnector = to_amdgpu_dm_connector(connector);
5752 
5753 		if (!aconnector->port)
5754 			continue;
5755 
5756 		if (!new_con_state || !new_con_state->crtc)
5757 			continue;
5758 
5759 		dm_conn_state = to_dm_connector_state(new_con_state);
5760 
5761 		for (j = 0; j < dc_state->stream_count; j++) {
5762 			stream = dc_state->streams[j];
5763 			if (!stream)
5764 				continue;
5765 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5767 				break;
5768 
5769 			stream = NULL;
5770 		}
5771 
5772 		if (!stream)
5773 			continue;
5774 
5775 		if (stream->timing.flags.DSC != 1) {
5776 			drm_dp_mst_atomic_enable_dsc(state,
5777 						     aconnector->port,
5778 						     dm_conn_state->pbn,
5779 						     0,
5780 						     false);
5781 			continue;
5782 		}
5783 
5784 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5785 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5786 		clock = stream->timing.pix_clk_100hz / 10;
5787 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5788 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5789 						    aconnector->port,
5790 						    pbn, pbn_div,
5791 						    true);
5792 		if (vcpi < 0)
5793 			return vcpi;
5794 
5795 		dm_conn_state->pbn = pbn;
5796 		dm_conn_state->vcpi_slots = vcpi;
5797 	}
5798 	return 0;
5799 }
5800 #endif
5801 
5802 static void dm_drm_plane_reset(struct drm_plane *plane)
5803 {
5804 	struct dm_plane_state *amdgpu_state = NULL;
5805 
5806 	if (plane->state)
5807 		plane->funcs->atomic_destroy_state(plane, plane->state);
5808 
5809 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5810 	WARN_ON(amdgpu_state == NULL);
5811 
5812 	if (amdgpu_state)
5813 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5814 }
5815 
5816 static struct drm_plane_state *
5817 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5818 {
5819 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5820 
5821 	old_dm_plane_state = to_dm_plane_state(plane->state);
5822 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5823 	if (!dm_plane_state)
5824 		return NULL;
5825 
5826 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5827 
5828 	if (old_dm_plane_state->dc_state) {
5829 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5830 		dc_plane_state_retain(dm_plane_state->dc_state);
5831 	}
5832 
5833 	/* Framebuffer hasn't been updated yet, so retain old flags. */
5834 	dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5835 	dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5836 
5837 	return &dm_plane_state->base;
5838 }
5839 
5840 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5841 				struct drm_plane_state *state)
5842 {
5843 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5844 
5845 	if (dm_plane_state->dc_state)
5846 		dc_plane_state_release(dm_plane_state->dc_state);
5847 
5848 	drm_atomic_helper_plane_destroy_state(plane, state);
5849 }
5850 
5851 static const struct drm_plane_funcs dm_plane_funcs = {
5852 	.update_plane	= drm_atomic_helper_update_plane,
5853 	.disable_plane	= drm_atomic_helper_disable_plane,
5854 	.destroy	= drm_primary_helper_destroy,
5855 	.reset = dm_drm_plane_reset,
5856 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5857 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5858 };
5859 
5860 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5861 				      struct drm_plane_state *new_state)
5862 {
5863 	struct amdgpu_framebuffer *afb;
5864 	struct drm_gem_object *obj;
5865 	struct amdgpu_device *adev;
5866 	struct amdgpu_bo *rbo;
5867 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5868 	struct list_head list;
5869 	struct ttm_validate_buffer tv;
5870 	struct ww_acquire_ctx ticket;
5871 	uint32_t domain;
5872 	int r;
5873 
5874 	if (!new_state->fb) {
5875 		DRM_DEBUG_DRIVER("No FB bound\n");
5876 		return 0;
5877 	}
5878 
5879 	afb = to_amdgpu_framebuffer(new_state->fb);
5880 	obj = new_state->fb->obj[0];
5881 	rbo = gem_to_amdgpu_bo(obj);
5882 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5883 	INIT_LIST_HEAD(&list);
5884 
5885 	tv.bo = &rbo->tbo;
5886 	tv.num_shared = 1;
5887 	list_add(&tv.head, &list);
5888 
5889 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5890 	if (r) {
5891 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5892 		return r;
5893 	}
5894 
5895 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5896 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5897 	else
5898 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5899 
5900 	r = amdgpu_bo_pin(rbo, domain);
5901 	if (unlikely(r != 0)) {
5902 		if (r != -ERESTARTSYS)
5903 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5904 		ttm_eu_backoff_reservation(&ticket, &list);
5905 		return r;
5906 	}
5907 
5908 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5909 	if (unlikely(r != 0)) {
5910 		amdgpu_bo_unpin(rbo);
5911 		ttm_eu_backoff_reservation(&ticket, &list);
5912 		DRM_ERROR("%p bind failed\n", rbo);
5913 		return r;
5914 	}
5915 
5916 	ttm_eu_backoff_reservation(&ticket, &list);
5917 
5918 	afb->address = amdgpu_bo_gpu_offset(rbo);
5919 
5920 	amdgpu_bo_ref(rbo);
5921 
5922 	/**
5923 	 * We don't do surface updates on planes that have been newly created,
5924 	 * but we also don't have the afb->address during atomic check.
5925 	 *
5926 	 * Fill in buffer attributes depending on the address here, but only on
5927 	 * newly created planes since they're not being used by DC yet and this
5928 	 * won't modify global state.
5929 	 */
5930 	dm_plane_state_old = to_dm_plane_state(plane->state);
5931 	dm_plane_state_new = to_dm_plane_state(new_state);
5932 
5933 	if (dm_plane_state_new->dc_state &&
5934 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5935 		struct dc_plane_state *plane_state =
5936 			dm_plane_state_new->dc_state;
5937 		bool force_disable_dcc = !plane_state->dcc.enable;
5938 
5939 		fill_plane_buffer_attributes(
5940 			adev, afb, plane_state->format, plane_state->rotation,
5941 			dm_plane_state_new->tiling_flags,
5942 			&plane_state->tiling_info, &plane_state->plane_size,
5943 			&plane_state->dcc, &plane_state->address,
5944 			dm_plane_state_new->tmz_surface, force_disable_dcc);
5945 	}
5946 
5947 	return 0;
5948 }
5949 
5950 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5951 				       struct drm_plane_state *old_state)
5952 {
5953 	struct amdgpu_bo *rbo;
5954 	int r;
5955 
5956 	if (!old_state->fb)
5957 		return;
5958 
5959 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5960 	r = amdgpu_bo_reserve(rbo, false);
5961 	if (unlikely(r)) {
5962 		DRM_ERROR("failed to reserve rbo before unpin\n");
5963 		return;
5964 	}
5965 
5966 	amdgpu_bo_unpin(rbo);
5967 	amdgpu_bo_unreserve(rbo);
5968 	amdgpu_bo_unref(&rbo);
5969 }
5970 
5971 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5972 				       struct drm_crtc_state *new_crtc_state)
5973 {
5974 	int max_downscale = 0;
5975 	int max_upscale = INT_MAX;
5976 
5977 	/* TODO: These should be checked against DC plane caps */
5978 	return drm_atomic_helper_check_plane_state(
5979 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5980 }
5981 
5982 static int dm_plane_atomic_check(struct drm_plane *plane,
5983 				 struct drm_plane_state *state)
5984 {
5985 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
5986 	struct dc *dc = adev->dm.dc;
5987 	struct dm_plane_state *dm_plane_state;
5988 	struct dc_scaling_info scaling_info;
5989 	struct drm_crtc_state *new_crtc_state;
5990 	int ret;
5991 
5992 	trace_amdgpu_dm_plane_atomic_check(state);
5993 
5994 	dm_plane_state = to_dm_plane_state(state);
5995 
5996 	if (!dm_plane_state->dc_state)
5997 		return 0;
5998 
5999 	new_crtc_state =
6000 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6001 	if (!new_crtc_state)
6002 		return -EINVAL;
6003 
6004 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6005 	if (ret)
6006 		return ret;
6007 
6008 	ret = fill_dc_scaling_info(state, &scaling_info);
6009 	if (ret)
6010 		return ret;
6011 
6012 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6013 		return 0;
6014 
6015 	return -EINVAL;
6016 }
6017 
6018 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6019 				       struct drm_plane_state *new_plane_state)
6020 {
6021 	/* Only support async updates on cursor planes. */
6022 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6023 		return -EINVAL;
6024 
6025 	return 0;
6026 }
6027 
6028 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6029 					 struct drm_plane_state *new_state)
6030 {
6031 	struct drm_plane_state *old_state =
6032 		drm_atomic_get_old_plane_state(new_state->state, plane);
6033 
6034 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6035 
6036 	swap(plane->state->fb, new_state->fb);
6037 
6038 	plane->state->src_x = new_state->src_x;
6039 	plane->state->src_y = new_state->src_y;
6040 	plane->state->src_w = new_state->src_w;
6041 	plane->state->src_h = new_state->src_h;
6042 	plane->state->crtc_x = new_state->crtc_x;
6043 	plane->state->crtc_y = new_state->crtc_y;
6044 	plane->state->crtc_w = new_state->crtc_w;
6045 	plane->state->crtc_h = new_state->crtc_h;
6046 
6047 	handle_cursor_update(plane, old_state);
6048 }
6049 
6050 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6051 	.prepare_fb = dm_plane_helper_prepare_fb,
6052 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6053 	.atomic_check = dm_plane_atomic_check,
6054 	.atomic_async_check = dm_plane_atomic_async_check,
6055 	.atomic_async_update = dm_plane_atomic_async_update
6056 };
6057 
6058 /*
6059  * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the
 * internal drm check will succeed, and let DC implement the proper check.
6063  */
6064 static const uint32_t rgb_formats[] = {
6065 	DRM_FORMAT_XRGB8888,
6066 	DRM_FORMAT_ARGB8888,
6067 	DRM_FORMAT_RGBA8888,
6068 	DRM_FORMAT_XRGB2101010,
6069 	DRM_FORMAT_XBGR2101010,
6070 	DRM_FORMAT_ARGB2101010,
6071 	DRM_FORMAT_ABGR2101010,
6072 	DRM_FORMAT_XBGR8888,
6073 	DRM_FORMAT_ABGR8888,
6074 	DRM_FORMAT_RGB565,
6075 };
6076 
6077 static const uint32_t overlay_formats[] = {
6078 	DRM_FORMAT_XRGB8888,
6079 	DRM_FORMAT_ARGB8888,
6080 	DRM_FORMAT_RGBA8888,
6081 	DRM_FORMAT_XBGR8888,
6082 	DRM_FORMAT_ABGR8888,
6083 	DRM_FORMAT_RGB565
6084 };
6085 
6086 static const u32 cursor_formats[] = {
6087 	DRM_FORMAT_ARGB8888
6088 };
6089 
6090 static int get_plane_formats(const struct drm_plane *plane,
6091 			     const struct dc_plane_cap *plane_cap,
6092 			     uint32_t *formats, int max_formats)
6093 {
6094 	int i, num_formats = 0;
6095 
6096 	/*
6097 	 * TODO: Query support for each group of formats directly from
6098 	 * DC plane caps. This will require adding more formats to the
6099 	 * caps list.
6100 	 */
6101 
6102 	switch (plane->type) {
6103 	case DRM_PLANE_TYPE_PRIMARY:
6104 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6105 			if (num_formats >= max_formats)
6106 				break;
6107 
6108 			formats[num_formats++] = rgb_formats[i];
6109 		}
6110 
6111 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6112 			formats[num_formats++] = DRM_FORMAT_NV12;
6113 		if (plane_cap && plane_cap->pixel_format_support.p010)
6114 			formats[num_formats++] = DRM_FORMAT_P010;
6115 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6116 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6117 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6118 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6119 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6120 		}
6121 		break;
6122 
6123 	case DRM_PLANE_TYPE_OVERLAY:
6124 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6125 			if (num_formats >= max_formats)
6126 				break;
6127 
6128 			formats[num_formats++] = overlay_formats[i];
6129 		}
6130 		break;
6131 
6132 	case DRM_PLANE_TYPE_CURSOR:
6133 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6134 			if (num_formats >= max_formats)
6135 				break;
6136 
6137 			formats[num_formats++] = cursor_formats[i];
6138 		}
6139 		break;
6140 	}
6141 
6142 	return num_formats;
6143 }
6144 
6145 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6146 				struct drm_plane *plane,
6147 				unsigned long possible_crtcs,
6148 				const struct dc_plane_cap *plane_cap)
6149 {
6150 	uint32_t formats[32];
6151 	int num_formats;
6152 	int res = -EPERM;
6153 	unsigned int supported_rotations;
6154 
6155 	num_formats = get_plane_formats(plane, plane_cap, formats,
6156 					ARRAY_SIZE(formats));
6157 
6158 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6159 				       &dm_plane_funcs, formats, num_formats,
6160 				       NULL, plane->type, NULL);
6161 	if (res)
6162 		return res;
6163 
6164 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6165 	    plane_cap && plane_cap->per_pixel_alpha) {
6166 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6167 					  BIT(DRM_MODE_BLEND_PREMULTI);
6168 
6169 		drm_plane_create_alpha_property(plane);
6170 		drm_plane_create_blend_mode_property(plane, blend_caps);
6171 	}
6172 
6173 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6174 	    plane_cap &&
6175 	    (plane_cap->pixel_format_support.nv12 ||
6176 	     plane_cap->pixel_format_support.p010)) {
6177 		/* This only affects YUV formats. */
6178 		drm_plane_create_color_properties(
6179 			plane,
6180 			BIT(DRM_COLOR_YCBCR_BT601) |
6181 			BIT(DRM_COLOR_YCBCR_BT709) |
6182 			BIT(DRM_COLOR_YCBCR_BT2020),
6183 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6184 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6185 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6186 	}
6187 
6188 	supported_rotations =
6189 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6190 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6191 
6192 	if (dm->adev->asic_type >= CHIP_BONAIRE)
6193 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6194 						   supported_rotations);
6195 
6196 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6197 
6198 	/* Create (reset) the plane state */
6199 	if (plane->funcs->reset)
6200 		plane->funcs->reset(plane);
6201 
6202 	return 0;
6203 }
6204 
6205 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6206 			       struct drm_plane *plane,
6207 			       uint32_t crtc_index)
6208 {
6209 	struct amdgpu_crtc *acrtc = NULL;
6210 	struct drm_plane *cursor_plane;
6211 
6212 	int res = -ENOMEM;
6213 
6214 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6215 	if (!cursor_plane)
6216 		goto fail;
6217 
6218 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
6220 
6221 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6222 	if (!acrtc)
6223 		goto fail;
6224 
6225 	res = drm_crtc_init_with_planes(
6226 			dm->ddev,
6227 			&acrtc->base,
6228 			plane,
6229 			cursor_plane,
6230 			&amdgpu_dm_crtc_funcs, NULL);
6231 
6232 	if (res)
6233 		goto fail;
6234 
6235 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6236 
	/* Create (reset) the CRTC state */
6238 	if (acrtc->base.funcs->reset)
6239 		acrtc->base.funcs->reset(&acrtc->base);
6240 
6241 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6242 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6243 
6244 	acrtc->crtc_id = crtc_index;
6245 	acrtc->base.enabled = false;
6246 	acrtc->otg_inst = -1;
6247 
6248 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6249 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6250 				   true, MAX_COLOR_LUT_ENTRIES);
6251 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6252 
6253 	return 0;
6254 
6255 fail:
6256 	kfree(acrtc);
6257 	kfree(cursor_plane);
6258 	return res;
6259 }
6260 
6261 
6262 static int to_drm_connector_type(enum signal_type st)
6263 {
6264 	switch (st) {
6265 	case SIGNAL_TYPE_HDMI_TYPE_A:
6266 		return DRM_MODE_CONNECTOR_HDMIA;
6267 	case SIGNAL_TYPE_EDP:
6268 		return DRM_MODE_CONNECTOR_eDP;
6269 	case SIGNAL_TYPE_LVDS:
6270 		return DRM_MODE_CONNECTOR_LVDS;
6271 	case SIGNAL_TYPE_RGB:
6272 		return DRM_MODE_CONNECTOR_VGA;
6273 	case SIGNAL_TYPE_DISPLAY_PORT:
6274 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6275 		return DRM_MODE_CONNECTOR_DisplayPort;
6276 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6277 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6278 		return DRM_MODE_CONNECTOR_DVID;
6279 	case SIGNAL_TYPE_VIRTUAL:
6280 		return DRM_MODE_CONNECTOR_VIRTUAL;
6281 
6282 	default:
6283 		return DRM_MODE_CONNECTOR_Unknown;
6284 	}
6285 }
6286 
6287 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6288 {
6289 	struct drm_encoder *encoder;
6290 
6291 	/* There is only one encoder per connector */
6292 	drm_connector_for_each_possible_encoder(connector, encoder)
6293 		return encoder;
6294 
6295 	return NULL;
6296 }
6297 
6298 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6299 {
6300 	struct drm_encoder *encoder;
6301 	struct amdgpu_encoder *amdgpu_encoder;
6302 
6303 	encoder = amdgpu_dm_connector_to_encoder(connector);
6304 
6305 	if (encoder == NULL)
6306 		return;
6307 
6308 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6309 
6310 	amdgpu_encoder->native_mode.clock = 0;
6311 
6312 	if (!list_empty(&connector->probed_modes)) {
6313 		struct drm_display_mode *preferred_mode = NULL;
6314 
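		/*
		 * probed_modes was sorted by drm_mode_sort() in
		 * amdgpu_dm_connector_ddc_get_modes() (preferred modes first,
		 * then larger modes first), so only the head of the list
		 * needs checking; hence the unconditional break below.
		 */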
6315 		list_for_each_entry(preferred_mode,
6316 				    &connector->probed_modes,
6317 				    head) {
6318 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6319 				amdgpu_encoder->native_mode = *preferred_mode;
6320 
6321 			break;
6322 		}
6323 
6324 	}
6325 }
6326 
6327 static struct drm_display_mode *
6328 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6329 			     char *name,
6330 			     int hdisplay, int vdisplay)
6331 {
6332 	struct drm_device *dev = encoder->dev;
6333 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6334 	struct drm_display_mode *mode = NULL;
6335 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6336 
6337 	mode = drm_mode_duplicate(dev, native_mode);
6338 
6339 	if (mode == NULL)
6340 		return NULL;
6341 
6342 	mode->hdisplay = hdisplay;
6343 	mode->vdisplay = vdisplay;
6344 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6345 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6346 
	return mode;
}
6350 
6351 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6352 						 struct drm_connector *connector)
6353 {
6354 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6355 	struct drm_display_mode *mode = NULL;
6356 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6357 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6358 				to_amdgpu_dm_connector(connector);
6359 	int i;
6360 	int n;
6361 	struct mode_size {
6362 		char name[DRM_DISPLAY_MODE_LEN];
6363 		int w;
6364 		int h;
6365 	} common_modes[] = {
6366 		{  "640x480",  640,  480},
6367 		{  "800x600",  800,  600},
6368 		{ "1024x768", 1024,  768},
6369 		{ "1280x720", 1280,  720},
6370 		{ "1280x800", 1280,  800},
6371 		{"1280x1024", 1280, 1024},
6372 		{ "1440x900", 1440,  900},
6373 		{"1680x1050", 1680, 1050},
6374 		{"1600x1200", 1600, 1200},
6375 		{"1920x1080", 1920, 1080},
6376 		{"1920x1200", 1920, 1200}
6377 	};
6378 
6379 	n = ARRAY_SIZE(common_modes);
6380 
6381 	for (i = 0; i < n; i++) {
6382 		struct drm_display_mode *curmode = NULL;
6383 		bool mode_existed = false;
6384 
6385 		if (common_modes[i].w > native_mode->hdisplay ||
6386 		    common_modes[i].h > native_mode->vdisplay ||
6387 		   (common_modes[i].w == native_mode->hdisplay &&
6388 		    common_modes[i].h == native_mode->vdisplay))
6389 			continue;
6390 
6391 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6392 			if (common_modes[i].w == curmode->hdisplay &&
6393 			    common_modes[i].h == curmode->vdisplay) {
6394 				mode_existed = true;
6395 				break;
6396 			}
6397 		}
6398 
6399 		if (mode_existed)
6400 			continue;
6401 
6402 		mode = amdgpu_dm_create_common_mode(encoder,
6403 				common_modes[i].name, common_modes[i].w,
6404 				common_modes[i].h);
6405 		drm_mode_probed_add(connector, mode);
6406 		amdgpu_dm_connector->num_modes++;
6407 	}
6408 }
6409 
6410 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6411 					      struct edid *edid)
6412 {
6413 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6414 			to_amdgpu_dm_connector(connector);
6415 
6416 	if (edid) {
6417 		/* empty probed_modes */
6418 		INIT_LIST_HEAD(&connector->probed_modes);
6419 		amdgpu_dm_connector->num_modes =
6420 				drm_add_edid_modes(connector, edid);
6421 
		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since the EDID can have more
		 * than one preferred mode. Modes later in the probed mode
		 * list could be of a higher, preferred resolution: for
		 * example, a 3840x2160 preferred timing in the base EDID and
		 * a 4096x2160 preferred resolution in a DID extension block
		 * later.
		 */
6430 		drm_mode_sort(&connector->probed_modes);
6431 		amdgpu_dm_get_native_mode(connector);
6432 	} else {
6433 		amdgpu_dm_connector->num_modes = 0;
6434 	}
6435 }
6436 
6437 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6438 {
6439 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6440 			to_amdgpu_dm_connector(connector);
6441 	struct drm_encoder *encoder;
6442 	struct edid *edid = amdgpu_dm_connector->edid;
6443 
6444 	encoder = amdgpu_dm_connector_to_encoder(connector);
6445 
6446 	if (!edid || !drm_edid_is_valid(edid)) {
6447 		amdgpu_dm_connector->num_modes =
6448 				drm_add_modes_noedid(connector, 640, 480);
6449 	} else {
6450 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6451 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6452 	}
6453 	amdgpu_dm_fbc_init(connector);
6454 
6455 	return amdgpu_dm_connector->num_modes;
6456 }
6457 
6458 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6459 				     struct amdgpu_dm_connector *aconnector,
6460 				     int connector_type,
6461 				     struct dc_link *link,
6462 				     int link_index)
6463 {
6464 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6465 
6466 	/*
6467 	 * Some of the properties below require access to state, like bpc.
6468 	 * Allocate some default initial connector state with our reset helper.
6469 	 */
6470 	if (aconnector->base.funcs->reset)
6471 		aconnector->base.funcs->reset(&aconnector->base);
6472 
6473 	aconnector->connector_id = link_index;
6474 	aconnector->dc_link = link;
6475 	aconnector->base.interlace_allowed = false;
6476 	aconnector->base.doublescan_allowed = false;
6477 	aconnector->base.stereo_allowed = false;
6478 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6479 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6480 	aconnector->audio_inst = -1;
6481 	mutex_init(&aconnector->hpd_lock);
6482 
6483 	/*
	 * Configure HPD hot plug support. The connector->polled default value
	 * is 0, which means HPD hot plug is not supported.
6486 	 */
6487 	switch (connector_type) {
6488 	case DRM_MODE_CONNECTOR_HDMIA:
6489 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6490 		aconnector->base.ycbcr_420_allowed =
6491 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6492 		break;
6493 	case DRM_MODE_CONNECTOR_DisplayPort:
6494 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6495 		aconnector->base.ycbcr_420_allowed =
6496 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6497 		break;
6498 	case DRM_MODE_CONNECTOR_DVID:
6499 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6500 		break;
6501 	default:
6502 		break;
6503 	}
6504 
6505 	drm_object_attach_property(&aconnector->base.base,
6506 				dm->ddev->mode_config.scaling_mode_property,
6507 				DRM_MODE_SCALE_NONE);
6508 
6509 	drm_object_attach_property(&aconnector->base.base,
6510 				adev->mode_info.underscan_property,
6511 				UNDERSCAN_OFF);
6512 	drm_object_attach_property(&aconnector->base.base,
6513 				adev->mode_info.underscan_hborder_property,
6514 				0);
6515 	drm_object_attach_property(&aconnector->base.base,
6516 				adev->mode_info.underscan_vborder_property,
6517 				0);
6518 
6519 	if (!aconnector->mst_port)
6520 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6521 
6522 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6523 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6524 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6525 
6526 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6527 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6528 		drm_object_attach_property(&aconnector->base.base,
6529 				adev->mode_info.abm_level_property, 0);
6530 	}
6531 
6532 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6533 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6534 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6535 		drm_object_attach_property(
6536 			&aconnector->base.base,
6537 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6538 
6539 		if (!aconnector->mst_port)
6540 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6541 
6542 #ifdef CONFIG_DRM_AMD_DC_HDCP
6543 		if (adev->dm.hdcp_workqueue)
6544 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6545 #endif
6546 	}
6547 }
6548 
6549 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6550 			      struct i2c_msg *msgs, int num)
6551 {
6552 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6553 	struct ddc_service *ddc_service = i2c->ddc_service;
6554 	struct i2c_command cmd;
6555 	int i;
6556 	int result = -EIO;
6557 
6558 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6559 
6560 	if (!cmd.payloads)
6561 		return result;
6562 
6563 	cmd.number_of_payloads = num;
6564 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6565 	cmd.speed = 100;
6566 
6567 	for (i = 0; i < num; i++) {
6568 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6569 		cmd.payloads[i].address = msgs[i].addr;
6570 		cmd.payloads[i].length = msgs[i].len;
6571 		cmd.payloads[i].data = msgs[i].buf;
6572 	}
6573 
6574 	if (dc_submit_i2c(
6575 			ddc_service->ctx->dc,
6576 			ddc_service->ddc_pin->hw_info.ddc_channel,
6577 			&cmd))
6578 		result = num;
6579 
6580 	kfree(cmd.payloads);
6581 	return result;
6582 }
6583 
6584 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6585 {
6586 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6587 }
6588 
6589 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6590 	.master_xfer = amdgpu_dm_i2c_xfer,
6591 	.functionality = amdgpu_dm_i2c_func,
6592 };
6593 
6594 static struct amdgpu_i2c_adapter *
6595 create_i2c(struct ddc_service *ddc_service,
6596 	   int link_index,
6597 	   int *res)
6598 {
6599 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6600 	struct amdgpu_i2c_adapter *i2c;
6601 
6602 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6603 	if (!i2c)
6604 		return NULL;
6605 	i2c->base.owner = THIS_MODULE;
6606 	i2c->base.class = I2C_CLASS_DDC;
6607 	i2c->base.dev.parent = &adev->pdev->dev;
6608 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6609 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6610 	i2c_set_adapdata(&i2c->base, i2c);
6611 	i2c->ddc_service = ddc_service;
6612 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6613 
6614 	return i2c;
6615 }
6616 
6617 
6618 /*
6619  * Note: this function assumes that dc_link_detect() was called for the
6620  * dc_link which will be represented by this aconnector.
6621  */
6622 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6623 				    struct amdgpu_dm_connector *aconnector,
6624 				    uint32_t link_index,
6625 				    struct amdgpu_encoder *aencoder)
6626 {
6627 	int res = 0;
6628 	int connector_type;
6629 	struct dc *dc = dm->dc;
6630 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6631 	struct amdgpu_i2c_adapter *i2c;
6632 
6633 	link->priv = aconnector;
6634 
6635 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6636 
6637 	i2c = create_i2c(link->ddc, link->link_index, &res);
6638 	if (!i2c) {
6639 		DRM_ERROR("Failed to create i2c adapter data\n");
6640 		return -ENOMEM;
6641 	}
6642 
6643 	aconnector->i2c = i2c;
6644 	res = i2c_add_adapter(&i2c->base);
6645 
6646 	if (res) {
6647 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6648 		goto out_free;
6649 	}
6650 
6651 	connector_type = to_drm_connector_type(link->connector_signal);
6652 
6653 	res = drm_connector_init_with_ddc(
6654 			dm->ddev,
6655 			&aconnector->base,
6656 			&amdgpu_dm_connector_funcs,
6657 			connector_type,
6658 			&i2c->base);
6659 
6660 	if (res) {
6661 		DRM_ERROR("connector_init failed\n");
6662 		aconnector->connector_id = -1;
6663 		goto out_free;
6664 	}
6665 
6666 	drm_connector_helper_add(
6667 			&aconnector->base,
6668 			&amdgpu_dm_connector_helper_funcs);
6669 
6670 	amdgpu_dm_connector_init_helper(
6671 		dm,
6672 		aconnector,
6673 		connector_type,
6674 		link,
6675 		link_index);
6676 
6677 	drm_connector_attach_encoder(
6678 		&aconnector->base, &aencoder->base);
6679 
	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP)
6682 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6683 
6684 out_free:
6685 	if (res) {
6686 		kfree(i2c);
6687 		aconnector->i2c = NULL;
6688 	}
6689 	return res;
6690 }
6691 
6692 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6693 {
6694 	switch (adev->mode_info.num_crtc) {
6695 	case 1:
6696 		return 0x1;
6697 	case 2:
6698 		return 0x3;
6699 	case 3:
6700 		return 0x7;
6701 	case 4:
6702 		return 0xf;
6703 	case 5:
6704 		return 0x1f;
6705 	case 6:
6706 	default:
6707 		return 0x3f;
6708 	}
6709 }
6710 
6711 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6712 				  struct amdgpu_encoder *aencoder,
6713 				  uint32_t link_index)
6714 {
6715 	struct amdgpu_device *adev = drm_to_adev(dev);
6716 
6717 	int res = drm_encoder_init(dev,
6718 				   &aencoder->base,
6719 				   &amdgpu_dm_encoder_funcs,
6720 				   DRM_MODE_ENCODER_TMDS,
6721 				   NULL);
6722 
6723 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6724 
6725 	if (!res)
6726 		aencoder->encoder_id = link_index;
6727 	else
6728 		aencoder->encoder_id = -1;
6729 
6730 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6731 
6732 	return res;
6733 }
6734 
6735 static void manage_dm_interrupts(struct amdgpu_device *adev,
6736 				 struct amdgpu_crtc *acrtc,
6737 				 bool enable)
6738 {
6739 	/*
6740 	 * We have no guarantee that the frontend index maps to the same
6741 	 * backend index - some even map to more than one.
6742 	 *
6743 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6744 	 */
6745 	int irq_type =
6746 		amdgpu_display_crtc_idx_to_irq_type(
6747 			adev,
6748 			acrtc->crtc_id);
6749 
6750 	if (enable) {
6751 		drm_crtc_vblank_on(&acrtc->base);
6752 		amdgpu_irq_get(
6753 			adev,
6754 			&adev->pageflip_irq,
6755 			irq_type);
	} else {
6758 		amdgpu_irq_put(
6759 			adev,
6760 			&adev->pageflip_irq,
6761 			irq_type);
6762 		drm_crtc_vblank_off(&acrtc->base);
6763 	}
6764 }
6765 
6766 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6767 				      struct amdgpu_crtc *acrtc)
6768 {
6769 	int irq_type =
6770 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6771 
6772 	/**
6773 	 * This reads the current state for the IRQ and force reapplies
6774 	 * the setting to hardware.
6775 	 */
6776 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6777 }
6778 
6779 static bool
6780 is_scaling_state_different(const struct dm_connector_state *dm_state,
6781 			   const struct dm_connector_state *old_dm_state)
6782 {
6783 	if (dm_state->scaling != old_dm_state->scaling)
6784 		return true;
6785 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6786 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6787 			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6789 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6790 			return true;
6791 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6792 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6793 		return true;
6794 	return false;
6795 }
6796 
6797 #ifdef CONFIG_DRM_AMD_DC_HDCP
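/*
 * Decide whether HDCP needs to be (re)enabled for this connector. The
 * transitions of interest, in the order checked below:
 *   - content type changed while CP is not UNDESIRED -> restart at DESIRED
 *   - ENABLED -> DESIRED                             -> no-op (re-enable)
 *   - UNDESIRED -> ENABLED (S3 resume)               -> downgrade to DESIRED
 *   - DESIRED with an active, connected sink         -> enable
 *   - DESIRED/ENABLED -> UNDESIRED                   -> disable
 */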
6798 static bool is_content_protection_different(struct drm_connector_state *state,
6799 					    const struct drm_connector_state *old_state,
6800 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6801 {
6802 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6803 
6804 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6805 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6806 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6807 		return true;
6808 	}
6809 
	/* CP is being re-enabled, ignore this */
6811 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6812 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6813 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6814 		return false;
6815 	}
6816 
	/*
	 * S3 resume case, since old state will always be 0 (UNDESIRED) and
	 * the restored state will be ENABLED.
	 */
6818 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6819 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6820 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6821 
	/*
	 * Check if something is connected/enabled; otherwise we would start
	 * HDCP while nothing is connected/enabled (hot-plug, headless S3,
	 * dpms).
	 */
6825 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6826 	    aconnector->dc_sink != NULL)
6827 		return true;
6828 
6829 	if (old_state->content_protection == state->content_protection)
6830 		return false;
6831 
6832 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6833 		return true;
6834 
6835 	return false;
6836 }
6837 
6838 #endif
6839 static void remove_stream(struct amdgpu_device *adev,
6840 			  struct amdgpu_crtc *acrtc,
6841 			  struct dc_stream_state *stream)
6842 {
6843 	/* this is the update mode case */
6844 
6845 	acrtc->otg_inst = -1;
6846 	acrtc->enabled = false;
6847 }
6848 
6849 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6850 			       struct dc_cursor_position *position)
6851 {
6852 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6853 	int x, y;
6854 	int xorigin = 0, yorigin = 0;
6855 
6856 	position->enable = false;
6857 	position->x = 0;
6858 	position->y = 0;
6859 
6860 	if (!crtc || !plane->state->fb)
6861 		return 0;
6862 
6863 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6864 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6865 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6866 			  __func__,
6867 			  plane->state->crtc_w,
6868 			  plane->state->crtc_h);
6869 		return -EINVAL;
6870 	}
6871 
6872 	x = plane->state->crtc_x;
6873 	y = plane->state->crtc_y;
6874 
6875 	if (x <= -amdgpu_crtc->max_cursor_width ||
6876 	    y <= -amdgpu_crtc->max_cursor_height)
6877 		return 0;
6878 
6879 	if (x < 0) {
6880 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6881 		x = 0;
6882 	}
6883 	if (y < 0) {
6884 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6885 		y = 0;
6886 	}
6887 	position->enable = true;
6888 	position->translate_by_source = true;
6889 	position->x = x;
6890 	position->y = y;
6891 	position->x_hotspot = xorigin;
6892 	position->y_hotspot = yorigin;
6893 
6894 	return 0;
6895 }
6896 
6897 static void handle_cursor_update(struct drm_plane *plane,
6898 				 struct drm_plane_state *old_plane_state)
6899 {
6900 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6901 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6902 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6903 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6904 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6905 	uint64_t address = afb ? afb->address : 0;
6906 	struct dc_cursor_position position;
6907 	struct dc_cursor_attributes attributes;
6908 	int ret;
6909 
6910 	if (!plane->state->fb && !old_plane_state->fb)
6911 		return;
6912 
6913 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6914 			 __func__,
6915 			 amdgpu_crtc->crtc_id,
6916 			 plane->state->crtc_w,
6917 			 plane->state->crtc_h);
6918 
6919 	ret = get_cursor_position(plane, crtc, &position);
6920 	if (ret)
6921 		return;
6922 
6923 	if (!position.enable) {
6924 		/* turn off cursor */
6925 		if (crtc_state && crtc_state->stream) {
6926 			mutex_lock(&adev->dm.dc_lock);
6927 			dc_stream_set_cursor_position(crtc_state->stream,
6928 						      &position);
6929 			mutex_unlock(&adev->dm.dc_lock);
6930 		}
6931 		return;
6932 	}
6933 
6934 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6935 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6936 
6937 	memset(&attributes, 0, sizeof(attributes));
6938 	attributes.address.high_part = upper_32_bits(address);
6939 	attributes.address.low_part  = lower_32_bits(address);
6940 	attributes.width             = plane->state->crtc_w;
6941 	attributes.height            = plane->state->crtc_h;
6942 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6943 	attributes.rotation_angle    = 0;
6944 	attributes.attribute_flags.value = 0;
6945 
6946 	attributes.pitch = attributes.width;
6947 
6948 	if (crtc_state->stream) {
6949 		mutex_lock(&adev->dm.dc_lock);
6950 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6951 							 &attributes))
6952 			DRM_ERROR("DC failed to set cursor attributes\n");
6953 
6954 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6955 						   &position))
6956 			DRM_ERROR("DC failed to set cursor position\n");
6957 		mutex_unlock(&adev->dm.dc_lock);
6958 	}
6959 }
6960 
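/*
 * Latch the pending pageflip event on the CRTC so the pageflip interrupt
 * handler can complete it once the flip is done. Must be called with the
 * event_lock held.
 */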
6961 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6962 {
6964 	assert_spin_locked(&acrtc->base.dev->event_lock);
6965 	WARN_ON(acrtc->event);
6966 
6967 	acrtc->event = acrtc->base.state->event;
6968 
6969 	/* Set the flip status */
6970 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6971 
6972 	/* Mark this event as consumed */
6973 	acrtc->base.state->event = NULL;
6974 
6975 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6976 						 acrtc->crtc_id);
6977 }
6978 
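/*
 * Update the freesync/VRR state tracked on the stream around a flip: run the
 * freesync module pre-flip handling, rebuild the VRR infopacket and mirror
 * the results into both the new CRTC state and the IRQ parameters.
 */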
6979 static void update_freesync_state_on_stream(
6980 	struct amdgpu_display_manager *dm,
6981 	struct dm_crtc_state *new_crtc_state,
6982 	struct dc_stream_state *new_stream,
6983 	struct dc_plane_state *surface,
6984 	u32 flip_timestamp_in_us)
6985 {
6986 	struct mod_vrr_params vrr_params;
6987 	struct dc_info_packet vrr_infopacket = {0};
6988 	struct amdgpu_device *adev = dm->adev;
6989 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6990 	unsigned long flags;
6991 
6992 	if (!new_stream)
6993 		return;
6994 
6995 	/*
6996 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6997 	 * For now it's sufficient to just guard against these conditions.
6998 	 */
6999 
7000 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7001 		return;
7002 
7003 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
7005 
7006 	if (surface) {
7007 		mod_freesync_handle_preflip(
7008 			dm->freesync_module,
7009 			surface,
7010 			new_stream,
7011 			flip_timestamp_in_us,
7012 			&vrr_params);
7013 
7014 		if (adev->family < AMDGPU_FAMILY_AI &&
7015 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7016 			mod_freesync_handle_v_update(dm->freesync_module,
7017 						     new_stream, &vrr_params);
7018 
7019 			/* Need to call this before the frame ends. */
7020 			dc_stream_adjust_vmin_vmax(dm->dc,
7021 						   new_crtc_state->stream,
7022 						   &vrr_params.adjust);
7023 		}
7024 	}
7025 
7026 	mod_freesync_build_vrr_infopacket(
7027 		dm->freesync_module,
7028 		new_stream,
7029 		&vrr_params,
7030 		PACKET_TYPE_VRR,
7031 		TRANSFER_FUNC_UNKNOWN,
7032 		&vrr_infopacket);
7033 
7034 	new_crtc_state->freesync_timing_changed |=
7035 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7036 			&vrr_params.adjust,
7037 			sizeof(vrr_params.adjust)) != 0);
7038 
7039 	new_crtc_state->freesync_vrr_info_changed |=
7040 		(memcmp(&new_crtc_state->vrr_infopacket,
7041 			&vrr_infopacket,
7042 			sizeof(vrr_infopacket)) != 0);
7043 
7044 	acrtc->dm_irq_params.vrr_params = vrr_params;
7045 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7046 
7047 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7048 	new_stream->vrr_infopacket = vrr_infopacket;
7049 
7050 	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
7052 			      new_crtc_state->base.crtc->base.id,
7053 			      (int)new_crtc_state->base.vrr_enabled,
7054 			      (int)vrr_params.state);
7055 
7056 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7057 }
7058 
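/*
 * Recompute the VRR parameters from the committed freesync config and copy
 * them into dm_irq_params so the vblank/vupdate interrupt handlers see a
 * consistent snapshot (hence the event_lock protection).
 */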
7059 static void update_stream_irq_parameters(
7060 	struct amdgpu_display_manager *dm,
7061 	struct dm_crtc_state *new_crtc_state)
7062 {
7063 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7064 	struct mod_vrr_params vrr_params;
7065 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7066 	struct amdgpu_device *adev = dm->adev;
7067 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7068 	unsigned long flags;
7069 
7070 	if (!new_stream)
7071 		return;
7072 
7073 	/*
7074 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7075 	 * For now it's sufficient to just guard against these conditions.
7076 	 */
7077 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7078 		return;
7079 
7080 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7081 	vrr_params = acrtc->dm_irq_params.vrr_params;
7082 
7083 	if (new_crtc_state->vrr_supported &&
7084 	    config.min_refresh_in_uhz &&
7085 	    config.max_refresh_in_uhz) {
7086 		config.state = new_crtc_state->base.vrr_enabled ?
7087 			VRR_STATE_ACTIVE_VARIABLE :
7088 			VRR_STATE_INACTIVE;
7089 	} else {
7090 		config.state = VRR_STATE_UNSUPPORTED;
7091 	}
7092 
7093 	mod_freesync_build_vrr_params(dm->freesync_module,
7094 				      new_stream,
7095 				      &config, &vrr_params);
7096 
7097 	new_crtc_state->freesync_timing_changed |=
7098 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7099 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7100 
7101 	new_crtc_state->freesync_config = config;
7102 	/* Copy state for access from DM IRQ handler */
7103 	acrtc->dm_irq_params.freesync_config = config;
7104 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7105 	acrtc->dm_irq_params.vrr_params = vrr_params;
7106 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7107 }
7108 
7109 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7110 					    struct dm_crtc_state *new_state)
7111 {
7112 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7113 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7114 
7115 	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active we must not disable the vblank irq, as
		 * a re-enable after a disable would compute bogus vblank/pflip
		 * timestamps if it happened inside the display front porch.
		 *
		 * We also need the vupdate irq for the actual core vblank
		 * handling at end of vblank.
		 */
7124 		dm_set_vupdate_irq(new_state->base.crtc, true);
7125 		drm_crtc_vblank_get(new_state->base.crtc);
7126 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7127 				 __func__, new_state->base.crtc->base.id);
7128 	} else if (old_vrr_active && !new_vrr_active) {
7129 		/* Transition VRR active -> inactive:
7130 		 * Allow vblank irq disable again for fixed refresh rate.
7131 		 */
7132 		dm_set_vupdate_irq(new_state->base.crtc, false);
7133 		drm_crtc_vblank_put(new_state->base.crtc);
7134 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7135 				 __func__, new_state->base.crtc->base.id);
7136 	}
7137 }
7138 
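/*
 * Issue cursor updates for every cursor plane in the atomic state; cursor
 * programming is handled outside of the main plane-commit bundle.
 */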
7139 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7140 {
7141 	struct drm_plane *plane;
7142 	struct drm_plane_state *old_plane_state, *new_plane_state;
7143 	int i;
7144 
7145 	/*
7146 	 * TODO: Make this per-stream so we don't issue redundant updates for
7147 	 * commits with multiple streams.
7148 	 */
7149 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7150 				       new_plane_state, i)
7151 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7152 			handle_cursor_update(plane, old_plane_state);
7153 }
7154 
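/*
 * Commit all non-cursor plane updates for one CRTC: build a
 * dc_surface_update bundle, wait for fences on the incoming framebuffers,
 * throttle flips against the target vblank, then hand everything to DC in a
 * single dc_commit_updates_for_stream() call.
 */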
7155 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7156 				    struct dc_state *dc_state,
7157 				    struct drm_device *dev,
7158 				    struct amdgpu_display_manager *dm,
7159 				    struct drm_crtc *pcrtc,
7160 				    bool wait_for_vblank)
7161 {
7162 	uint32_t i;
7163 	uint64_t timestamp_ns;
7164 	struct drm_plane *plane;
7165 	struct drm_plane_state *old_plane_state, *new_plane_state;
7166 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7167 	struct drm_crtc_state *new_pcrtc_state =
7168 			drm_atomic_get_new_crtc_state(state, pcrtc);
7169 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7170 	struct dm_crtc_state *dm_old_crtc_state =
7171 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7172 	int planes_count = 0, vpos, hpos;
7173 	long r;
7174 	unsigned long flags;
7175 	struct amdgpu_bo *abo;
7176 	uint32_t target_vblank, last_flip_vblank;
7177 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7178 	bool pflip_present = false;
7179 	struct {
7180 		struct dc_surface_update surface_updates[MAX_SURFACES];
7181 		struct dc_plane_info plane_infos[MAX_SURFACES];
7182 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7183 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7184 		struct dc_stream_update stream_update;
7185 	} *bundle;
7186 
7187 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7188 
7189 	if (!bundle) {
7190 		dm_error("Failed to allocate update bundle\n");
7191 		goto cleanup;
7192 	}
7193 
7194 	/*
7195 	 * Disable the cursor first if we're disabling all the planes.
7196 	 * It'll remain on the screen after the planes are re-enabled
7197 	 * if we don't.
7198 	 */
7199 	if (acrtc_state->active_planes == 0)
7200 		amdgpu_dm_commit_cursors(state);
7201 
7202 	/* update planes when needed */
7203 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7204 		struct drm_crtc *crtc = new_plane_state->crtc;
7205 		struct drm_crtc_state *new_crtc_state;
7206 		struct drm_framebuffer *fb = new_plane_state->fb;
7207 		bool plane_needs_flip;
7208 		struct dc_plane_state *dc_plane;
7209 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7210 
7211 		/* Cursor plane is handled after stream updates */
7212 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7213 			continue;
7214 
7215 		if (!fb || !crtc || pcrtc != crtc)
7216 			continue;
7217 
7218 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7219 		if (!new_crtc_state->active)
7220 			continue;
7221 
7222 		dc_plane = dm_new_plane_state->dc_state;
7223 
7224 		bundle->surface_updates[planes_count].surface = dc_plane;
7225 		if (new_pcrtc_state->color_mgmt_changed) {
7226 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7227 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7228 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7229 		}
7230 
7231 		fill_dc_scaling_info(new_plane_state,
7232 				     &bundle->scaling_infos[planes_count]);
7233 
7234 		bundle->surface_updates[planes_count].scaling_info =
7235 			&bundle->scaling_infos[planes_count];
7236 
7237 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7238 
7239 		pflip_present = pflip_present || plane_needs_flip;
7240 
7241 		if (!plane_needs_flip) {
7242 			planes_count += 1;
7243 			continue;
7244 		}
7245 
7246 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7247 
7248 		/*
7249 		 * Wait for all fences on this FB. Do limited wait to avoid
7250 		 * deadlock during GPU reset when this fence will not signal
7251 		 * but we hold reservation lock for the BO.
7252 		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true, false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!\n");
7258 
7259 		fill_dc_plane_info_and_addr(
7260 			dm->adev, new_plane_state,
7261 			dm_new_plane_state->tiling_flags,
7262 			&bundle->plane_infos[planes_count],
7263 			&bundle->flip_addrs[planes_count].address,
7264 			dm_new_plane_state->tmz_surface, false);
7265 
7266 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7267 				 new_plane_state->plane->index,
7268 				 bundle->plane_infos[planes_count].dcc.enable);
7269 
7270 		bundle->surface_updates[planes_count].plane_info =
7271 			&bundle->plane_infos[planes_count];
7272 
7273 		/*
7274 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
7276 		 */
7277 		bundle->flip_addrs[planes_count].flip_immediate =
7278 			crtc->state->async_flip &&
7279 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7280 
7281 		timestamp_ns = ktime_get_ns();
7282 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7283 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7284 		bundle->surface_updates[planes_count].surface = dc_plane;
7285 
7286 		if (!bundle->surface_updates[planes_count].surface) {
7287 			DRM_ERROR("No surface for CRTC: id=%d\n",
7288 					acrtc_attach->crtc_id);
7289 			continue;
7290 		}
7291 
7292 		if (plane == pcrtc->primary)
7293 			update_freesync_state_on_stream(
7294 				dm,
7295 				acrtc_state,
7296 				acrtc_state->stream,
7297 				dc_plane,
7298 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7299 
7300 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7301 				 __func__,
7302 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7303 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7304 
7305 		planes_count += 1;
7307 	}
7308 
7309 	if (pflip_present) {
7310 		if (!vrr_active) {
7311 			/* Use old throttling in non-vrr fixed refresh rate mode
7312 			 * to keep flip scheduling based on target vblank counts
7313 			 * working in a backwards compatible way, e.g., for
7314 			 * clients using the GLX_OML_sync_control extension or
7315 			 * DRI3/Present extension with defined target_msc.
7316 			 */
7317 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
7320 			/* For variable refresh rate mode only:
7321 			 * Get vblank of last completed flip to avoid > 1 vrr
7322 			 * flips per video frame by use of throttling, but allow
7323 			 * flip programming anywhere in the possibly large
7324 			 * variable vrr vblank interval for fine-grained flip
7325 			 * timing control and more opportunity to avoid stutter
7326 			 * on late submission of flips.
7327 			 */
7328 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7329 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7330 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7331 		}
7332 
7333 		target_vblank = last_flip_vblank + wait_for_vblank;
7334 
7335 		/*
7336 		 * Wait until we're out of the vertical blank period before the one
7337 		 * targeted by the flip
7338 		 */
7339 		while ((acrtc_attach->enabled &&
7340 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7341 							    0, &vpos, &hpos, NULL,
7342 							    NULL, &pcrtc->hwmode)
7343 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7344 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7345 			(int)(target_vblank -
7346 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7347 			usleep_range(1000, 1100);
7348 		}
7349 
		/*
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on
		 * the appropriate hardware blocks (e.g. HUBP), so in the
		 * transition case from 0 -> n planes we have to skip a
		 * hardware generated event and rely on sending it from
		 * software.
		 */
7358 		if (acrtc_attach->base.state->event &&
7359 		    acrtc_state->active_planes > 0) {
7360 			drm_crtc_vblank_get(pcrtc);
7361 
7362 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7363 
7364 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7365 			prepare_flip_isr(acrtc_attach);
7366 
7367 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7368 		}
7369 
7370 		if (acrtc_state->stream) {
7371 			if (acrtc_state->freesync_vrr_info_changed)
7372 				bundle->stream_update.vrr_infopacket =
7373 					&acrtc_state->stream->vrr_infopacket;
7374 		}
7375 	}
7376 
7377 	/* Update the planes if changed or disable if we don't have any. */
7378 	if ((planes_count || acrtc_state->active_planes == 0) &&
7379 		acrtc_state->stream) {
7380 		bundle->stream_update.stream = acrtc_state->stream;
7381 		if (new_pcrtc_state->mode_changed) {
7382 			bundle->stream_update.src = acrtc_state->stream->src;
7383 			bundle->stream_update.dst = acrtc_state->stream->dst;
7384 		}
7385 
7386 		if (new_pcrtc_state->color_mgmt_changed) {
7387 			/*
7388 			 * TODO: This isn't fully correct since we've actually
7389 			 * already modified the stream in place.
7390 			 */
7391 			bundle->stream_update.gamut_remap =
7392 				&acrtc_state->stream->gamut_remap_matrix;
7393 			bundle->stream_update.output_csc_transform =
7394 				&acrtc_state->stream->csc_color_matrix;
7395 			bundle->stream_update.out_transfer_func =
7396 				acrtc_state->stream->out_transfer_func;
7397 		}
7398 
7399 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7400 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7401 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7402 
7403 		/*
7404 		 * If FreeSync state on the stream has changed then we need to
7405 		 * re-adjust the min/max bounds now that DC doesn't handle this
7406 		 * as part of commit.
7407 		 */
7408 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7409 		    amdgpu_dm_vrr_active(acrtc_state)) {
7410 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7411 			dc_stream_adjust_vmin_vmax(
7412 				dm->dc, acrtc_state->stream,
7413 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7414 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7415 		}
7416 		mutex_lock(&dm->dc_lock);
7417 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7418 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7419 			amdgpu_dm_psr_disable(acrtc_state->stream);
7420 
		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);
7427 
		/*
7429 		 * Enable or disable the interrupts on the backend.
7430 		 *
7431 		 * Most pipes are put into power gating when unused.
7432 		 *
7433 		 * When power gating is enabled on a pipe we lose the
7434 		 * interrupt enablement state when power gating is disabled.
7435 		 *
7436 		 * So we need to update the IRQ control state in hardware
7437 		 * whenever the pipe turns on (since it could be previously
7438 		 * power gated) or off (since some pipes can't be power gated
7439 		 * on some ASICs).
7440 		 */
7441 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7442 			dm_update_pflip_irq_state(drm_to_adev(dev),
7443 						  acrtc_attach);
7444 
7445 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7446 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7447 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7448 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7449 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7450 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7451 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7452 			amdgpu_dm_psr_enable(acrtc_state->stream);
7453 		}
7454 
7455 		mutex_unlock(&dm->dc_lock);
7456 	}
7457 
7458 	/*
7459 	 * Update cursor state *after* programming all the planes.
7460 	 * This avoids redundant programming in the case where we're going
7461 	 * to be disabling a single plane - those pipes are being disabled.
7462 	 */
7463 	if (acrtc_state->active_planes)
7464 		amdgpu_dm_commit_cursors(state);
7465 
7466 cleanup:
7467 	kfree(bundle);
7468 }
7469 
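/*
 * Notify the audio component about ELD changes: first signal removals for
 * connectors that lost or changed their CRTC, then signal additions for
 * connectors whose stream went through a modeset.
 */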
7470 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7471 				   struct drm_atomic_state *state)
7472 {
7473 	struct amdgpu_device *adev = drm_to_adev(dev);
7474 	struct amdgpu_dm_connector *aconnector;
7475 	struct drm_connector *connector;
7476 	struct drm_connector_state *old_con_state, *new_con_state;
7477 	struct drm_crtc_state *new_crtc_state;
7478 	struct dm_crtc_state *new_dm_crtc_state;
7479 	const struct dc_stream_status *status;
7480 	int i, inst;
7481 
7482 	/* Notify device removals. */
7483 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7484 		if (old_con_state->crtc != new_con_state->crtc) {
7485 			/* CRTC changes require notification. */
7486 			goto notify;
7487 		}
7488 
7489 		if (!new_con_state->crtc)
7490 			continue;
7491 
7492 		new_crtc_state = drm_atomic_get_new_crtc_state(
7493 			state, new_con_state->crtc);
7494 
7495 		if (!new_crtc_state)
7496 			continue;
7497 
7498 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7499 			continue;
7500 
7501 	notify:
7502 		aconnector = to_amdgpu_dm_connector(connector);
7503 
7504 		mutex_lock(&adev->dm.audio_lock);
7505 		inst = aconnector->audio_inst;
7506 		aconnector->audio_inst = -1;
7507 		mutex_unlock(&adev->dm.audio_lock);
7508 
7509 		amdgpu_dm_audio_eld_notify(adev, inst);
7510 	}
7511 
7512 	/* Notify audio device additions. */
7513 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7514 		if (!new_con_state->crtc)
7515 			continue;
7516 
7517 		new_crtc_state = drm_atomic_get_new_crtc_state(
7518 			state, new_con_state->crtc);
7519 
7520 		if (!new_crtc_state)
7521 			continue;
7522 
7523 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7524 			continue;
7525 
7526 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7527 		if (!new_dm_crtc_state->stream)
7528 			continue;
7529 
7530 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7531 		if (!status)
7532 			continue;
7533 
7534 		aconnector = to_amdgpu_dm_connector(connector);
7535 
7536 		mutex_lock(&adev->dm.audio_lock);
7537 		inst = status->audio_inst;
7538 		aconnector->audio_inst = inst;
7539 		mutex_unlock(&adev->dm.audio_lock);
7540 
7541 		amdgpu_dm_audio_eld_notify(adev, inst);
7542 	}
7543 }
7544 
7545 /*
7546  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7547  * @crtc_state: the DRM CRTC state
7548  * @stream_state: the DC stream state.
7549  *
7550  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7551  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7552  */
7553 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7554 						struct dc_stream_state *stream_state)
7555 {
7556 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7557 }
7558 
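/*
 * Currently a thin wrapper around drm_atomic_helper_commit(); see the TODOs
 * in the body for planned driver-specific handling.
 */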
7559 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7560 				   struct drm_atomic_state *state,
7561 				   bool nonblock)
7562 {
7563 	/*
7564 	 * Add check here for SoC's that support hardware cursor plane, to
7565 	 * unset legacy_cursor_update
7566 	 */
7567 
7568 	return drm_atomic_helper_commit(dev, state, nonblock);
7569 
	/* TODO: Handle EINTR and re-enable IRQs. */
7571 }
7572 
7573 /**
 * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
7575  * @state: The atomic state to commit
7576  *
7577  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
7579  * atomic check should have filtered anything non-kosher.
7580  */
7581 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7582 {
7583 	struct drm_device *dev = state->dev;
7584 	struct amdgpu_device *adev = drm_to_adev(dev);
7585 	struct amdgpu_display_manager *dm = &adev->dm;
7586 	struct dm_atomic_state *dm_state;
7587 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7588 	uint32_t i, j;
7589 	struct drm_crtc *crtc;
7590 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7591 	unsigned long flags;
7592 	bool wait_for_vblank = true;
7593 	struct drm_connector *connector;
7594 	struct drm_connector_state *old_con_state, *new_con_state;
7595 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7596 	int crtc_disable_count = 0;
7597 	bool mode_set_reset_required = false;
7598 
7599 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
7600 
7601 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7602 
7603 	dm_state = dm_atomic_get_new_state(state);
7604 	if (dm_state && dm_state->context) {
7605 		dc_state = dm_state->context;
7606 	} else {
7607 		/* No state changes, retain current state. */
7608 		dc_state_temp = dc_create_state(dm->dc);
7609 		ASSERT(dc_state_temp);
7610 		dc_state = dc_state_temp;
7611 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7612 	}
7613 
7614 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7615 				       new_crtc_state, i) {
7616 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7617 
7618 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7619 
7620 		if (old_crtc_state->active &&
7621 		    (!new_crtc_state->active ||
7622 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7623 			manage_dm_interrupts(adev, acrtc, false);
7624 			dc_stream_release(dm_old_crtc_state->stream);
7625 		}
7626 	}
7627 
7628 	/* update changed items */
7629 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7630 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7631 
7632 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7633 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7634 
7635 		DRM_DEBUG_DRIVER(
7636 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7637 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7638 			"connectors_changed:%d\n",
7639 			acrtc->crtc_id,
7640 			new_crtc_state->enable,
7641 			new_crtc_state->active,
7642 			new_crtc_state->planes_changed,
7643 			new_crtc_state->mode_changed,
7644 			new_crtc_state->active_changed,
7645 			new_crtc_state->connectors_changed);
7646 
7647 		/* Copy all transient state flags into dc state */
7648 		if (dm_new_crtc_state->stream) {
7649 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7650 							    dm_new_crtc_state->stream);
7651 		}
7652 
7653 		/* handles headless hotplug case, updating new_state and
7654 		 * aconnector as needed
7655 		 */
7656 
7657 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7658 
7659 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7660 
7661 			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery. In this
				 * case userspace tries to set a mode on a
				 * display that is in fact disconnected, so
				 * dc_sink is NULL on the aconnector. We
				 * expect a mode reset to come soon.
				 *
				 * This can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In this case we want to pretend we still
				 * have a sink to keep the pipe running so
				 * that hw state is consistent with the sw
				 * state.
				 */
7677 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7678 						__func__, acrtc->base.base.id);
7679 				continue;
7680 			}
7681 
7682 			if (dm_old_crtc_state->stream)
7683 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7684 
7685 			pm_runtime_get_noresume(dev->dev);
7686 
7687 			acrtc->enabled = true;
7688 			acrtc->hw_mode = new_crtc_state->mode;
7689 			crtc->hwmode = new_crtc_state->mode;
7690 			mode_set_reset_required = true;
7691 		} else if (modereset_required(new_crtc_state)) {
7692 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7693 			/* i.e. reset mode */
7694 			if (dm_old_crtc_state->stream)
7695 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7696 			mode_set_reset_required = true;
7697 		}
7698 	} /* for_each_crtc_in_state() */
7699 
7700 	if (dc_state) {
		/* If there was a mode set or reset, disable eDP PSR. */
7702 		if (mode_set_reset_required)
7703 			amdgpu_dm_psr_disable_all(dm);
7704 
7705 		dm_enable_per_frame_crtc_master_sync(dc_state);
7706 		mutex_lock(&dm->dc_lock);
7707 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7708 		mutex_unlock(&dm->dc_lock);
7709 	}
7710 
7711 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7712 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7713 
7714 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7715 
7716 		if (dm_new_crtc_state->stream != NULL) {
7717 			const struct dc_stream_status *status =
7718 					dc_stream_get_status(dm_new_crtc_state->stream);
7719 
7720 			if (!status)
7721 				status = dc_stream_get_status_from_state(dc_state,
7722 									 dm_new_crtc_state->stream);
7723 			if (!status)
7724 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7725 			else
7726 				acrtc->otg_inst = status->primary_otg_inst;
7727 		}
7728 	}
7729 #ifdef CONFIG_DRM_AMD_DC_HDCP
7730 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7731 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7732 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7733 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7734 
7735 		new_crtc_state = NULL;
7736 
7737 		if (acrtc)
7738 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7739 
7740 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7741 
7742 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7743 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7744 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7745 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7746 			continue;
7747 		}
7748 
7749 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7750 			hdcp_update_display(
7751 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7752 				new_con_state->hdcp_content_type,
7753 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7754 													 : false);
7755 	}
7756 #endif
7757 
7758 	/* Handle connector state changes */
7759 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7760 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7761 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7762 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7763 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7764 		struct dc_stream_update stream_update;
7765 		struct dc_info_packet hdr_packet;
7766 		struct dc_stream_status *status = NULL;
7767 		bool abm_changed, hdr_changed, scaling_changed;
7768 
7769 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7770 		memset(&stream_update, 0, sizeof(stream_update));
7771 
7772 		if (acrtc) {
7773 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7774 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7775 		}
7776 
7777 		/* Skip any modesets/resets */
7778 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7779 			continue;
7780 
7781 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7782 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7783 
7784 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7785 							     dm_old_con_state);
7786 
7787 		abm_changed = dm_new_crtc_state->abm_level !=
7788 			      dm_old_crtc_state->abm_level;
7789 
7790 		hdr_changed =
7791 			is_hdr_metadata_different(old_con_state, new_con_state);
7792 
7793 		if (!scaling_changed && !abm_changed && !hdr_changed)
7794 			continue;
7795 
7796 		stream_update.stream = dm_new_crtc_state->stream;
7797 		if (scaling_changed) {
7798 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7799 					dm_new_con_state, dm_new_crtc_state->stream);
7800 
7801 			stream_update.src = dm_new_crtc_state->stream->src;
7802 			stream_update.dst = dm_new_crtc_state->stream->dst;
7803 		}
7804 
7805 		if (abm_changed) {
7806 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7807 
7808 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7809 		}
7810 
7811 		if (hdr_changed) {
7812 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7813 			stream_update.hdr_static_metadata = &hdr_packet;
7814 		}
7815 
7816 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7817 		WARN_ON(!status);
7818 		WARN_ON(!status->plane_count);
7819 
7820 		/*
7821 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7822 		 * Here we create an empty update on each plane.
7823 		 * To fix this, DC should permit updating only stream properties.
7824 		 */
7825 		for (j = 0; j < status->plane_count; j++)
7826 			dummy_updates[j].surface = status->plane_states[0];
7829 		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
7836 		mutex_unlock(&dm->dc_lock);
7837 	}
7838 
7839 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7840 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7841 				      new_crtc_state, i) {
7842 		if (old_crtc_state->active && !new_crtc_state->active)
7843 			crtc_disable_count++;
7844 
7845 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7846 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7847 
7848 		/* For freesync config update on crtc state and params for irq */
7849 		update_stream_irq_parameters(dm, dm_new_crtc_state);
7850 
7851 		/* Handle vrr on->off / off->on transitions */
7852 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7853 						dm_new_crtc_state);
7854 	}
7855 
	/*
7857 	 * Enable interrupts for CRTCs that are newly enabled or went through
7858 	 * a modeset. It was intentionally deferred until after the front end
7859 	 * state was modified to wait until the OTG was on and so the IRQ
7860 	 * handlers didn't access stale or invalid state.
7861 	 */
7862 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7863 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7864 
7865 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7866 
7867 		if (new_crtc_state->active &&
7868 		    (!old_crtc_state->active ||
7869 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7870 			dc_stream_retain(dm_new_crtc_state->stream);
7871 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7872 			manage_dm_interrupts(adev, acrtc, true);
7873 
7874 #ifdef CONFIG_DEBUG_FS
			/*
7876 			 * Frontend may have changed so reapply the CRC capture
7877 			 * settings for the stream.
7878 			 */
7879 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7880 
7881 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7882 				amdgpu_dm_crtc_configure_crc_source(
7883 					crtc, dm_new_crtc_state,
7884 					dm_new_crtc_state->crc_src);
7885 			}
7886 #endif
7887 		}
7888 	}
7889 
7890 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7891 		if (new_crtc_state->async_flip)
7892 			wait_for_vblank = false;
7893 
7894 	/* update planes when needed per crtc*/
7895 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7896 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7897 
7898 		if (dm_new_crtc_state->stream)
7899 			amdgpu_dm_commit_planes(state, dc_state, dev,
7900 						dm, crtc, wait_for_vblank);
7901 	}
7902 
7903 	/* Update audio instances for each connector. */
7904 	amdgpu_dm_commit_audio(dev, state);
7905 
	/*
	 * Send a vblank event for every event not handled in the flip path,
	 * and mark each event as consumed for
	 * drm_atomic_helper_commit_hw_done().
	 */
7910 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7911 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7912 
7913 		if (new_crtc_state->event)
7914 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7915 
7916 		new_crtc_state->event = NULL;
7917 	}
7918 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7919 
7920 	/* Signal HW programming completion */
7921 	drm_atomic_helper_commit_hw_done(state);
7922 
7923 	if (wait_for_vblank)
7924 		drm_atomic_helper_wait_for_flip_done(dev, state);
7925 
7926 	drm_atomic_helper_cleanup_planes(dev, state);
7927 
7928 	/*
7929 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7930 	 * so we can put the GPU into runtime suspend if we're not driving any
7931 	 * displays anymore
7932 	 */
7933 	for (i = 0; i < crtc_disable_count; i++)
7934 		pm_runtime_put_autosuspend(dev->dev);
7935 	pm_runtime_mark_last_busy(dev->dev);
7936 
7937 	if (dc_state_temp)
7938 		dc_release_state(dc_state_temp);
7939 }
7940 
7941 
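/*
 * Build and commit a minimal atomic state (the connector, its CRTC and the
 * primary plane, with mode_changed forced) to restore the previous display
 * setting without involving userspace.
 */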
7942 static int dm_force_atomic_commit(struct drm_connector *connector)
7943 {
7944 	int ret = 0;
7945 	struct drm_device *ddev = connector->dev;
7946 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7947 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7948 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7949 	struct drm_connector_state *conn_state;
7950 	struct drm_crtc_state *crtc_state;
7951 	struct drm_plane_state *plane_state;
7952 
7953 	if (!state)
7954 		return -ENOMEM;
7955 
7956 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7957 
7958 	/* Construct an atomic state to restore previous display setting */
7959 
7960 	/*
7961 	 * Attach connectors to drm_atomic_state
7962 	 */
7963 	conn_state = drm_atomic_get_connector_state(state, connector);
7964 
7965 	ret = PTR_ERR_OR_ZERO(conn_state);
7966 	if (ret)
7967 		goto err;
7968 
7969 	/* Attach crtc to drm_atomic_state*/
7970 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7971 
7972 	ret = PTR_ERR_OR_ZERO(crtc_state);
7973 	if (ret)
7974 		goto err;
7975 
7976 	/* force a restore */
7977 	crtc_state->mode_changed = true;
7978 
7979 	/* Attach plane to drm_atomic_state */
7980 	plane_state = drm_atomic_get_plane_state(state, plane);
7981 
7982 	ret = PTR_ERR_OR_ZERO(plane_state);
7983 	if (ret)
7984 		goto err;
7987 	/* Call commit internally with the state we just constructed */
7988 	ret = drm_atomic_commit(state);
7989 	if (!ret)
7990 		return 0;
7991 
7992 err:
7993 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7994 	drm_atomic_state_put(state);
7995 
7996 	return ret;
7997 }
7998 
/*
 * This function handles all the cases when a set mode does not arrive upon
 * hotplug. This includes when a display is unplugged and then plugged back
 * into the same port, and when running without usermode desktop manager
 * support.
 */
8004 void dm_restore_drm_connector_state(struct drm_device *dev,
8005 				    struct drm_connector *connector)
8006 {
8007 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8008 	struct amdgpu_crtc *disconnected_acrtc;
8009 	struct dm_crtc_state *acrtc_state;
8010 
8011 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8012 		return;
8013 
8014 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8015 	if (!disconnected_acrtc)
8016 		return;
8017 
8018 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8019 	if (!acrtc_state->stream)
8020 		return;
8021 
	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce that we cannot rely on a usermode call to
	 * turn the display back on, so we do it here.
	 */
8027 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8028 		dm_force_atomic_commit(&aconnector->base);
8029 }
8030 
/*
 * Grabs all modesetting locks to serialize against any blocking commits and
 * waits for the completion of all non-blocking commits.
 */
8035 static int do_aquire_global_lock(struct drm_device *dev,
8036 				 struct drm_atomic_state *state)
8037 {
8038 	struct drm_crtc *crtc;
8039 	struct drm_crtc_commit *commit;
8040 	long ret;
8041 
	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases it, the extra locks we took here are released
	 * too.
	 */
8047 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8048 	if (ret)
8049 		return ret;
8050 
8051 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8052 		spin_lock(&crtc->commit_lock);
8053 		commit = list_first_entry_or_null(&crtc->commit_list,
8054 				struct drm_crtc_commit, commit_entry);
8055 		if (commit)
8056 			drm_crtc_commit_get(commit);
8057 		spin_unlock(&crtc->commit_lock);
8058 
8059 		if (!commit)
8060 			continue;
8061 
8062 		/*
8063 		 * Make sure all pending HW programming completed and
8064 		 * page flips done
8065 		 */
8066 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8067 
8068 		if (ret > 0)
8069 			ret = wait_for_completion_interruptible_timeout(
8070 					&commit->flip_done, 10*HZ);
8071 
8072 		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
8075 
8076 		drm_crtc_commit_put(commit);
8077 	}
8078 
8079 	return ret < 0 ? ret : 0;
8080 }
8081 
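/*
 * Derive the freesync/VRR config for a CRTC from the connector capabilities:
 * VRR is supported when the sink is freesync capable and the mode's refresh
 * rate falls inside the sink's [min_vfreq, max_vfreq] range.
 */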
8082 static void get_freesync_config_for_crtc(
8083 	struct dm_crtc_state *new_crtc_state,
8084 	struct dm_connector_state *new_con_state)
8085 {
8086 	struct mod_freesync_config config = {0};
8087 	struct amdgpu_dm_connector *aconnector =
8088 			to_amdgpu_dm_connector(new_con_state->base.connector);
8089 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8090 	int vrefresh = drm_mode_vrefresh(mode);
8091 
8092 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8093 					vrefresh >= aconnector->min_vfreq &&
8094 					vrefresh <= aconnector->max_vfreq;
8095 
8096 	if (new_crtc_state->vrr_supported) {
8097 		new_crtc_state->stream->ignore_msa_timing_param = true;
8098 		config.state = new_crtc_state->base.vrr_enabled ?
8099 				VRR_STATE_ACTIVE_VARIABLE :
8100 				VRR_STATE_INACTIVE;
8101 		config.min_refresh_in_uhz =
8102 				aconnector->min_vfreq * 1000000;
8103 		config.max_refresh_in_uhz =
8104 				aconnector->max_vfreq * 1000000;
8105 		config.vsif_supported = true;
8106 		config.btr = true;
8107 	}
8108 
8109 	new_crtc_state->freesync_config = config;
8110 }
8111 
8112 static void reset_freesync_config_for_crtc(
8113 	struct dm_crtc_state *new_crtc_state)
8114 {
8115 	new_crtc_state->vrr_supported = false;
8116 
8117 	memset(&new_crtc_state->vrr_infopacket, 0,
8118 	       sizeof(new_crtc_state->vrr_infopacket));
8119 }
8120 
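/*
 * Atomic-check helper for one CRTC: validates/creates the new dc stream,
 * removes it from or adds it to the dc context depending on @enable, and
 * flags when full dc validation under the global lock is needed.
 */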
8121 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8122 				struct drm_atomic_state *state,
8123 				struct drm_crtc *crtc,
8124 				struct drm_crtc_state *old_crtc_state,
8125 				struct drm_crtc_state *new_crtc_state,
8126 				bool enable,
8127 				bool *lock_and_validation_needed)
8128 {
8129 	struct dm_atomic_state *dm_state = NULL;
8130 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8131 	struct dc_stream_state *new_stream;
8132 	int ret = 0;
8133 
8134 	/*
8135 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8136 	 * update changed items
8137 	 */
8138 	struct amdgpu_crtc *acrtc = NULL;
8139 	struct amdgpu_dm_connector *aconnector = NULL;
8140 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8141 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8142 
8143 	new_stream = NULL;
8144 
8145 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8146 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8147 	acrtc = to_amdgpu_crtc(crtc);
8148 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8149 
8150 	/* TODO This hack should go away */
8151 	if (aconnector && enable) {
8152 		/* Make sure fake sink is created in plug-in scenario */
8153 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8154 							    &aconnector->base);
8155 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8156 							    &aconnector->base);
8157 
8158 		if (IS_ERR(drm_new_conn_state)) {
8159 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8160 			goto fail;
8161 		}
8162 
8163 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8164 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8165 
8166 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8167 			goto skip_modeset;
8168 
8169 		new_stream = create_validate_stream_for_sink(aconnector,
8170 							     &new_crtc_state->mode,
8171 							     dm_new_conn_state,
8172 							     dm_old_crtc_state->stream);
8173 
		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and will
		 * do the right thing on the next atomic commit.
		 */
8180 
8181 		if (!new_stream) {
8182 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8183 					__func__, acrtc->base.base.id);
8184 			ret = -ENOMEM;
8185 			goto fail;
8186 		}
8187 
8188 		/*
8189 		 * TODO: Check VSDB bits to decide whether this should
8190 		 * be enabled or not.
8191 		 */
8192 		new_stream->triggered_crtc_reset.enabled =
8193 			dm->force_timing_sync;
8194 
8195 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8196 
8197 		ret = fill_hdr_info_packet(drm_new_conn_state,
8198 					   &new_stream->hdr_static_metadata);
8199 		if (ret)
8200 			goto fail;
8201 
8202 		/*
8203 		 * If we already removed the old stream from the context
8204 		 * (and set the new stream to NULL) then we can't reuse
8205 		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and get a black screen.
8207 		 *
8208 		 * TODO: Refactor this function to allow this check to work
8209 		 * in all conditions.
8210 		 */
8211 		if (dm_new_crtc_state->stream &&
8212 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8213 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8214 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
					 new_crtc_state->mode_changed);
8217 		}
8218 	}
8219 
8220 	/* mode_changed flag may get updated above, need to check again */
8221 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8222 		goto skip_modeset;
8223 
8224 	DRM_DEBUG_DRIVER(
8225 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8226 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8227 		"connectors_changed:%d\n",
8228 		acrtc->crtc_id,
8229 		new_crtc_state->enable,
8230 		new_crtc_state->active,
8231 		new_crtc_state->planes_changed,
8232 		new_crtc_state->mode_changed,
8233 		new_crtc_state->active_changed,
8234 		new_crtc_state->connectors_changed);
8235 
8236 	/* Remove stream for any changed/disabled CRTC */
8237 	if (!enable) {
8238 
8239 		if (!dm_old_crtc_state->stream)
8240 			goto skip_modeset;
8241 
8242 		ret = dm_atomic_get_state(state, &dm_state);
8243 		if (ret)
8244 			goto fail;
8245 
8246 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8247 				crtc->base.id);
8248 
8249 		/* i.e. reset mode */
8250 		if (dc_remove_stream_from_ctx(
8251 				dm->dc,
8252 				dm_state->context,
8253 				dm_old_crtc_state->stream) != DC_OK) {
8254 			ret = -EINVAL;
8255 			goto fail;
8256 		}
8257 
8258 		dc_stream_release(dm_old_crtc_state->stream);
8259 		dm_new_crtc_state->stream = NULL;
8260 
8261 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8262 
8263 		*lock_and_validation_needed = true;
8264 
8265 	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when
		 * added MST connectors are not found in the existing
		 * crtc_state in chained mode.
		 * TODO: dig out the root cause of this.
		 */
8271 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8272 			goto skip_modeset;
8273 
8274 		if (modereset_required(new_crtc_state))
8275 			goto skip_modeset;
8276 
8277 		if (modeset_required(new_crtc_state, new_stream,
8278 				     dm_old_crtc_state->stream)) {
8279 
8280 			WARN_ON(dm_new_crtc_state->stream);
8281 
8282 			ret = dm_atomic_get_state(state, &dm_state);
8283 			if (ret)
8284 				goto fail;
8285 
8286 			dm_new_crtc_state->stream = new_stream;
8287 
8288 			dc_stream_retain(new_stream);
8289 
8290 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8291 						crtc->base.id);
8292 
8293 			if (dc_add_stream_to_ctx(
8294 					dm->dc,
8295 					dm_state->context,
8296 					dm_new_crtc_state->stream) != DC_OK) {
8297 				ret = -EINVAL;
8298 				goto fail;
8299 			}
8300 
8301 			*lock_and_validation_needed = true;
8302 		}
8303 	}
8304 
8305 skip_modeset:
8306 	/* Release extra reference */
8307 	if (new_stream)
		dc_stream_release(new_stream);
8309 
8310 	/*
8311 	 * We want to do dc stream updates that do not require a
8312 	 * full modeset below.
8313 	 */
8314 	if (!(enable && aconnector && new_crtc_state->active))
8315 		return 0;
	/*
	 * Given the above conditions, the dc stream state cannot be NULL
	 * because:
	 * 1. We're in the process of enabling CRTCs (the stream has just
	 *    been added to the dc context, or is already in the context),
	 * 2. it has a valid connector attached, and
	 * 3. it is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
8324 	BUG_ON(dm_new_crtc_state->stream == NULL);
8325 
8326 	/* Scaling or underscan settings */
8327 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8328 		update_stream_scaling_settings(
8329 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8330 
8331 	/* ABM settings */
8332 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8333 
8334 	/*
8335 	 * Color management settings. We also update color properties
8336 	 * when a modeset is needed, to ensure it gets reprogrammed.
8337 	 */
8338 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8339 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8340 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8341 		if (ret)
8342 			goto fail;
8343 	}
8344 
8345 	/* Update Freesync settings. */
8346 	get_freesync_config_for_crtc(dm_new_crtc_state,
8347 				     dm_new_conn_state);
8348 
8349 	return ret;
8350 
8351 fail:
8352 	if (new_stream)
8353 		dc_stream_release(new_stream);
8354 	return ret;
8355 }
8356 
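/*
 * Decide whether a plane update requires removing and re-adding all planes
 * on the stream. Any change that can affect z-order, pipe acquisition or
 * bandwidth (scaling, rotation, blending, format, tiling) forces a reset.
 */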
8357 static bool should_reset_plane(struct drm_atomic_state *state,
8358 			       struct drm_plane *plane,
8359 			       struct drm_plane_state *old_plane_state,
8360 			       struct drm_plane_state *new_plane_state)
8361 {
8362 	struct drm_plane *other;
8363 	struct drm_plane_state *old_other_state, *new_other_state;
8364 	struct drm_crtc_state *new_crtc_state;
8365 	int i;
8366 
	/*
	 * TODO: Remove this hack once the checks below are sufficient to
	 * determine when we need to reset all the planes on the stream.
	 */
8372 	if (state->allow_modeset)
8373 		return true;
8374 
8375 	/* Exit early if we know that we're adding or removing the plane. */
8376 	if (old_plane_state->crtc != new_plane_state->crtc)
8377 		return true;
8378 
8379 	/* old crtc == new_crtc == NULL, plane not in context. */
8380 	if (!new_plane_state->crtc)
8381 		return false;
8382 
8383 	new_crtc_state =
8384 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8385 
8386 	if (!new_crtc_state)
8387 		return true;
8388 
8389 	/* CRTC Degamma changes currently require us to recreate planes. */
8390 	if (new_crtc_state->color_mgmt_changed)
8391 		return true;
8392 
8393 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8394 		return true;
8395 
8396 	/*
8397 	 * If there are any new primary or overlay planes being added or
8398 	 * removed then the z-order can potentially change. To ensure
8399 	 * correct z-order and pipe acquisition the current DC architecture
8400 	 * requires us to remove and recreate all existing planes.
8401 	 *
8402 	 * TODO: Come up with a more elegant solution for this.
8403 	 */
8404 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8405 		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8406 
8407 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8408 			continue;
8409 
8410 		if (old_other_state->crtc != new_plane_state->crtc &&
8411 		    new_other_state->crtc != new_plane_state->crtc)
8412 			continue;
8413 
8414 		if (old_other_state->crtc != new_other_state->crtc)
8415 			return true;
8416 
8417 		/* Src/dst size and scaling updates. */
8418 		if (old_other_state->src_w != new_other_state->src_w ||
8419 		    old_other_state->src_h != new_other_state->src_h ||
8420 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8421 		    old_other_state->crtc_h != new_other_state->crtc_h)
8422 			return true;
8423 
8424 		/* Rotation / mirroring updates. */
8425 		if (old_other_state->rotation != new_other_state->rotation)
8426 			return true;
8427 
8428 		/* Blending updates. */
8429 		if (old_other_state->pixel_blend_mode !=
8430 		    new_other_state->pixel_blend_mode)
8431 			return true;
8432 
8433 		/* Alpha updates. */
8434 		if (old_other_state->alpha != new_other_state->alpha)
8435 			return true;
8436 
8437 		/* Colorspace changes. */
8438 		if (old_other_state->color_range != new_other_state->color_range ||
8439 		    old_other_state->color_encoding != new_other_state->color_encoding)
8440 			return true;
8441 
8442 		/* Framebuffer checks fall at the end. */
8443 		if (!old_other_state->fb || !new_other_state->fb)
8444 			continue;
8445 
8446 		/* Pixel format changes can require bandwidth updates. */
8447 		if (old_other_state->fb->format != new_other_state->fb->format)
8448 			return true;
8449 
8450 		old_dm_plane_state = to_dm_plane_state(old_other_state);
8451 		new_dm_plane_state = to_dm_plane_state(new_other_state);
8452 
8453 		/* Tiling and DCC changes also require bandwidth updates. */
8454 		if (old_dm_plane_state->tiling_flags !=
8455 		    new_dm_plane_state->tiling_flags)
8456 			return true;
8457 	}
8458 
8459 	return false;
8460 }
8461 
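/*
 * Atomic-check helper for one plane: validates cursor-size limits, and
 * removes or creates the backing dc_plane_state in the dc context depending
 * on @enable, flagging when full validation is needed.
 */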
8462 static int dm_update_plane_state(struct dc *dc,
8463 				 struct drm_atomic_state *state,
8464 				 struct drm_plane *plane,
8465 				 struct drm_plane_state *old_plane_state,
8466 				 struct drm_plane_state *new_plane_state,
8467 				 bool enable,
8468 				 bool *lock_and_validation_needed)
8469 {
8471 	struct dm_atomic_state *dm_state = NULL;
8472 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8473 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8474 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8475 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8476 	struct amdgpu_crtc *new_acrtc;
8477 	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
8482 	old_plane_crtc = old_plane_state->crtc;
8483 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8484 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8485 
	/* TODO: Implement a better atomic check for the cursor plane. */
8487 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8488 		if (!enable || !new_plane_crtc ||
8489 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8490 			return 0;
8491 
8492 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8493 
8494 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8495 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8496 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8497 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8498 			return -EINVAL;
8499 		}
8500 
8501 		return 0;
8502 	}
8503 
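	/*
	 * Determine whether DC requires this plane to be removed and
	 * re-created for the update; see should_reset_plane() above for the
	 * full set of conditions.
	 */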
8504 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8505 					 new_plane_state);
8506 
8507 	/* Remove any changed/removed planes */
8508 	if (!enable) {
8509 		if (!needs_reset)
8510 			return 0;
8511 
8512 		if (!old_plane_crtc)
8513 			return 0;
8514 
8515 		old_crtc_state = drm_atomic_get_old_crtc_state(
8516 				state, old_plane_crtc);
8517 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8518 
8519 		if (!dm_old_crtc_state->stream)
8520 			return 0;
8521 
8522 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8523 				plane->base.id, old_plane_crtc->base.id);
8524 
8525 		ret = dm_atomic_get_state(state, &dm_state);
8526 		if (ret)
8527 			return ret;
8528 
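		/*
		 * Detach the DC plane state from the stream in the new DC
		 * context: the DRM plane is disabled or leaving this CRTC.
		 */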
		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
8540 		dm_new_plane_state->dc_state = NULL;
8541 
8542 		*lock_and_validation_needed = true;
8543 
8544 	} else { /* Add new planes */
8545 		struct dc_plane_state *dc_new_plane_state;
8546 
8547 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8548 			return 0;
8549 
8550 		if (!new_plane_crtc)
8551 			return 0;
8552 
8553 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8554 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8555 
8556 		if (!dm_new_crtc_state->stream)
8557 			return 0;
8558 
8559 		if (!needs_reset)
8560 			return 0;
8561 
8562 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8563 		if (ret)
8564 			return ret;
8565 
8566 		WARN_ON(dm_new_plane_state->dc_state);
8567 
8568 		dc_new_plane_state = dc_create_plane_state(dc);
8569 		if (!dc_new_plane_state)
8570 			return -ENOMEM;
8571 
8572 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8573 				plane->base.id, new_plane_crtc->base.id);
8574 
8575 		ret = fill_dc_plane_attributes(
8576 			drm_to_adev(new_plane_crtc->dev),
8577 			dc_new_plane_state,
8578 			new_plane_state,
8579 			new_crtc_state);
8580 		if (ret) {
8581 			dc_plane_state_release(dc_new_plane_state);
8582 			return ret;
8583 		}
8584 
8585 		ret = dm_atomic_get_state(state, &dm_state);
8586 		if (ret) {
8587 			dc_plane_state_release(dc_new_plane_state);
8588 			return ret;
8589 		}
8590 
8591 		/*
8592 		 * Any atomic check errors that occur after this will
8593 		 * not need a release. The plane state will be attached
8594 		 * to the stream, and therefore part of the atomic
8595 		 * state. It'll be released when the atomic state is
8596 		 * cleaned.
8597 		 */
8598 		if (!dc_add_plane_to_context(
8599 				dc,
8600 				dm_new_crtc_state->stream,
8601 				dc_new_plane_state,
8602 				dm_state->context)) {
8603 
8604 			dc_plane_state_release(dc_new_plane_state);
8605 			return -EINVAL;
8606 		}
8607 
8608 		dm_new_plane_state->dc_state = dc_new_plane_state;
8609 
8610 		/* Tell DC to do a full surface update every time there
8611 		 * is a plane change. Inefficient, but works for now.
8612 		 */
8613 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8614 
8615 		*lock_and_validation_needed = true;
8616 	}
8617 
	return ret;
8620 }
8621 
8622 #if defined(CONFIG_DRM_AMD_DC_DCN)
8623 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8624 {
8625 	struct drm_connector *connector;
8626 	struct drm_connector_state *conn_state;
8627 	struct amdgpu_dm_connector *aconnector = NULL;
8628 	int i;
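
	/*
	 * Find the first MST connector feeding this CRTC; every CRTC that
	 * shares its MST topology may need its DSC configuration recomputed,
	 * so add them all to the atomic state.
	 */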
8629 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8630 		if (conn_state->crtc != crtc)
8631 			continue;
8632 
8633 		aconnector = to_amdgpu_dm_connector(connector);
8634 		if (!aconnector->port || !aconnector->mst_port)
8635 			aconnector = NULL;
8636 		else
8637 			break;
8638 	}
8639 
8640 	if (!aconnector)
8641 		return 0;
8642 
8643 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8644 }
8645 #endif
8646 
8647 /**
8648  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8649  * @dev: The DRM device
8650  * @state: The atomic state to commit
8651  *
8652  * Validate that the given atomic state is programmable by DC into hardware.
8653  * This involves constructing a &struct dc_state reflecting the new hardware
8654  * state we wish to commit, then querying DC to see if it is programmable. It's
8655  * important not to modify the existing DC state. Otherwise, atomic_check
8656  * may unexpectedly commit hardware changes.
8657  *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, i.e. when a commit removes/adds/updates streams
 * on one CRTC while flipping on another CRTC, acquiring the global lock
 * guarantees that any such commit will wait for completion of any outstanding
 * flips using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, even when
 * that might not seem necessary. This is because DC stream creation requires
 * the DC sink, which is tied to the DRM connector state. Cleaning this up
 * should be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, a negative error code if validation failed.
8670  */
8671 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8672 				  struct drm_atomic_state *state)
8673 {
8674 	struct amdgpu_device *adev = drm_to_adev(dev);
8675 	struct dm_atomic_state *dm_state = NULL;
8676 	struct dc *dc = adev->dm.dc;
8677 	struct drm_connector *connector;
8678 	struct drm_connector_state *old_con_state, *new_con_state;
8679 	struct drm_crtc *crtc;
8680 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8681 	struct drm_plane *plane;
8682 	struct drm_plane_state *old_plane_state, *new_plane_state;
8683 	enum dc_status status;
8684 	int ret, i;
8685 	bool lock_and_validation_needed = false;
8686 
8687 	trace_amdgpu_dm_atomic_check_begin(state);
8688 
8689 	ret = drm_atomic_helper_check_modeset(dev, state);
8690 	if (ret)
8691 		goto fail;
8692 
8693 	/* Check connector changes */
8694 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8695 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8696 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8697 
		/* Skip connectors that are disabled or part of a modeset already. */
8699 		if (!old_con_state->crtc && !new_con_state->crtc)
8700 			continue;
8701 
8702 		if (!new_con_state->crtc)
8703 			continue;
8704 
8705 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8706 		if (IS_ERR(new_crtc_state)) {
8707 			ret = PTR_ERR(new_crtc_state);
8708 			goto fail;
8709 		}
8710 
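		/*
		 * Flag the CRTC when the ABM level changes so the updated
		 * level is taken into account when the stream is recreated
		 * during commit.
		 */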
8711 		if (dm_old_con_state->abm_level !=
8712 		    dm_new_con_state->abm_level)
8713 			new_crtc_state->connectors_changed = true;
8714 	}
8715 
8716 #if defined(CONFIG_DRM_AMD_DC_DCN)
8717 	if (adev->asic_type >= CHIP_NAVI10) {
8718 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8719 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8720 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8721 				if (ret)
8722 					goto fail;
8723 			}
8724 		}
8725 	}
8726 #endif
8727 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8728 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8729 		    !new_crtc_state->color_mgmt_changed &&
8730 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8731 			continue;
8732 
8733 		if (!new_crtc_state->enable)
8734 			continue;
8735 
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;
8739 
8740 		ret = drm_atomic_add_affected_planes(state, crtc);
8741 		if (ret)
8742 			goto fail;
8743 	}
8744 
8745 	/*
8746 	 * Add all primary and overlay planes on the CRTC to the state
8747 	 * whenever a plane is enabled to maintain correct z-ordering
8748 	 * and to enable fast surface updates.
8749 	 */
8750 	drm_for_each_crtc(crtc, dev) {
8751 		bool modified = false;
8752 
8753 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8754 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8755 				continue;
8756 
8757 			if (new_plane_state->crtc == crtc ||
8758 			    old_plane_state->crtc == crtc) {
8759 				modified = true;
8760 				break;
8761 			}
8762 		}
8763 
8764 		if (!modified)
8765 			continue;
8766 
8767 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8768 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8769 				continue;
8770 
8771 			new_plane_state =
8772 				drm_atomic_get_plane_state(state, plane);
8773 
8774 			if (IS_ERR(new_plane_state)) {
8775 				ret = PTR_ERR(new_plane_state);
8776 				goto fail;
8777 			}
8778 		}
8779 	}
8780 
8781 	/* Prepass for updating tiling flags on new planes. */
8782 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8783 		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8784 		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8785 
8786 		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8787 				  &new_dm_plane_state->tmz_surface);
8788 		if (ret)
8789 			goto fail;
8790 	}
8791 
	/* Remove existing planes if they are modified */
8793 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8794 		ret = dm_update_plane_state(dc, state, plane,
8795 					    old_plane_state,
8796 					    new_plane_state,
8797 					    false,
8798 					    &lock_and_validation_needed);
8799 		if (ret)
8800 			goto fail;
8801 	}
8802 
8803 	/* Disable all crtcs which require disable */
8804 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8805 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8806 					   old_crtc_state,
8807 					   new_crtc_state,
8808 					   false,
8809 					   &lock_and_validation_needed);
8810 		if (ret)
8811 			goto fail;
8812 	}
8813 
8814 	/* Enable all crtcs which require enable */
8815 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8816 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8817 					   old_crtc_state,
8818 					   new_crtc_state,
8819 					   true,
8820 					   &lock_and_validation_needed);
8821 		if (ret)
8822 			goto fail;
8823 	}
8824 
8825 	/* Add new/modified planes */
8826 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8827 		ret = dm_update_plane_state(dc, state, plane,
8828 					    old_plane_state,
8829 					    new_plane_state,
8830 					    true,
8831 					    &lock_and_validation_needed);
8832 		if (ret)
8833 			goto fail;
8834 	}
8835 
8836 	/* Run this here since we want to validate the streams we created */
8837 	ret = drm_atomic_helper_check_planes(dev, state);
8838 	if (ret)
8839 		goto fail;
8840 
8841 	if (state->legacy_cursor_update) {
8842 		/*
8843 		 * This is a fast cursor update coming from the plane update
8844 		 * helper, check if it can be done asynchronously for better
8845 		 * performance.
8846 		 */
8847 		state->async_update =
8848 			!drm_atomic_helper_async_check(dev, state);
8849 
8850 		/*
8851 		 * Skip the remaining global validation if this is an async
8852 		 * update. Cursor updates can be done without affecting
8853 		 * state or bandwidth calcs and this avoids the performance
8854 		 * penalty of locking the private state object and
8855 		 * allocating a new dc_state.
8856 		 */
8857 		if (state->async_update)
8858 			return 0;
8859 	}
8860 
	/* Check scaling and underscan changes. */
	/* TODO: Scaling-changes validation was removed due to the inability to
	 * commit a new stream into the context w/o causing a full reset. Need
	 * to decide how to handle this.
	 */
8866 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8867 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8868 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8869 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8870 
8871 		/* Skip any modesets/resets */
8872 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8873 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8874 			continue;
8875 
		/* Skip anything that is not a scaling or underscan change. */
8877 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8878 			continue;
8879 
8880 		lock_and_validation_needed = true;
8881 	}
8882 
	/*
8884 	 * Streams and planes are reset when there are changes that affect
8885 	 * bandwidth. Anything that affects bandwidth needs to go through
8886 	 * DC global validation to ensure that the configuration can be applied
8887 	 * to hardware.
8888 	 *
	 * We currently have to stall out here in atomic_check for outstanding
	 * commits to finish, because our IRQ handlers reference DRM state
	 * directly - otherwise we can end up disabling interrupts too early.
8893 	 *
8894 	 * TODO: Remove this stall and drop DM state private objects.
8895 	 */
8896 	if (lock_and_validation_needed) {
8897 		ret = dm_atomic_get_state(state, &dm_state);
8898 		if (ret)
8899 			goto fail;
8900 
8901 		ret = do_aquire_global_lock(dev, state);
8902 		if (ret)
8903 			goto fail;
8904 
8905 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
8908 
8909 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8910 		if (ret)
8911 			goto fail;
8912 #endif
8913 
		/*
		 * Perform validation of the MST topology in the state:
		 * we need to perform the MST atomic check before calling
		 * dc_validate_global_state(), or we may get stuck in an
		 * infinite loop and eventually hang.
		 */
8920 		ret = drm_dp_mst_atomic_check(state);
8921 		if (ret)
8922 			goto fail;
8923 		status = dc_validate_global_state(dc, dm_state->context, false);
8924 		if (status != DC_OK) {
8925 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
8926 				       dc_status_to_str(status), status);
8927 			ret = -EINVAL;
8928 			goto fail;
8929 		}
8930 	} else {
8931 		/*
8932 		 * The commit is a fast update. Fast updates shouldn't change
8933 		 * the DC context, affect global validation, and can have their
8934 		 * commit work done in parallel with other commits not touching
8935 		 * the same resource. If we have a new DC context as part of
8936 		 * the DM atomic state from validation we need to free it and
8937 		 * retain the existing one instead.
8938 		 *
8939 		 * Furthermore, since the DM atomic state only contains the DC
8940 		 * context and can safely be annulled, we can free the state
8941 		 * and clear the associated private object now to free
8942 		 * some memory and avoid a possible use-after-free later.
8943 		 */
8944 
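		/*
		 * Example: with num_private_objs == 3 and the DM object found
		 * at i == 0, the entry at index 2 is moved into slot 0, slot 2
		 * is cleared, and the count drops to 2.
		 */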
8945 		for (i = 0; i < state->num_private_objs; i++) {
8946 			struct drm_private_obj *obj = state->private_objs[i].ptr;
8947 
8948 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
8950 
8951 				dm_atomic_destroy_state(obj,
8952 						state->private_objs[i].state);
8953 
8954 				/* If i is not at the end of the array then the
8955 				 * last element needs to be moved to where i was
8956 				 * before the array can safely be truncated.
8957 				 */
8958 				if (i != j)
8959 					state->private_objs[i] =
8960 						state->private_objs[j];
8961 
8962 				state->private_objs[j].ptr = NULL;
8963 				state->private_objs[j].state = NULL;
8964 				state->private_objs[j].old_state = NULL;
8965 				state->private_objs[j].new_state = NULL;
8966 
8967 				state->num_private_objs = j;
8968 				break;
8969 			}
8970 		}
8971 	}
8972 
8973 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8975 		struct dm_crtc_state *dm_new_crtc_state =
8976 			to_dm_crtc_state(new_crtc_state);
8977 
8978 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
8979 							 UPDATE_TYPE_FULL :
8980 							 UPDATE_TYPE_FAST;
8981 	}
8982 
	/* ret must be 0 (success) at this point */
8984 	WARN_ON(ret);
8985 
8986 	trace_amdgpu_dm_atomic_check_finish(state, ret);
8987 
8988 	return ret;
8989 
8990 fail:
8991 	if (ret == -EDEADLK)
8992 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8993 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8994 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8995 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8997 
8998 	trace_amdgpu_dm_atomic_check_finish(state, ret);
8999 
9000 	return ret;
9001 }
9002 
9003 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9004 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9005 {
9006 	uint8_t dpcd_data;
9007 	bool capable = false;
9008 
9009 	if (amdgpu_dm_connector->dc_link &&
9010 		dm_helpers_dp_read_dpcd(
9011 				NULL,
9012 				amdgpu_dm_connector->dc_link,
9013 				DP_DOWN_STREAM_PORT_COUNT,
9014 				&dpcd_data,
9015 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9017 	}
9018 
9019 	return capable;
9020 }

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9022 					struct edid *edid)
9023 {
9024 	int i;
9025 	bool edid_check_required;
9026 	struct detailed_timing *timing;
9027 	struct detailed_non_pixel *data;
9028 	struct detailed_data_monitor_range *range;
9029 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9030 			to_amdgpu_dm_connector(connector);
9031 	struct dm_connector_state *dm_con_state = NULL;
9032 
9033 	struct drm_device *dev = connector->dev;
9034 	struct amdgpu_device *adev = drm_to_adev(dev);
9035 	bool freesync_capable = false;
9036 
9037 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
9039 		goto update;
9040 	}
9041 
9042 	if (!edid) {
9043 		dm_con_state = to_dm_connector_state(connector->state);
9044 
9045 		amdgpu_dm_connector->min_vfreq = 0;
9046 		amdgpu_dm_connector->max_vfreq = 0;
9047 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9048 
9049 		goto update;
9050 	}
9051 
9052 	dm_con_state = to_dm_connector_state(connector->state);
9053 
9054 	edid_check_required = false;
9055 	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink is NULL, could not add the FreeSync module.\n");
9057 		goto update;
9058 	}
9059 	if (!adev->dm.freesync_module)
9060 		goto update;
	/*
	 * If the EDID is non-NULL, restrict FreeSync to DP and eDP sinks only.
	 */
9064 	if (edid) {
9065 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9066 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9067 			edid_check_required = is_dp_capable_without_timing_msa(
9068 						adev->dm.dc,
9069 						amdgpu_dm_connector);
9070 		}
9071 	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
9076 			timing	= &edid->detailed_timings[i];
9077 			data	= &timing->data.other_data;
9078 			range	= &data->data.range;
9079 			/*
9080 			 * Check if monitor has continuous frequency mode
9081 			 */
9082 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9083 				continue;
			/*
			 * Check for the range-limits-only flag. If flags == 1,
			 * no additional timing information is provided.
			 * Default GTF, GTF secondary curve and CVT are not
			 * supported.
			 */
9090 			if (range->flags != 1)
9091 				continue;
9092 
9093 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9094 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
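			/*
			 * The EDID range descriptor stores the maximum pixel
			 * clock in 10 MHz units; scale it to MHz here.
			 */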
9095 			amdgpu_dm_connector->pixel_clock_mhz =
9096 				range->pixel_clock_mhz * 10;
9097 			break;
9098 		}
9099 
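		/*
		 * Only report FreeSync capability when the monitor exposes a
		 * usable refresh range, i.e. more than 10 Hz between the
		 * minimum and maximum vertical frequency.
		 */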
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
9105 	}
9106 
9107 update:
9108 	if (dm_con_state)
9109 		dm_con_state->freesync_capable = freesync_capable;
9110 
9111 	if (connector->vrr_capable_property)
9112 		drm_connector_set_vrr_capable_property(connector,
9113 						       freesync_capable);
9114 }
9115 
9116 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9117 {
9118 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9119 
9120 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9121 		return;
9122 	if (link->type == dc_connection_none)
9123 		return;
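	/*
	 * The first byte of the PSR capability block (DP_PSR_SUPPORT) is the
	 * sink's PSR version; zero means the sink does not support PSR.
	 */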
9124 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9125 					dpcd_data, sizeof(dpcd_data))) {
9126 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9127 
9128 		if (dpcd_data[0] == 0) {
9129 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9130 			link->psr_settings.psr_feature_enabled = false;
9131 		} else {
9132 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9133 			link->psr_settings.psr_feature_enabled = true;
9134 		}
9135 
9136 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9137 	}
9138 }
9139 
/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success
 */
9146 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9147 {
9148 	struct dc_link *link = NULL;
9149 	struct psr_config psr_config = {0};
9150 	struct psr_context psr_context = {0};
9151 	bool ret = false;
9152 
9153 	if (stream == NULL)
9154 		return false;
9155 
9156 	link = stream->link;
9157 
9158 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9159 
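	/*
	 * Only configure the link when the sink reported a PSR version. The
	 * fields below are fixed timing parameters handed to
	 * dc_link_setup_psr().
	 */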
9160 	if (psr_config.psr_version > 0) {
9161 		psr_config.psr_exit_link_training_required = 0x1;
9162 		psr_config.psr_frame_capture_indication_req = 0;
9163 		psr_config.psr_rfb_setup_time = 0x37;
9164 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9165 		psr_config.allow_smu_optimizations = 0x0;
9166 
		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}

	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9171 
9172 	return ret;
9173 }
9174 
/*
 * amdgpu_dm_psr_enable() - enable PSR f/w
 * @stream: stream state
 *
 * Return: true on success
 */
9181 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9182 {
9183 	struct dc_link *link = stream->link;
9184 	unsigned int vsync_rate_hz = 0;
9185 	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR. Initialize to a fail-safe of 2 static
	 * frames.
	 */
	unsigned int num_frames_static = 2;
9191 
9192 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9193 
	vsync_rate_hz = div64_u64(div64_u64((stream->timing.pix_clk_100hz * 100),
					    stream->timing.v_total),
				  stream->timing.h_total);
9198 
	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
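	/*
	 * Example: a 60 Hz stream gives frame_time_microsec = 16666, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 static frames before
	 * the PSR entry interrupt is generated.
	 */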
9207 
9208 	params.triggers.cursor_update = true;
9209 	params.triggers.overlay_update = true;
9210 	params.triggers.surface_update = true;
9211 	params.num_frames = num_frames_static;
9212 
9213 	dc_stream_set_static_screen_params(link->ctx->dc,
9214 					   &stream, 1,
9215 					   &params);
9216 
9217 	return dc_link_set_psr_allow_active(link, true, false);
9218 }
9219 
/*
 * amdgpu_dm_psr_disable() - disable PSR f/w
 * @stream: stream state
 *
 * Return: true on success
 */
9226 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");
9230 
9231 	return dc_link_set_psr_allow_active(stream->link, false, true);
9232 }
9233 
/*
 * amdgpu_dm_psr_disable_all() - disable PSR f/w if PSR is enabled on any
 * stream
 *
 * Return: true on success
 */
9240 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9241 {
9242 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9243 	return dc_set_psr_allow_active(dm->dc, false);
9244 }
9245 
9246 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9247 {
9248 	struct amdgpu_device *adev = drm_to_adev(dev);
9249 	struct dc *dc = adev->dm.dc;
9250 	int i;
9251 
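	/*
	 * Propagate the force_timing_sync setting to every stream in the
	 * current DC state and retrigger CRTC master synchronization.
	 */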
9252 	mutex_lock(&adev->dm.dc_lock);
9253 	if (dc->current_state) {
9254 		for (i = 0; i < dc->current_state->stream_count; ++i)
9255 			dc->current_state->streams[i]
9256 				->triggered_crtc_reset.enabled =
9257 				adev->dm.force_timing_sync;
9258 
9259 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9260 		dc_trigger_sync(dc, dc->current_state);
9261 	}
9262 	mutex_unlock(&adev->dm.dc_lock);
9263 }
9264 
9265 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9266 		       uint32_t value, const char *func_name)
9267 {
9268 #ifdef DM_CHECK_ADDR_0
9269 	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
9271 		return;
9272 	}
9273 #endif
9274 	cgs_write_register(ctx->cgs_device, address, value);
9275 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9276 }
9277 
9278 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9279 			  const char *func_name)
9280 {
9281 	uint32_t value;
9282 #ifdef DM_CHECK_ADDR_0
9283 	if (address == 0) {
9284 		DC_ERR("invalid register read; address = 0\n");
9285 		return 0;
9286 	}
9287 #endif
9288 
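	/*
	 * Reads are unexpected while a DMUB register-write gather is in
	 * progress, unless burst writes are allowed; warn and return 0 in
	 * that case.
	 */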
9289 	if (ctx->dmub_srv &&
9290 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9291 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9292 		ASSERT(false);
9293 		return 0;
9294 	}
9295 
9296 	value = cgs_read_register(ctx->cgs_device, address);
9297 
9298 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9299 
9300 	return value;
9301 }
9302