1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "dc/dc_stat.h"
39 #include "amdgpu_dm_trace.h"
40 
41 #include "vid.h"
42 #include "amdgpu.h"
43 #include "amdgpu_display.h"
44 #include "amdgpu_ucode.h"
45 #include "atom.h"
46 #include "amdgpu_dm.h"
47 #ifdef CONFIG_DRM_AMD_DC_HDCP
48 #include "amdgpu_dm_hdcp.h"
49 #include <drm/drm_hdcp.h>
50 #endif
51 #include "amdgpu_pm.h"
52 
53 #include "amd_shared.h"
54 #include "amdgpu_dm_irq.h"
55 #include "dm_helpers.h"
56 #include "amdgpu_dm_mst_types.h"
57 #if defined(CONFIG_DEBUG_FS)
58 #include "amdgpu_dm_debugfs.h"
59 #endif
60 
61 #include "ivsrcid/ivsrcid_vislands30.h"
62 
63 #include "i2caux_interface.h"
64 #include <linux/module.h>
65 #include <linux/moduleparam.h>
66 #include <linux/types.h>
67 #include <linux/pm_runtime.h>
68 #include <linux/pci.h>
69 #include <linux/firmware.h>
70 #include <linux/component.h>
71 
72 #include <drm/drm_atomic.h>
73 #include <drm/drm_atomic_uapi.h>
74 #include <drm/drm_atomic_helper.h>
75 #include <drm/drm_dp_mst_helper.h>
76 #include <drm/drm_fb_helper.h>
77 #include <drm/drm_fourcc.h>
78 #include <drm/drm_edid.h>
79 #include <drm/drm_vblank.h>
80 #include <drm/drm_audio_component.h>
81 
82 #if defined(CONFIG_DRM_AMD_DC_DCN)
83 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
84 
85 #include "dcn/dcn_1_0_offset.h"
86 #include "dcn/dcn_1_0_sh_mask.h"
87 #include "soc15_hw_ip.h"
88 #include "vega10_ip_offset.h"
89 
90 #include "soc15_common.h"
91 #endif
92 
93 #include "modules/inc/mod_freesync.h"
94 #include "modules/power/power_helpers.h"
95 #include "modules/inc/mod_info_packet.h"
96 
97 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
99 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
101 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
103 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
105 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
107 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
109 
110 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
111 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
112 
113 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
114 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
115 
116 /* Number of bytes in PSP header for firmware. */
117 #define PSP_HEADER_BYTES 0x100
118 
119 /* Number of bytes in PSP footer for firmware. */
120 #define PSP_FOOTER_BYTES 0x100
121 
122 /**
123  * DOC: overview
124  *
125  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
126  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
127  * requests into DC requests, and DC responses into DRM responses.
128  *
129  * The root control structure is &struct amdgpu_display_manager.
130  */
131 
132 /* basic init/fini API */
133 static int amdgpu_dm_init(struct amdgpu_device *adev);
134 static void amdgpu_dm_fini(struct amdgpu_device *adev);
135 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
136 
137 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
138 {
139 	switch (link->dpcd_caps.dongle_type) {
140 	case DISPLAY_DONGLE_NONE:
141 		return DRM_MODE_SUBCONNECTOR_Native;
142 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
143 		return DRM_MODE_SUBCONNECTOR_VGA;
144 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
145 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
146 		return DRM_MODE_SUBCONNECTOR_DVID;
147 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
148 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
149 		return DRM_MODE_SUBCONNECTOR_HDMIA;
150 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
151 	default:
152 		return DRM_MODE_SUBCONNECTOR_Unknown;
153 	}
154 }
155 
156 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
157 {
158 	struct dc_link *link = aconnector->dc_link;
159 	struct drm_connector *connector = &aconnector->base;
160 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
161 
162 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
163 		return;
164 
165 	if (aconnector->dc_sink)
166 		subconnector = get_subconnector_type(link);
167 
168 	drm_object_property_set_value(&connector->base,
169 			connector->dev->mode_config.dp_subconnector_property,
170 			subconnector);
171 }
172 
173 /*
174  * initializes drm_device display related structures, based on the information
175  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
176  * drm_encoder, drm_mode_config
177  *
178  * Returns 0 on success
179  */
180 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
181 /* removes and deallocates the drm structures, created by the above function */
182 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
183 
184 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
185 				struct drm_plane *plane,
186 				unsigned long possible_crtcs,
187 				const struct dc_plane_cap *plane_cap);
188 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
189 			       struct drm_plane *plane,
190 			       uint32_t link_index);
191 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
192 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
193 				    uint32_t link_index,
194 				    struct amdgpu_encoder *amdgpu_encoder);
195 static int amdgpu_dm_encoder_init(struct drm_device *dev,
196 				  struct amdgpu_encoder *aencoder,
197 				  uint32_t link_index);
198 
199 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
200 
201 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
202 
203 static int amdgpu_dm_atomic_check(struct drm_device *dev,
204 				  struct drm_atomic_state *state);
205 
206 static void handle_cursor_update(struct drm_plane *plane,
207 				 struct drm_plane_state *old_plane_state);
208 
209 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
210 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
212 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
213 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
214 
215 static const struct drm_format_info *
216 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 
218 static bool
219 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
220 				 struct drm_crtc_state *new_crtc_state);
221 /*
222  * dm_vblank_get_counter
223  *
224  * @brief
225  * Get counter for number of vertical blanks
226  *
227  * @param
228  * struct amdgpu_device *adev - [in] desired amdgpu device
229  * int disp_idx - [in] which CRTC to get the counter from
230  *
231  * @return
232  * Counter for vertical blanks
233  */
234 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
235 {
236 	if (crtc >= adev->mode_info.num_crtc)
237 		return 0;
238 	else {
239 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
240 
241 		if (acrtc->dm_irq_params.stream == NULL) {
242 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
243 				  crtc);
244 			return 0;
245 		}
246 
247 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
248 	}
249 }
250 
251 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
252 				  u32 *vbl, u32 *position)
253 {
254 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
255 
256 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
257 		return -EINVAL;
258 	else {
259 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
260 
261 		if (acrtc->dm_irq_params.stream ==  NULL) {
262 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
263 				  crtc);
264 			return 0;
265 		}
266 
267 		/*
268 		 * TODO rework base driver to use values directly.
269 		 * for now parse it back into reg-format
270 		 */
271 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
272 					 &v_blank_start,
273 					 &v_blank_end,
274 					 &h_position,
275 					 &v_position);
276 
277 		*position = v_position | (h_position << 16);
278 		*vbl = v_blank_start | (v_blank_end << 16);
279 	}
280 
281 	return 0;
282 }
283 
284 static bool dm_is_idle(void *handle)
285 {
286 	/* XXX todo */
287 	return true;
288 }
289 
290 static int dm_wait_for_idle(void *handle)
291 {
292 	/* XXX todo */
293 	return 0;
294 }
295 
296 static bool dm_check_soft_reset(void *handle)
297 {
298 	return false;
299 }
300 
301 static int dm_soft_reset(void *handle)
302 {
303 	/* XXX todo */
304 	return 0;
305 }
306 
307 static struct amdgpu_crtc *
308 get_crtc_by_otg_inst(struct amdgpu_device *adev,
309 		     int otg_inst)
310 {
311 	struct drm_device *dev = adev_to_drm(adev);
312 	struct drm_crtc *crtc;
313 	struct amdgpu_crtc *amdgpu_crtc;
314 
315 	if (otg_inst == -1) {
316 		WARN_ON(1);
317 		return adev->mode_info.crtcs[0];
318 	}
319 
320 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
321 		amdgpu_crtc = to_amdgpu_crtc(crtc);
322 
323 		if (amdgpu_crtc->otg_inst == otg_inst)
324 			return amdgpu_crtc;
325 	}
326 
327 	return NULL;
328 }
329 
330 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
331 {
332 	return acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_VARIABLE ||
334 	       acrtc->dm_irq_params.freesync_config.state ==
335 		       VRR_STATE_ACTIVE_FIXED;
336 }
337 
338 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
339 {
340 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
341 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
342 }
343 
344 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
345 					      struct dm_crtc_state *new_state)
346 {
347 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
348 		return true;
349 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
350 		return true;
351 	else
352 		return false;
353 }
354 
355 /**
356  * dm_pflip_high_irq() - Handle pageflip interrupt
357  * @interrupt_params: ignored
358  *
359  * Handles the pageflip interrupt by notifying all interested parties
360  * that the pageflip has been completed.
361  */
362 static void dm_pflip_high_irq(void *interrupt_params)
363 {
364 	struct amdgpu_crtc *amdgpu_crtc;
365 	struct common_irq_params *irq_params = interrupt_params;
366 	struct amdgpu_device *adev = irq_params->adev;
367 	unsigned long flags;
368 	struct drm_pending_vblank_event *e;
369 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
370 	bool vrr_active;
371 
372 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
373 
374 	/* IRQ could occur when in initial stage */
375 	/* TODO work and BO cleanup */
376 	if (amdgpu_crtc == NULL) {
377 		DC_LOG_PFLIP("CRTC is null, returning.\n");
378 		return;
379 	}
380 
381 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
382 
383 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
384 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
385 						 amdgpu_crtc->pflip_status,
386 						 AMDGPU_FLIP_SUBMITTED,
387 						 amdgpu_crtc->crtc_id,
388 						 amdgpu_crtc);
389 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
390 		return;
391 	}
392 
393 	/* page flip completed. */
394 	e = amdgpu_crtc->event;
395 	amdgpu_crtc->event = NULL;
396 
397 	if (!e)
398 		WARN_ON(1);
399 
400 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
401 
402 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
403 	if (!vrr_active ||
404 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
405 				      &v_blank_end, &hpos, &vpos) ||
406 	    (vpos < v_blank_start)) {
407 		/* Update to correct count and vblank timestamp if racing with
408 		 * vblank irq. This also updates to the correct vblank timestamp
409 		 * even in VRR mode, as scanout is past the front-porch atm.
410 		 */
411 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
412 
413 		/* Wake up userspace by sending the pageflip event with proper
414 		 * count and timestamp of vblank of flip completion.
415 		 */
416 		if (e) {
417 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
418 
419 			/* Event sent, so done with vblank for this flip */
420 			drm_crtc_vblank_put(&amdgpu_crtc->base);
421 		}
422 	} else if (e) {
423 		/* VRR active and inside front-porch: vblank count and
424 		 * timestamp for pageflip event will only be up to date after
425 		 * drm_crtc_handle_vblank() has been executed from late vblank
426 		 * irq handler after start of back-porch (vline 0). We queue the
427 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
428 		 * updated timestamp and count, once it runs after us.
429 		 *
430 		 * We need to open-code this instead of using the helper
431 		 * drm_crtc_arm_vblank_event(), as that helper would
432 		 * call drm_crtc_accurate_vblank_count(), which we must
433 		 * not call in VRR mode while we are in front-porch!
434 		 */
435 
436 		/* sequence will be replaced by real count during send-out. */
437 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
438 		e->pipe = amdgpu_crtc->crtc_id;
439 
440 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
441 		e = NULL;
442 	}
443 
444 	/* Keep track of vblank of this flip for flip throttling. We use the
445 	 * cooked hw counter, as that one incremented at start of this vblank
446 	 * of pageflip completion, so last_flip_vblank is the forbidden count
447 	 * for queueing new pageflips if vsync + VRR is enabled.
448 	 */
449 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
450 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
451 
452 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
453 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
454 
455 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
456 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
457 		     vrr_active, (int) !e);
458 }
459 
460 static void dm_vupdate_high_irq(void *interrupt_params)
461 {
462 	struct common_irq_params *irq_params = interrupt_params;
463 	struct amdgpu_device *adev = irq_params->adev;
464 	struct amdgpu_crtc *acrtc;
465 	struct drm_device *drm_dev;
466 	struct drm_vblank_crtc *vblank;
467 	ktime_t frame_duration_ns, previous_timestamp;
468 	unsigned long flags;
469 	int vrr_active;
470 
471 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
472 
473 	if (acrtc) {
474 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
475 		drm_dev = acrtc->base.dev;
476 		vblank = &drm_dev->vblank[acrtc->base.index];
477 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
478 		frame_duration_ns = vblank->time - previous_timestamp;
479 
480 		if (frame_duration_ns > 0) {
481 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
482 						frame_duration_ns,
483 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
484 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
485 		}
486 
487 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
488 			      acrtc->crtc_id,
489 			      vrr_active);
490 
491 		/* Core vblank handling is done here after end of front-porch in
492 		 * vrr mode, as vblank timestamping will give valid results
493 		 * while now done after front-porch. This will also deliver
494 		 * page-flip completion events that have been queued to us
495 		 * if a pageflip happened inside front-porch.
496 		 */
497 		if (vrr_active) {
498 			drm_crtc_handle_vblank(&acrtc->base);
499 
500 			/* BTR processing for pre-DCE12 ASICs */
501 			if (acrtc->dm_irq_params.stream &&
502 			    adev->family < AMDGPU_FAMILY_AI) {
503 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
504 				mod_freesync_handle_v_update(
505 				    adev->dm.freesync_module,
506 				    acrtc->dm_irq_params.stream,
507 				    &acrtc->dm_irq_params.vrr_params);
508 
509 				dc_stream_adjust_vmin_vmax(
510 				    adev->dm.dc,
511 				    acrtc->dm_irq_params.stream,
512 				    &acrtc->dm_irq_params.vrr_params.adjust);
513 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
514 			}
515 		}
516 	}
517 }
518 
519 /**
520  * dm_crtc_high_irq() - Handles CRTC interrupt
521  * @interrupt_params: used for determining the CRTC instance
522  *
523  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
524  * event handler.
525  */
526 static void dm_crtc_high_irq(void *interrupt_params)
527 {
528 	struct common_irq_params *irq_params = interrupt_params;
529 	struct amdgpu_device *adev = irq_params->adev;
530 	struct amdgpu_crtc *acrtc;
531 	unsigned long flags;
532 	int vrr_active;
533 
534 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
535 	if (!acrtc)
536 		return;
537 
538 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
539 
540 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
541 		      vrr_active, acrtc->dm_irq_params.active_planes);
542 
543 	/**
544 	 * Core vblank handling at start of front-porch is only possible
545 	 * in non-vrr mode, as only there vblank timestamping will give
546 	 * valid results while done in front-porch. Otherwise defer it
547 	 * to dm_vupdate_high_irq after end of front-porch.
548 	 */
549 	if (!vrr_active)
550 		drm_crtc_handle_vblank(&acrtc->base);
551 
552 	/**
553 	 * Following stuff must happen at start of vblank, for crc
554 	 * computation and below-the-range btr support in vrr mode.
555 	 */
556 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
557 
558 	/* BTR updates need to happen before VUPDATE on Vega and above. */
559 	if (adev->family < AMDGPU_FAMILY_AI)
560 		return;
561 
562 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
563 
564 	if (acrtc->dm_irq_params.stream &&
565 	    acrtc->dm_irq_params.vrr_params.supported &&
566 	    acrtc->dm_irq_params.freesync_config.state ==
567 		    VRR_STATE_ACTIVE_VARIABLE) {
568 		mod_freesync_handle_v_update(adev->dm.freesync_module,
569 					     acrtc->dm_irq_params.stream,
570 					     &acrtc->dm_irq_params.vrr_params);
571 
572 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
573 					   &acrtc->dm_irq_params.vrr_params.adjust);
574 	}
575 
576 	/*
577 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
578 	 * In that case, pageflip completion interrupts won't fire and pageflip
579 	 * completion events won't get delivered. Prevent this by sending
580 	 * pending pageflip events from here if a flip is still pending.
581 	 *
582 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
583 	 * avoid race conditions between flip programming and completion,
584 	 * which could cause too early flip completion events.
585 	 */
586 	if (adev->family >= AMDGPU_FAMILY_RV &&
587 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
588 	    acrtc->dm_irq_params.active_planes == 0) {
589 		if (acrtc->event) {
590 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
591 			acrtc->event = NULL;
592 			drm_crtc_vblank_put(&acrtc->base);
593 		}
594 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
595 	}
596 
597 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
598 }
599 
600 #if defined(CONFIG_DRM_AMD_DC_DCN)
601 /**
602  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
603  * DCN generation ASICs
604  * @interrupt params - interrupt parameters
605  *
606  * Used to set crc window/read out crc value at vertical line 0 position
607  */
608 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
609 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
610 {
611 	struct common_irq_params *irq_params = interrupt_params;
612 	struct amdgpu_device *adev = irq_params->adev;
613 	struct amdgpu_crtc *acrtc;
614 
615 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
616 
617 	if (!acrtc)
618 		return;
619 
620 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
621 }
622 #endif
623 
624 /**
625  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
626  * @interrupt_params: used for determining the Outbox instance
627  *
628  * Handles the Outbox Interrupt
629  * event handler.
630  */
631 #define DMUB_TRACE_MAX_READ 64
632 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
633 {
634 	struct dmub_notification notify;
635 	struct common_irq_params *irq_params = interrupt_params;
636 	struct amdgpu_device *adev = irq_params->adev;
637 	struct amdgpu_display_manager *dm = &adev->dm;
638 	struct dmcub_trace_buf_entry entry = { 0 };
639 	uint32_t count = 0;
640 
641 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
642 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
643 			do {
644 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
645 			} while (notify.pending_notification);
646 
647 			if (adev->dm.dmub_notify)
648 				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
649 			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
650 				complete(&adev->dm.dmub_aux_transfer_done);
651 			// TODO : HPD Implementation
652 
653 		} else {
654 			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
655 		}
656 	}
657 
658 
659 	do {
660 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
661 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
662 							entry.param0, entry.param1);
663 
664 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
665 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
666 		} else
667 			break;
668 
669 		count++;
670 
671 	} while (count <= DMUB_TRACE_MAX_READ);
672 
673 	ASSERT(count <= DMUB_TRACE_MAX_READ);
674 }
675 #endif
676 
677 static int dm_set_clockgating_state(void *handle,
678 		  enum amd_clockgating_state state)
679 {
680 	return 0;
681 }
682 
683 static int dm_set_powergating_state(void *handle,
684 		  enum amd_powergating_state state)
685 {
686 	return 0;
687 }
688 
689 /* Prototypes of private functions */
690 static int dm_early_init(void* handle);
691 
692 /* Allocate memory for FBC compressed data  */
693 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
694 {
695 	struct drm_device *dev = connector->dev;
696 	struct amdgpu_device *adev = drm_to_adev(dev);
697 	struct dm_compressor_info *compressor = &adev->dm.compressor;
698 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
699 	struct drm_display_mode *mode;
700 	unsigned long max_size = 0;
701 
702 	if (adev->dm.dc->fbc_compressor == NULL)
703 		return;
704 
705 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
706 		return;
707 
708 	if (compressor->bo_ptr)
709 		return;
710 
711 
712 	list_for_each_entry(mode, &connector->modes, head) {
713 		if (max_size < mode->htotal * mode->vtotal)
714 			max_size = mode->htotal * mode->vtotal;
715 	}
716 
717 	if (max_size) {
718 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
719 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
720 			    &compressor->gpu_addr, &compressor->cpu_addr);
721 
722 		if (r)
723 			DRM_ERROR("DM: Failed to initialize FBC\n");
724 		else {
725 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
726 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
727 		}
728 
729 	}
730 
731 }
732 
733 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
734 					  int pipe, bool *enabled,
735 					  unsigned char *buf, int max_bytes)
736 {
737 	struct drm_device *dev = dev_get_drvdata(kdev);
738 	struct amdgpu_device *adev = drm_to_adev(dev);
739 	struct drm_connector *connector;
740 	struct drm_connector_list_iter conn_iter;
741 	struct amdgpu_dm_connector *aconnector;
742 	int ret = 0;
743 
744 	*enabled = false;
745 
746 	mutex_lock(&adev->dm.audio_lock);
747 
748 	drm_connector_list_iter_begin(dev, &conn_iter);
749 	drm_for_each_connector_iter(connector, &conn_iter) {
750 		aconnector = to_amdgpu_dm_connector(connector);
751 		if (aconnector->audio_inst != port)
752 			continue;
753 
754 		*enabled = true;
755 		ret = drm_eld_size(connector->eld);
756 		memcpy(buf, connector->eld, min(max_bytes, ret));
757 
758 		break;
759 	}
760 	drm_connector_list_iter_end(&conn_iter);
761 
762 	mutex_unlock(&adev->dm.audio_lock);
763 
764 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
765 
766 	return ret;
767 }
768 
769 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
770 	.get_eld = amdgpu_dm_audio_component_get_eld,
771 };
772 
773 static int amdgpu_dm_audio_component_bind(struct device *kdev,
774 				       struct device *hda_kdev, void *data)
775 {
776 	struct drm_device *dev = dev_get_drvdata(kdev);
777 	struct amdgpu_device *adev = drm_to_adev(dev);
778 	struct drm_audio_component *acomp = data;
779 
780 	acomp->ops = &amdgpu_dm_audio_component_ops;
781 	acomp->dev = kdev;
782 	adev->dm.audio_component = acomp;
783 
784 	return 0;
785 }
786 
787 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
788 					  struct device *hda_kdev, void *data)
789 {
790 	struct drm_device *dev = dev_get_drvdata(kdev);
791 	struct amdgpu_device *adev = drm_to_adev(dev);
792 	struct drm_audio_component *acomp = data;
793 
794 	acomp->ops = NULL;
795 	acomp->dev = NULL;
796 	adev->dm.audio_component = NULL;
797 }
798 
799 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
800 	.bind	= amdgpu_dm_audio_component_bind,
801 	.unbind	= amdgpu_dm_audio_component_unbind,
802 };
803 
804 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
805 {
806 	int i, ret;
807 
808 	if (!amdgpu_audio)
809 		return 0;
810 
811 	adev->mode_info.audio.enabled = true;
812 
813 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
814 
815 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
816 		adev->mode_info.audio.pin[i].channels = -1;
817 		adev->mode_info.audio.pin[i].rate = -1;
818 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
819 		adev->mode_info.audio.pin[i].status_bits = 0;
820 		adev->mode_info.audio.pin[i].category_code = 0;
821 		adev->mode_info.audio.pin[i].connected = false;
822 		adev->mode_info.audio.pin[i].id =
823 			adev->dm.dc->res_pool->audios[i]->inst;
824 		adev->mode_info.audio.pin[i].offset = 0;
825 	}
826 
827 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
828 	if (ret < 0)
829 		return ret;
830 
831 	adev->dm.audio_registered = true;
832 
833 	return 0;
834 }
835 
836 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
837 {
838 	if (!amdgpu_audio)
839 		return;
840 
841 	if (!adev->mode_info.audio.enabled)
842 		return;
843 
844 	if (adev->dm.audio_registered) {
845 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
846 		adev->dm.audio_registered = false;
847 	}
848 
849 	/* TODO: Disable audio? */
850 
851 	adev->mode_info.audio.enabled = false;
852 }
853 
854 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
855 {
856 	struct drm_audio_component *acomp = adev->dm.audio_component;
857 
858 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
859 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
860 
861 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
862 						 pin, -1);
863 	}
864 }
865 
866 static int dm_dmub_hw_init(struct amdgpu_device *adev)
867 {
868 	const struct dmcub_firmware_header_v1_0 *hdr;
869 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
870 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
871 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
872 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
873 	struct abm *abm = adev->dm.dc->res_pool->abm;
874 	struct dmub_srv_hw_params hw_params;
875 	enum dmub_status status;
876 	const unsigned char *fw_inst_const, *fw_bss_data;
877 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
878 	bool has_hw_support;
879 
880 	if (!dmub_srv)
881 		/* DMUB isn't supported on the ASIC. */
882 		return 0;
883 
884 	if (!fb_info) {
885 		DRM_ERROR("No framebuffer info for DMUB service.\n");
886 		return -EINVAL;
887 	}
888 
889 	if (!dmub_fw) {
890 		/* Firmware required for DMUB support. */
891 		DRM_ERROR("No firmware provided for DMUB.\n");
892 		return -EINVAL;
893 	}
894 
895 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
896 	if (status != DMUB_STATUS_OK) {
897 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
898 		return -EINVAL;
899 	}
900 
901 	if (!has_hw_support) {
902 		DRM_INFO("DMUB unsupported on ASIC\n");
903 		return 0;
904 	}
905 
906 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
907 
908 	fw_inst_const = dmub_fw->data +
909 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
910 			PSP_HEADER_BYTES;
911 
912 	fw_bss_data = dmub_fw->data +
913 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
914 		      le32_to_cpu(hdr->inst_const_bytes);
915 
916 	/* Copy firmware and bios info into FB memory. */
917 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
918 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
919 
920 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
921 
922 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
923 	 * amdgpu_ucode_init_single_fw will load dmub firmware
924 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
925 	 * will be done by dm_dmub_hw_init
926 	 */
927 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
928 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
929 				fw_inst_const_size);
930 	}
931 
932 	if (fw_bss_data_size)
933 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
934 		       fw_bss_data, fw_bss_data_size);
935 
936 	/* Copy firmware bios info into FB memory. */
937 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
938 	       adev->bios_size);
939 
940 	/* Reset regions that need to be reset. */
941 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
942 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
943 
944 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
945 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
946 
947 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
948 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
949 
950 	/* Initialize hardware. */
951 	memset(&hw_params, 0, sizeof(hw_params));
952 	hw_params.fb_base = adev->gmc.fb_start;
953 	hw_params.fb_offset = adev->gmc.aper_base;
954 
955 	/* backdoor load firmware and trigger dmub running */
956 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
957 		hw_params.load_inst_const = true;
958 
959 	if (dmcu)
960 		hw_params.psp_version = dmcu->psp_version;
961 
962 	for (i = 0; i < fb_info->num_fb; ++i)
963 		hw_params.fb[i] = &fb_info->fb[i];
964 
965 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
966 	if (status != DMUB_STATUS_OK) {
967 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
968 		return -EINVAL;
969 	}
970 
971 	/* Wait for firmware load to finish. */
972 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
973 	if (status != DMUB_STATUS_OK)
974 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
975 
976 	/* Init DMCU and ABM if available. */
977 	if (dmcu && abm) {
978 		dmcu->funcs->dmcu_init(dmcu);
979 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
980 	}
981 
982 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
983 	if (!adev->dm.dc->ctx->dmub_srv) {
984 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
985 		return -ENOMEM;
986 	}
987 
988 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
989 		 adev->dm.dmcub_fw_version);
990 
991 	return 0;
992 }
993 
994 #if defined(CONFIG_DRM_AMD_DC_DCN)
995 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
996 {
997 	uint64_t pt_base;
998 	uint32_t logical_addr_low;
999 	uint32_t logical_addr_high;
1000 	uint32_t agp_base, agp_bot, agp_top;
1001 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1002 
1003 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1004 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1005 
1006 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1007 		/*
1008 		 * Raven2 has a HW issue that it is unable to use the vram which
1009 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1010 		 * workaround that increase system aperture high address (add 1)
1011 		 * to get rid of the VM fault and hardware hang.
1012 		 */
1013 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1014 	else
1015 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1016 
1017 	agp_base = 0;
1018 	agp_bot = adev->gmc.agp_start >> 24;
1019 	agp_top = adev->gmc.agp_end >> 24;
1020 
1021 
1022 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1023 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1024 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1025 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1026 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1027 	page_table_base.low_part = lower_32_bits(pt_base);
1028 
1029 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1030 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1031 
1032 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1033 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1034 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1035 
1036 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1037 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1038 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1039 
1040 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1041 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1042 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1043 
1044 	pa_config->is_hvm_enabled = 0;
1045 
1046 }
1047 #endif
1048 #if defined(CONFIG_DRM_AMD_DC_DCN)
1049 static void event_mall_stutter(struct work_struct *work)
1050 {
1051 
1052 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1053 	struct amdgpu_display_manager *dm = vblank_work->dm;
1054 
1055 	mutex_lock(&dm->dc_lock);
1056 
1057 	if (vblank_work->enable)
1058 		dm->active_vblank_irq_count++;
1059 	else if(dm->active_vblank_irq_count)
1060 		dm->active_vblank_irq_count--;
1061 
1062 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1063 
1064 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1065 
1066 	mutex_unlock(&dm->dc_lock);
1067 }
1068 
1069 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1070 {
1071 
1072 	int max_caps = dc->caps.max_links;
1073 	struct vblank_workqueue *vblank_work;
1074 	int i = 0;
1075 
1076 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1077 	if (ZERO_OR_NULL_PTR(vblank_work)) {
1078 		kfree(vblank_work);
1079 		return NULL;
1080 	}
1081 
1082 	for (i = 0; i < max_caps; i++)
1083 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1084 
1085 	return vblank_work;
1086 }
1087 #endif
1088 static int amdgpu_dm_init(struct amdgpu_device *adev)
1089 {
1090 	struct dc_init_data init_data;
1091 #ifdef CONFIG_DRM_AMD_DC_HDCP
1092 	struct dc_callback_init init_params;
1093 #endif
1094 	int r;
1095 
1096 	adev->dm.ddev = adev_to_drm(adev);
1097 	adev->dm.adev = adev;
1098 
1099 	/* Zero all the fields */
1100 	memset(&init_data, 0, sizeof(init_data));
1101 #ifdef CONFIG_DRM_AMD_DC_HDCP
1102 	memset(&init_params, 0, sizeof(init_params));
1103 #endif
1104 
1105 	mutex_init(&adev->dm.dc_lock);
1106 	mutex_init(&adev->dm.audio_lock);
1107 #if defined(CONFIG_DRM_AMD_DC_DCN)
1108 	spin_lock_init(&adev->dm.vblank_lock);
1109 #endif
1110 
1111 	if(amdgpu_dm_irq_init(adev)) {
1112 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1113 		goto error;
1114 	}
1115 
1116 	init_data.asic_id.chip_family = adev->family;
1117 
1118 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1119 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1120 
1121 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1122 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1123 	init_data.asic_id.atombios_base_address =
1124 		adev->mode_info.atom_context->bios;
1125 
1126 	init_data.driver = adev;
1127 
1128 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1129 
1130 	if (!adev->dm.cgs_device) {
1131 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1132 		goto error;
1133 	}
1134 
1135 	init_data.cgs_device = adev->dm.cgs_device;
1136 
1137 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1138 
1139 	switch (adev->asic_type) {
1140 	case CHIP_CARRIZO:
1141 	case CHIP_STONEY:
1142 	case CHIP_RAVEN:
1143 	case CHIP_RENOIR:
1144 		init_data.flags.gpu_vm_support = true;
1145 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1146 			init_data.flags.disable_dmcu = true;
1147 		break;
1148 #if defined(CONFIG_DRM_AMD_DC_DCN)
1149 	case CHIP_VANGOGH:
1150 		init_data.flags.gpu_vm_support = true;
1151 		break;
1152 #endif
1153 	default:
1154 		break;
1155 	}
1156 
1157 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1158 		init_data.flags.fbc_support = true;
1159 
1160 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1161 		init_data.flags.multi_mon_pp_mclk_switch = true;
1162 
1163 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1164 		init_data.flags.disable_fractional_pwm = true;
1165 
1166 	init_data.flags.power_down_display_on_boot = true;
1167 
1168 	INIT_LIST_HEAD(&adev->dm.da_list);
1169 	/* Display Core create. */
1170 	adev->dm.dc = dc_create(&init_data);
1171 
1172 	if (adev->dm.dc) {
1173 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1174 	} else {
1175 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1176 		goto error;
1177 	}
1178 
1179 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1180 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1181 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1182 	}
1183 
1184 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1185 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1186 
1187 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1188 		adev->dm.dc->debug.disable_stutter = true;
1189 
1190 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1191 		adev->dm.dc->debug.disable_dsc = true;
1192 
1193 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1194 		adev->dm.dc->debug.disable_clock_gate = true;
1195 
1196 	r = dm_dmub_hw_init(adev);
1197 	if (r) {
1198 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1199 		goto error;
1200 	}
1201 
1202 	dc_hardware_init(adev->dm.dc);
1203 
1204 #if defined(CONFIG_DRM_AMD_DC_DCN)
1205 	if (adev->apu_flags) {
1206 		struct dc_phy_addr_space_config pa_config;
1207 
1208 		mmhub_read_system_context(adev, &pa_config);
1209 
1210 		// Call the DC init_memory func
1211 		dc_setup_system_context(adev->dm.dc, &pa_config);
1212 	}
1213 #endif
1214 
1215 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1216 	if (!adev->dm.freesync_module) {
1217 		DRM_ERROR(
1218 		"amdgpu: failed to initialize freesync_module.\n");
1219 	} else
1220 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1221 				adev->dm.freesync_module);
1222 
1223 	amdgpu_dm_init_color_mod();
1224 
1225 #if defined(CONFIG_DRM_AMD_DC_DCN)
1226 	if (adev->dm.dc->caps.max_links > 0) {
1227 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1228 
1229 		if (!adev->dm.vblank_workqueue)
1230 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1231 		else
1232 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1233 	}
1234 #endif
1235 
1236 #ifdef CONFIG_DRM_AMD_DC_HDCP
1237 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1238 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1239 
1240 		if (!adev->dm.hdcp_workqueue)
1241 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1242 		else
1243 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1244 
1245 		dc_init_callbacks(adev->dm.dc, &init_params);
1246 	}
1247 #endif
1248 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1249 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1250 #endif
1251 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1252 		init_completion(&adev->dm.dmub_aux_transfer_done);
1253 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1254 		if (!adev->dm.dmub_notify) {
1255 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1256 			goto error;
1257 		}
1258 		amdgpu_dm_outbox_init(adev);
1259 	}
1260 
1261 	if (amdgpu_dm_initialize_drm_device(adev)) {
1262 		DRM_ERROR(
1263 		"amdgpu: failed to initialize sw for display support.\n");
1264 		goto error;
1265 	}
1266 
1267 	/* create fake encoders for MST */
1268 	dm_dp_create_fake_mst_encoders(adev);
1269 
1270 	/* TODO: Add_display_info? */
1271 
1272 	/* TODO use dynamic cursor width */
1273 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1274 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1275 
1276 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1277 		DRM_ERROR(
1278 		"amdgpu: failed to initialize sw for display support.\n");
1279 		goto error;
1280 	}
1281 
1282 
1283 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1284 
1285 	return 0;
1286 error:
1287 	amdgpu_dm_fini(adev);
1288 
1289 	return -EINVAL;
1290 }
1291 
1292 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1293 {
1294 	int i;
1295 
1296 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1297 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1298 	}
1299 
1300 	amdgpu_dm_audio_fini(adev);
1301 
1302 	amdgpu_dm_destroy_drm_device(&adev->dm);
1303 
1304 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1305 	if (adev->dm.crc_rd_wrk) {
1306 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1307 		kfree(adev->dm.crc_rd_wrk);
1308 		adev->dm.crc_rd_wrk = NULL;
1309 	}
1310 #endif
1311 #ifdef CONFIG_DRM_AMD_DC_HDCP
1312 	if (adev->dm.hdcp_workqueue) {
1313 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1314 		adev->dm.hdcp_workqueue = NULL;
1315 	}
1316 
1317 	if (adev->dm.dc)
1318 		dc_deinit_callbacks(adev->dm.dc);
1319 #endif
1320 
1321 #if defined(CONFIG_DRM_AMD_DC_DCN)
1322 	if (adev->dm.vblank_workqueue) {
1323 		adev->dm.vblank_workqueue->dm = NULL;
1324 		kfree(adev->dm.vblank_workqueue);
1325 		adev->dm.vblank_workqueue = NULL;
1326 	}
1327 #endif
1328 
1329 	if (adev->dm.dc->ctx->dmub_srv) {
1330 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1331 		adev->dm.dc->ctx->dmub_srv = NULL;
1332 	}
1333 
1334 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1335 		kfree(adev->dm.dmub_notify);
1336 		adev->dm.dmub_notify = NULL;
1337 	}
1338 
1339 	if (adev->dm.dmub_bo)
1340 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1341 				      &adev->dm.dmub_bo_gpu_addr,
1342 				      &adev->dm.dmub_bo_cpu_addr);
1343 
1344 	/* DC Destroy TODO: Replace destroy DAL */
1345 	if (adev->dm.dc)
1346 		dc_destroy(&adev->dm.dc);
1347 	/*
1348 	 * TODO: pageflip, vlank interrupt
1349 	 *
1350 	 * amdgpu_dm_irq_fini(adev);
1351 	 */
1352 
1353 	if (adev->dm.cgs_device) {
1354 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1355 		adev->dm.cgs_device = NULL;
1356 	}
1357 	if (adev->dm.freesync_module) {
1358 		mod_freesync_destroy(adev->dm.freesync_module);
1359 		adev->dm.freesync_module = NULL;
1360 	}
1361 
1362 	mutex_destroy(&adev->dm.audio_lock);
1363 	mutex_destroy(&adev->dm.dc_lock);
1364 
1365 	return;
1366 }
1367 
1368 static int load_dmcu_fw(struct amdgpu_device *adev)
1369 {
1370 	const char *fw_name_dmcu = NULL;
1371 	int r;
1372 	const struct dmcu_firmware_header_v1_0 *hdr;
1373 
1374 	switch(adev->asic_type) {
1375 #if defined(CONFIG_DRM_AMD_DC_SI)
1376 	case CHIP_TAHITI:
1377 	case CHIP_PITCAIRN:
1378 	case CHIP_VERDE:
1379 	case CHIP_OLAND:
1380 #endif
1381 	case CHIP_BONAIRE:
1382 	case CHIP_HAWAII:
1383 	case CHIP_KAVERI:
1384 	case CHIP_KABINI:
1385 	case CHIP_MULLINS:
1386 	case CHIP_TONGA:
1387 	case CHIP_FIJI:
1388 	case CHIP_CARRIZO:
1389 	case CHIP_STONEY:
1390 	case CHIP_POLARIS11:
1391 	case CHIP_POLARIS10:
1392 	case CHIP_POLARIS12:
1393 	case CHIP_VEGAM:
1394 	case CHIP_VEGA10:
1395 	case CHIP_VEGA12:
1396 	case CHIP_VEGA20:
1397 	case CHIP_NAVI10:
1398 	case CHIP_NAVI14:
1399 	case CHIP_RENOIR:
1400 	case CHIP_SIENNA_CICHLID:
1401 	case CHIP_NAVY_FLOUNDER:
1402 	case CHIP_DIMGREY_CAVEFISH:
1403 	case CHIP_VANGOGH:
1404 		return 0;
1405 	case CHIP_NAVI12:
1406 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1407 		break;
1408 	case CHIP_RAVEN:
1409 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1410 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1411 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1412 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1413 		else
1414 			return 0;
1415 		break;
1416 	default:
1417 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1418 		return -EINVAL;
1419 	}
1420 
1421 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1422 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1423 		return 0;
1424 	}
1425 
1426 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1427 	if (r == -ENOENT) {
1428 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1429 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1430 		adev->dm.fw_dmcu = NULL;
1431 		return 0;
1432 	}
1433 	if (r) {
1434 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1435 			fw_name_dmcu);
1436 		return r;
1437 	}
1438 
1439 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1440 	if (r) {
1441 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1442 			fw_name_dmcu);
1443 		release_firmware(adev->dm.fw_dmcu);
1444 		adev->dm.fw_dmcu = NULL;
1445 		return r;
1446 	}
1447 
1448 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1449 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1450 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1451 	adev->firmware.fw_size +=
1452 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1453 
1454 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1455 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1456 	adev->firmware.fw_size +=
1457 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1458 
1459 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1460 
1461 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1462 
1463 	return 0;
1464 }
1465 
1466 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1467 {
1468 	struct amdgpu_device *adev = ctx;
1469 
1470 	return dm_read_reg(adev->dm.dc->ctx, address);
1471 }
1472 
1473 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1474 				     uint32_t value)
1475 {
1476 	struct amdgpu_device *adev = ctx;
1477 
1478 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1479 }
1480 
1481 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1482 {
1483 	struct dmub_srv_create_params create_params;
1484 	struct dmub_srv_region_params region_params;
1485 	struct dmub_srv_region_info region_info;
1486 	struct dmub_srv_fb_params fb_params;
1487 	struct dmub_srv_fb_info *fb_info;
1488 	struct dmub_srv *dmub_srv;
1489 	const struct dmcub_firmware_header_v1_0 *hdr;
1490 	const char *fw_name_dmub;
1491 	enum dmub_asic dmub_asic;
1492 	enum dmub_status status;
1493 	int r;
1494 
1495 	switch (adev->asic_type) {
1496 	case CHIP_RENOIR:
1497 		dmub_asic = DMUB_ASIC_DCN21;
1498 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1499 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1500 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1501 		break;
1502 	case CHIP_SIENNA_CICHLID:
1503 		dmub_asic = DMUB_ASIC_DCN30;
1504 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1505 		break;
1506 	case CHIP_NAVY_FLOUNDER:
1507 		dmub_asic = DMUB_ASIC_DCN30;
1508 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1509 		break;
1510 	case CHIP_VANGOGH:
1511 		dmub_asic = DMUB_ASIC_DCN301;
1512 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1513 		break;
1514 	case CHIP_DIMGREY_CAVEFISH:
1515 		dmub_asic = DMUB_ASIC_DCN302;
1516 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1517 		break;
1518 
1519 	default:
1520 		/* ASIC doesn't support DMUB. */
1521 		return 0;
1522 	}
1523 
1524 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1525 	if (r) {
1526 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1527 		return 0;
1528 	}
1529 
1530 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1531 	if (r) {
1532 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1533 		return 0;
1534 	}
1535 
1536 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1537 
1538 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1539 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1540 			AMDGPU_UCODE_ID_DMCUB;
1541 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1542 			adev->dm.dmub_fw;
1543 		adev->firmware.fw_size +=
1544 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1545 
1546 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1547 			 adev->dm.dmcub_fw_version);
1548 	}
1549 
1550 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1551 
1552 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1553 	dmub_srv = adev->dm.dmub_srv;
1554 
1555 	if (!dmub_srv) {
1556 		DRM_ERROR("Failed to allocate DMUB service!\n");
1557 		return -ENOMEM;
1558 	}
1559 
1560 	memset(&create_params, 0, sizeof(create_params));
1561 	create_params.user_ctx = adev;
1562 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1563 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1564 	create_params.asic = dmub_asic;
1565 
1566 	/* Create the DMUB service. */
1567 	status = dmub_srv_create(dmub_srv, &create_params);
1568 	if (status != DMUB_STATUS_OK) {
1569 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1570 		return -EINVAL;
1571 	}
1572 
1573 	/* Calculate the size of all the regions for the DMUB service. */
1574 	memset(&region_params, 0, sizeof(region_params));
1575 
1576 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1577 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1578 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1579 	region_params.vbios_size = adev->bios_size;
1580 	region_params.fw_bss_data = region_params.bss_data_size ?
1581 		adev->dm.dmub_fw->data +
1582 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1583 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1584 	region_params.fw_inst_const =
1585 		adev->dm.dmub_fw->data +
1586 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1587 		PSP_HEADER_BYTES;
1588 
1589 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1590 					   &region_info);
1591 
1592 	if (status != DMUB_STATUS_OK) {
1593 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1594 		return -EINVAL;
1595 	}
1596 
1597 	/*
1598 	 * Allocate a framebuffer based on the total size of all the regions.
1599 	 * TODO: Move this into GART.
1600 	 */
1601 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1602 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1603 				    &adev->dm.dmub_bo_gpu_addr,
1604 				    &adev->dm.dmub_bo_cpu_addr);
1605 	if (r)
1606 		return r;
1607 
1608 	/* Rebase the regions on the framebuffer address. */
1609 	memset(&fb_params, 0, sizeof(fb_params));
1610 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1611 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1612 	fb_params.region_info = &region_info;
1613 
1614 	adev->dm.dmub_fb_info =
1615 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1616 	fb_info = adev->dm.dmub_fb_info;
1617 
1618 	if (!fb_info) {
1619 		DRM_ERROR(
1620 			"Failed to allocate framebuffer info for DMUB service!\n");
1621 		return -ENOMEM;
1622 	}
1623 
1624 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1625 	if (status != DMUB_STATUS_OK) {
1626 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1627 		return -EINVAL;
1628 	}
1629 
1630 	return 0;
1631 }
1632 
1633 static int dm_sw_init(void *handle)
1634 {
1635 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1636 	int r;
1637 
1638 	r = dm_dmub_sw_init(adev);
1639 	if (r)
1640 		return r;
1641 
1642 	return load_dmcu_fw(adev);
1643 }
1644 
1645 static int dm_sw_fini(void *handle)
1646 {
1647 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1648 
1649 	kfree(adev->dm.dmub_fb_info);
1650 	adev->dm.dmub_fb_info = NULL;
1651 
1652 	if (adev->dm.dmub_srv) {
1653 		dmub_srv_destroy(adev->dm.dmub_srv);
1654 		adev->dm.dmub_srv = NULL;
1655 	}
1656 
1657 	release_firmware(adev->dm.dmub_fw);
1658 	adev->dm.dmub_fw = NULL;
1659 
1660 	release_firmware(adev->dm.fw_dmcu);
1661 	adev->dm.fw_dmcu = NULL;
1662 
1663 	return 0;
1664 }
1665 
1666 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1667 {
1668 	struct amdgpu_dm_connector *aconnector;
1669 	struct drm_connector *connector;
1670 	struct drm_connector_list_iter iter;
1671 	int ret = 0;
1672 
1673 	drm_connector_list_iter_begin(dev, &iter);
1674 	drm_for_each_connector_iter(connector, &iter) {
1675 		aconnector = to_amdgpu_dm_connector(connector);
1676 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1677 		    aconnector->mst_mgr.aux) {
1678 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1679 					 aconnector,
1680 					 aconnector->base.base.id);
1681 
1682 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1683 			if (ret < 0) {
1684 				DRM_ERROR("DM_MST: Failed to start MST\n");
1685 				aconnector->dc_link->type =
1686 					dc_connection_single;
1687 				break;
1688 			}
1689 		}
1690 	}
1691 	drm_connector_list_iter_end(&iter);
1692 
1693 	return ret;
1694 }
1695 
1696 static int dm_late_init(void *handle)
1697 {
1698 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1699 
1700 	struct dmcu_iram_parameters params;
1701 	unsigned int linear_lut[16];
1702 	int i;
1703 	struct dmcu *dmcu = NULL;
1704 	bool ret = true;
1705 
1706 	dmcu = adev->dm.dc->res_pool->dmcu;
1707 
1708 	for (i = 0; i < 16; i++)
1709 		linear_lut[i] = 0xFFFF * i / 15;
1710 
1711 	params.set = 0;
1712 	params.backlight_ramping_start = 0xCCCC;
1713 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1714 	params.backlight_lut_array_size = 16;
1715 	params.backlight_lut_array = linear_lut;
1716 
1717 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1718 	 * 0xFFFF x 0.01 = 0x28F
1719 	 */
1720 	params.min_abm_backlight = 0x28F;
1721 
1722 	/* In the case where abm is implemented on dmcub,
1723 	 * dmcu object will be null.
1724 	 * ABM 2.4 and up are implemented on dmcub.
1725 	 */
1726 	if (dmcu)
1727 		ret = dmcu_load_iram(dmcu, params);
1728 	else if (adev->dm.dc->ctx->dmub_srv)
1729 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1730 
1731 	if (!ret)
1732 		return -EINVAL;
1733 
1734 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1735 }
1736 
1737 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1738 {
1739 	struct amdgpu_dm_connector *aconnector;
1740 	struct drm_connector *connector;
1741 	struct drm_connector_list_iter iter;
1742 	struct drm_dp_mst_topology_mgr *mgr;
1743 	int ret;
1744 	bool need_hotplug = false;
1745 
1746 	drm_connector_list_iter_begin(dev, &iter);
1747 	drm_for_each_connector_iter(connector, &iter) {
1748 		aconnector = to_amdgpu_dm_connector(connector);
1749 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1750 		    aconnector->mst_port)
1751 			continue;
1752 
1753 		mgr = &aconnector->mst_mgr;
1754 
1755 		if (suspend) {
1756 			drm_dp_mst_topology_mgr_suspend(mgr);
1757 		} else {
1758 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1759 			if (ret < 0) {
1760 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1761 				need_hotplug = true;
1762 			}
1763 		}
1764 	}
1765 	drm_connector_list_iter_end(&iter);
1766 
1767 	if (need_hotplug)
1768 		drm_kms_helper_hotplug_event(dev);
1769 }
1770 
1771 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1772 {
1773 	struct smu_context *smu = &adev->smu;
1774 	int ret = 0;
1775 
1776 	if (!is_support_sw_smu(adev))
1777 		return 0;
1778 
1779 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1780 	 * on window driver dc implementation.
1781 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1782 	 * should be passed to smu during boot up and resume from s3.
1783 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1784 	 * dcn20_resource_construct
1785 	 * then call pplib functions below to pass the settings to smu:
1786 	 * smu_set_watermarks_for_clock_ranges
1787 	 * smu_set_watermarks_table
1788 	 * navi10_set_watermarks_table
1789 	 * smu_write_watermarks_table
1790 	 *
1791 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1792 	 * dc has implemented different flow for window driver:
1793 	 * dc_hardware_init / dc_set_power_state
1794 	 * dcn10_init_hw
1795 	 * notify_wm_ranges
1796 	 * set_wm_ranges
1797 	 * -- Linux
1798 	 * smu_set_watermarks_for_clock_ranges
1799 	 * renoir_set_watermarks_table
1800 	 * smu_write_watermarks_table
1801 	 *
1802 	 * For Linux,
1803 	 * dc_hardware_init -> amdgpu_dm_init
1804 	 * dc_set_power_state --> dm_resume
1805 	 *
1806 	 * therefore, this function apply to navi10/12/14 but not Renoir
1807 	 * *
1808 	 */
1809 	switch(adev->asic_type) {
1810 	case CHIP_NAVI10:
1811 	case CHIP_NAVI14:
1812 	case CHIP_NAVI12:
1813 		break;
1814 	default:
1815 		return 0;
1816 	}
1817 
1818 	ret = smu_write_watermarks_table(smu);
1819 	if (ret) {
1820 		DRM_ERROR("Failed to update WMTABLE!\n");
1821 		return ret;
1822 	}
1823 
1824 	return 0;
1825 }
1826 
1827 /**
1828  * dm_hw_init() - Initialize DC device
1829  * @handle: The base driver device containing the amdgpu_dm device.
1830  *
1831  * Initialize the &struct amdgpu_display_manager device. This involves calling
1832  * the initializers of each DM component, then populating the struct with them.
1833  *
1834  * Although the function implies hardware initialization, both hardware and
1835  * software are initialized here. Splitting them out to their relevant init
1836  * hooks is a future TODO item.
1837  *
1838  * Some notable things that are initialized here:
1839  *
1840  * - Display Core, both software and hardware
1841  * - DC modules that we need (freesync and color management)
1842  * - DRM software states
1843  * - Interrupt sources and handlers
1844  * - Vblank support
1845  * - Debug FS entries, if enabled
1846  */
1847 static int dm_hw_init(void *handle)
1848 {
1849 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1850 	/* Create DAL display manager */
1851 	amdgpu_dm_init(adev);
1852 	amdgpu_dm_hpd_init(adev);
1853 
1854 	return 0;
1855 }
1856 
1857 /**
1858  * dm_hw_fini() - Teardown DC device
1859  * @handle: The base driver device containing the amdgpu_dm device.
1860  *
1861  * Teardown components within &struct amdgpu_display_manager that require
1862  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1863  * were loaded. Also flush IRQ workqueues and disable them.
1864  */
1865 static int dm_hw_fini(void *handle)
1866 {
1867 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1868 
1869 	amdgpu_dm_hpd_fini(adev);
1870 
1871 	amdgpu_dm_irq_fini(adev);
1872 	amdgpu_dm_fini(adev);
1873 	return 0;
1874 }
1875 
1876 
1877 static int dm_enable_vblank(struct drm_crtc *crtc);
1878 static void dm_disable_vblank(struct drm_crtc *crtc);
1879 
1880 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1881 				 struct dc_state *state, bool enable)
1882 {
1883 	enum dc_irq_source irq_source;
1884 	struct amdgpu_crtc *acrtc;
1885 	int rc = -EBUSY;
1886 	int i = 0;
1887 
1888 	for (i = 0; i < state->stream_count; i++) {
1889 		acrtc = get_crtc_by_otg_inst(
1890 				adev, state->stream_status[i].primary_otg_inst);
1891 
1892 		if (acrtc && state->stream_status[i].plane_count != 0) {
1893 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1894 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1895 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1896 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1897 			if (rc)
1898 				DRM_WARN("Failed to %s pflip interrupts\n",
1899 					 enable ? "enable" : "disable");
1900 
1901 			if (enable) {
1902 				rc = dm_enable_vblank(&acrtc->base);
1903 				if (rc)
1904 					DRM_WARN("Failed to enable vblank interrupts\n");
1905 			} else {
1906 				dm_disable_vblank(&acrtc->base);
1907 			}
1908 
1909 		}
1910 	}
1911 
1912 }
1913 
1914 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1915 {
1916 	struct dc_state *context = NULL;
1917 	enum dc_status res = DC_ERROR_UNEXPECTED;
1918 	int i;
1919 	struct dc_stream_state *del_streams[MAX_PIPES];
1920 	int del_streams_count = 0;
1921 
1922 	memset(del_streams, 0, sizeof(del_streams));
1923 
1924 	context = dc_create_state(dc);
1925 	if (context == NULL)
1926 		goto context_alloc_fail;
1927 
1928 	dc_resource_state_copy_construct_current(dc, context);
1929 
1930 	/* First remove from context all streams */
1931 	for (i = 0; i < context->stream_count; i++) {
1932 		struct dc_stream_state *stream = context->streams[i];
1933 
1934 		del_streams[del_streams_count++] = stream;
1935 	}
1936 
1937 	/* Remove all planes for removed streams and then remove the streams */
1938 	for (i = 0; i < del_streams_count; i++) {
1939 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1940 			res = DC_FAIL_DETACH_SURFACES;
1941 			goto fail;
1942 		}
1943 
1944 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1945 		if (res != DC_OK)
1946 			goto fail;
1947 	}
1948 
1949 
1950 	res = dc_validate_global_state(dc, context, false);
1951 
1952 	if (res != DC_OK) {
1953 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1954 		goto fail;
1955 	}
1956 
1957 	res = dc_commit_state(dc, context);
1958 
1959 fail:
1960 	dc_release_state(context);
1961 
1962 context_alloc_fail:
1963 	return res;
1964 }
1965 
1966 static int dm_suspend(void *handle)
1967 {
1968 	struct amdgpu_device *adev = handle;
1969 	struct amdgpu_display_manager *dm = &adev->dm;
1970 	int ret = 0;
1971 
1972 	if (amdgpu_in_reset(adev)) {
1973 		mutex_lock(&dm->dc_lock);
1974 
1975 #if defined(CONFIG_DRM_AMD_DC_DCN)
1976 		dc_allow_idle_optimizations(adev->dm.dc, false);
1977 #endif
1978 
1979 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1980 
1981 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1982 
1983 		amdgpu_dm_commit_zero_streams(dm->dc);
1984 
1985 		amdgpu_dm_irq_suspend(adev);
1986 
1987 		return ret;
1988 	}
1989 
1990 	WARN_ON(adev->dm.cached_state);
1991 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1992 
1993 	s3_handle_mst(adev_to_drm(adev), true);
1994 
1995 	amdgpu_dm_irq_suspend(adev);
1996 
1997 
1998 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1999 
2000 	return 0;
2001 }
2002 
2003 static struct amdgpu_dm_connector *
2004 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2005 					     struct drm_crtc *crtc)
2006 {
2007 	uint32_t i;
2008 	struct drm_connector_state *new_con_state;
2009 	struct drm_connector *connector;
2010 	struct drm_crtc *crtc_from_state;
2011 
2012 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2013 		crtc_from_state = new_con_state->crtc;
2014 
2015 		if (crtc_from_state == crtc)
2016 			return to_amdgpu_dm_connector(connector);
2017 	}
2018 
2019 	return NULL;
2020 }
2021 
2022 static void emulated_link_detect(struct dc_link *link)
2023 {
2024 	struct dc_sink_init_data sink_init_data = { 0 };
2025 	struct display_sink_capability sink_caps = { 0 };
2026 	enum dc_edid_status edid_status;
2027 	struct dc_context *dc_ctx = link->ctx;
2028 	struct dc_sink *sink = NULL;
2029 	struct dc_sink *prev_sink = NULL;
2030 
2031 	link->type = dc_connection_none;
2032 	prev_sink = link->local_sink;
2033 
2034 	if (prev_sink)
2035 		dc_sink_release(prev_sink);
2036 
2037 	switch (link->connector_signal) {
2038 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2039 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2040 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2041 		break;
2042 	}
2043 
2044 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2045 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2046 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2047 		break;
2048 	}
2049 
2050 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2051 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2052 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2053 		break;
2054 	}
2055 
2056 	case SIGNAL_TYPE_LVDS: {
2057 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2058 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2059 		break;
2060 	}
2061 
2062 	case SIGNAL_TYPE_EDP: {
2063 		sink_caps.transaction_type =
2064 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2065 		sink_caps.signal = SIGNAL_TYPE_EDP;
2066 		break;
2067 	}
2068 
2069 	case SIGNAL_TYPE_DISPLAY_PORT: {
2070 		sink_caps.transaction_type =
2071 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2072 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2073 		break;
2074 	}
2075 
2076 	default:
2077 		DC_ERROR("Invalid connector type! signal:%d\n",
2078 			link->connector_signal);
2079 		return;
2080 	}
2081 
2082 	sink_init_data.link = link;
2083 	sink_init_data.sink_signal = sink_caps.signal;
2084 
2085 	sink = dc_sink_create(&sink_init_data);
2086 	if (!sink) {
2087 		DC_ERROR("Failed to create sink!\n");
2088 		return;
2089 	}
2090 
2091 	/* dc_sink_create returns a new reference */
2092 	link->local_sink = sink;
2093 
2094 	edid_status = dm_helpers_read_local_edid(
2095 			link->ctx,
2096 			link,
2097 			sink);
2098 
2099 	if (edid_status != EDID_OK)
2100 		DC_ERROR("Failed to read EDID");
2101 
2102 }
2103 
2104 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2105 				     struct amdgpu_display_manager *dm)
2106 {
2107 	struct {
2108 		struct dc_surface_update surface_updates[MAX_SURFACES];
2109 		struct dc_plane_info plane_infos[MAX_SURFACES];
2110 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2111 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2112 		struct dc_stream_update stream_update;
2113 	} * bundle;
2114 	int k, m;
2115 
2116 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2117 
2118 	if (!bundle) {
2119 		dm_error("Failed to allocate update bundle\n");
2120 		goto cleanup;
2121 	}
2122 
2123 	for (k = 0; k < dc_state->stream_count; k++) {
2124 		bundle->stream_update.stream = dc_state->streams[k];
2125 
2126 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2127 			bundle->surface_updates[m].surface =
2128 				dc_state->stream_status->plane_states[m];
2129 			bundle->surface_updates[m].surface->force_full_update =
2130 				true;
2131 		}
2132 		dc_commit_updates_for_stream(
2133 			dm->dc, bundle->surface_updates,
2134 			dc_state->stream_status->plane_count,
2135 			dc_state->streams[k], &bundle->stream_update, dc_state);
2136 	}
2137 
2138 cleanup:
2139 	kfree(bundle);
2140 
2141 	return;
2142 }
2143 
2144 static void dm_set_dpms_off(struct dc_link *link)
2145 {
2146 	struct dc_stream_state *stream_state;
2147 	struct amdgpu_dm_connector *aconnector = link->priv;
2148 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2149 	struct dc_stream_update stream_update;
2150 	bool dpms_off = true;
2151 
2152 	memset(&stream_update, 0, sizeof(stream_update));
2153 	stream_update.dpms_off = &dpms_off;
2154 
2155 	mutex_lock(&adev->dm.dc_lock);
2156 	stream_state = dc_stream_find_from_link(link);
2157 
2158 	if (stream_state == NULL) {
2159 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2160 		mutex_unlock(&adev->dm.dc_lock);
2161 		return;
2162 	}
2163 
2164 	stream_update.stream = stream_state;
2165 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2166 				     stream_state, &stream_update,
2167 				     stream_state->ctx->dc->current_state);
2168 	mutex_unlock(&adev->dm.dc_lock);
2169 }
2170 
2171 static int dm_resume(void *handle)
2172 {
2173 	struct amdgpu_device *adev = handle;
2174 	struct drm_device *ddev = adev_to_drm(adev);
2175 	struct amdgpu_display_manager *dm = &adev->dm;
2176 	struct amdgpu_dm_connector *aconnector;
2177 	struct drm_connector *connector;
2178 	struct drm_connector_list_iter iter;
2179 	struct drm_crtc *crtc;
2180 	struct drm_crtc_state *new_crtc_state;
2181 	struct dm_crtc_state *dm_new_crtc_state;
2182 	struct drm_plane *plane;
2183 	struct drm_plane_state *new_plane_state;
2184 	struct dm_plane_state *dm_new_plane_state;
2185 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2186 	enum dc_connection_type new_connection_type = dc_connection_none;
2187 	struct dc_state *dc_state;
2188 	int i, r, j;
2189 
2190 	if (amdgpu_in_reset(adev)) {
2191 		dc_state = dm->cached_dc_state;
2192 
2193 		r = dm_dmub_hw_init(adev);
2194 		if (r)
2195 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2196 
2197 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2198 		dc_resume(dm->dc);
2199 
2200 		amdgpu_dm_irq_resume_early(adev);
2201 
2202 		for (i = 0; i < dc_state->stream_count; i++) {
2203 			dc_state->streams[i]->mode_changed = true;
2204 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2205 				dc_state->stream_status->plane_states[j]->update_flags.raw
2206 					= 0xffffffff;
2207 			}
2208 		}
2209 
2210 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2211 
2212 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2213 
2214 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2215 
2216 		dc_release_state(dm->cached_dc_state);
2217 		dm->cached_dc_state = NULL;
2218 
2219 		amdgpu_dm_irq_resume_late(adev);
2220 
2221 		mutex_unlock(&dm->dc_lock);
2222 
2223 		return 0;
2224 	}
2225 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2226 	dc_release_state(dm_state->context);
2227 	dm_state->context = dc_create_state(dm->dc);
2228 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2229 	dc_resource_state_construct(dm->dc, dm_state->context);
2230 
2231 	/* Before powering on DC we need to re-initialize DMUB. */
2232 	r = dm_dmub_hw_init(adev);
2233 	if (r)
2234 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2235 
2236 	/* power on hardware */
2237 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2238 
2239 	/* program HPD filter */
2240 	dc_resume(dm->dc);
2241 
2242 	/*
2243 	 * early enable HPD Rx IRQ, should be done before set mode as short
2244 	 * pulse interrupts are used for MST
2245 	 */
2246 	amdgpu_dm_irq_resume_early(adev);
2247 
2248 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2249 	s3_handle_mst(ddev, false);
2250 
2251 	/* Do detection*/
2252 	drm_connector_list_iter_begin(ddev, &iter);
2253 	drm_for_each_connector_iter(connector, &iter) {
2254 		aconnector = to_amdgpu_dm_connector(connector);
2255 
2256 		/*
2257 		 * this is the case when traversing through already created
2258 		 * MST connectors, should be skipped
2259 		 */
2260 		if (aconnector->mst_port)
2261 			continue;
2262 
2263 		mutex_lock(&aconnector->hpd_lock);
2264 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2265 			DRM_ERROR("KMS: Failed to detect connector\n");
2266 
2267 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2268 			emulated_link_detect(aconnector->dc_link);
2269 		else
2270 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2271 
2272 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2273 			aconnector->fake_enable = false;
2274 
2275 		if (aconnector->dc_sink)
2276 			dc_sink_release(aconnector->dc_sink);
2277 		aconnector->dc_sink = NULL;
2278 		amdgpu_dm_update_connector_after_detect(aconnector);
2279 		mutex_unlock(&aconnector->hpd_lock);
2280 	}
2281 	drm_connector_list_iter_end(&iter);
2282 
2283 	/* Force mode set in atomic commit */
2284 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2285 		new_crtc_state->active_changed = true;
2286 
2287 	/*
2288 	 * atomic_check is expected to create the dc states. We need to release
2289 	 * them here, since they were duplicated as part of the suspend
2290 	 * procedure.
2291 	 */
2292 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2293 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2294 		if (dm_new_crtc_state->stream) {
2295 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2296 			dc_stream_release(dm_new_crtc_state->stream);
2297 			dm_new_crtc_state->stream = NULL;
2298 		}
2299 	}
2300 
2301 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2302 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2303 		if (dm_new_plane_state->dc_state) {
2304 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2305 			dc_plane_state_release(dm_new_plane_state->dc_state);
2306 			dm_new_plane_state->dc_state = NULL;
2307 		}
2308 	}
2309 
2310 	drm_atomic_helper_resume(ddev, dm->cached_state);
2311 
2312 	dm->cached_state = NULL;
2313 
2314 	amdgpu_dm_irq_resume_late(adev);
2315 
2316 	amdgpu_dm_smu_write_watermarks_table(adev);
2317 
2318 	return 0;
2319 }
2320 
2321 /**
2322  * DOC: DM Lifecycle
2323  *
2324  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2325  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2326  * the base driver's device list to be initialized and torn down accordingly.
2327  *
2328  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2329  */
2330 
2331 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2332 	.name = "dm",
2333 	.early_init = dm_early_init,
2334 	.late_init = dm_late_init,
2335 	.sw_init = dm_sw_init,
2336 	.sw_fini = dm_sw_fini,
2337 	.hw_init = dm_hw_init,
2338 	.hw_fini = dm_hw_fini,
2339 	.suspend = dm_suspend,
2340 	.resume = dm_resume,
2341 	.is_idle = dm_is_idle,
2342 	.wait_for_idle = dm_wait_for_idle,
2343 	.check_soft_reset = dm_check_soft_reset,
2344 	.soft_reset = dm_soft_reset,
2345 	.set_clockgating_state = dm_set_clockgating_state,
2346 	.set_powergating_state = dm_set_powergating_state,
2347 };
2348 
2349 const struct amdgpu_ip_block_version dm_ip_block =
2350 {
2351 	.type = AMD_IP_BLOCK_TYPE_DCE,
2352 	.major = 1,
2353 	.minor = 0,
2354 	.rev = 0,
2355 	.funcs = &amdgpu_dm_funcs,
2356 };
2357 
2358 
2359 /**
2360  * DOC: atomic
2361  *
2362  * *WIP*
2363  */
2364 
2365 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2366 	.fb_create = amdgpu_display_user_framebuffer_create,
2367 	.get_format_info = amd_get_format_info,
2368 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2369 	.atomic_check = amdgpu_dm_atomic_check,
2370 	.atomic_commit = drm_atomic_helper_commit,
2371 };
2372 
2373 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2374 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2375 };
2376 
2377 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2378 {
2379 	u32 max_cll, min_cll, max, min, q, r;
2380 	struct amdgpu_dm_backlight_caps *caps;
2381 	struct amdgpu_display_manager *dm;
2382 	struct drm_connector *conn_base;
2383 	struct amdgpu_device *adev;
2384 	struct dc_link *link = NULL;
2385 	static const u8 pre_computed_values[] = {
2386 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2387 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2388 
2389 	if (!aconnector || !aconnector->dc_link)
2390 		return;
2391 
2392 	link = aconnector->dc_link;
2393 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2394 		return;
2395 
2396 	conn_base = &aconnector->base;
2397 	adev = drm_to_adev(conn_base->dev);
2398 	dm = &adev->dm;
2399 	caps = &dm->backlight_caps;
2400 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2401 	caps->aux_support = false;
2402 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2403 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2404 
2405 	if (caps->ext_caps->bits.oled == 1 ||
2406 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2407 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2408 		caps->aux_support = true;
2409 
2410 	if (amdgpu_backlight == 0)
2411 		caps->aux_support = false;
2412 	else if (amdgpu_backlight == 1)
2413 		caps->aux_support = true;
2414 
2415 	/* From the specification (CTA-861-G), for calculating the maximum
2416 	 * luminance we need to use:
2417 	 *	Luminance = 50*2**(CV/32)
2418 	 * Where CV is a one-byte value.
2419 	 * For calculating this expression we may need float point precision;
2420 	 * to avoid this complexity level, we take advantage that CV is divided
2421 	 * by a constant. From the Euclids division algorithm, we know that CV
2422 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2423 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2424 	 * need to pre-compute the value of r/32. For pre-computing the values
2425 	 * We just used the following Ruby line:
2426 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2427 	 * The results of the above expressions can be verified at
2428 	 * pre_computed_values.
2429 	 */
2430 	q = max_cll >> 5;
2431 	r = max_cll % 32;
2432 	max = (1 << q) * pre_computed_values[r];
2433 
2434 	// min luminance: maxLum * (CV/255)^2 / 100
2435 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2436 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2437 
2438 	caps->aux_max_input_signal = max;
2439 	caps->aux_min_input_signal = min;
2440 }
2441 
2442 void amdgpu_dm_update_connector_after_detect(
2443 		struct amdgpu_dm_connector *aconnector)
2444 {
2445 	struct drm_connector *connector = &aconnector->base;
2446 	struct drm_device *dev = connector->dev;
2447 	struct dc_sink *sink;
2448 
2449 	/* MST handled by drm_mst framework */
2450 	if (aconnector->mst_mgr.mst_state == true)
2451 		return;
2452 
2453 	sink = aconnector->dc_link->local_sink;
2454 	if (sink)
2455 		dc_sink_retain(sink);
2456 
2457 	/*
2458 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2459 	 * the connector sink is set to either fake or physical sink depends on link status.
2460 	 * Skip if already done during boot.
2461 	 */
2462 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2463 			&& aconnector->dc_em_sink) {
2464 
2465 		/*
2466 		 * For S3 resume with headless use eml_sink to fake stream
2467 		 * because on resume connector->sink is set to NULL
2468 		 */
2469 		mutex_lock(&dev->mode_config.mutex);
2470 
2471 		if (sink) {
2472 			if (aconnector->dc_sink) {
2473 				amdgpu_dm_update_freesync_caps(connector, NULL);
2474 				/*
2475 				 * retain and release below are used to
2476 				 * bump up refcount for sink because the link doesn't point
2477 				 * to it anymore after disconnect, so on next crtc to connector
2478 				 * reshuffle by UMD we will get into unwanted dc_sink release
2479 				 */
2480 				dc_sink_release(aconnector->dc_sink);
2481 			}
2482 			aconnector->dc_sink = sink;
2483 			dc_sink_retain(aconnector->dc_sink);
2484 			amdgpu_dm_update_freesync_caps(connector,
2485 					aconnector->edid);
2486 		} else {
2487 			amdgpu_dm_update_freesync_caps(connector, NULL);
2488 			if (!aconnector->dc_sink) {
2489 				aconnector->dc_sink = aconnector->dc_em_sink;
2490 				dc_sink_retain(aconnector->dc_sink);
2491 			}
2492 		}
2493 
2494 		mutex_unlock(&dev->mode_config.mutex);
2495 
2496 		if (sink)
2497 			dc_sink_release(sink);
2498 		return;
2499 	}
2500 
2501 	/*
2502 	 * TODO: temporary guard to look for proper fix
2503 	 * if this sink is MST sink, we should not do anything
2504 	 */
2505 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2506 		dc_sink_release(sink);
2507 		return;
2508 	}
2509 
2510 	if (aconnector->dc_sink == sink) {
2511 		/*
2512 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2513 		 * Do nothing!!
2514 		 */
2515 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2516 				aconnector->connector_id);
2517 		if (sink)
2518 			dc_sink_release(sink);
2519 		return;
2520 	}
2521 
2522 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2523 		aconnector->connector_id, aconnector->dc_sink, sink);
2524 
2525 	mutex_lock(&dev->mode_config.mutex);
2526 
2527 	/*
2528 	 * 1. Update status of the drm connector
2529 	 * 2. Send an event and let userspace tell us what to do
2530 	 */
2531 	if (sink) {
2532 		/*
2533 		 * TODO: check if we still need the S3 mode update workaround.
2534 		 * If yes, put it here.
2535 		 */
2536 		if (aconnector->dc_sink) {
2537 			amdgpu_dm_update_freesync_caps(connector, NULL);
2538 			dc_sink_release(aconnector->dc_sink);
2539 		}
2540 
2541 		aconnector->dc_sink = sink;
2542 		dc_sink_retain(aconnector->dc_sink);
2543 		if (sink->dc_edid.length == 0) {
2544 			aconnector->edid = NULL;
2545 			if (aconnector->dc_link->aux_mode) {
2546 				drm_dp_cec_unset_edid(
2547 					&aconnector->dm_dp_aux.aux);
2548 			}
2549 		} else {
2550 			aconnector->edid =
2551 				(struct edid *)sink->dc_edid.raw_edid;
2552 
2553 			drm_connector_update_edid_property(connector,
2554 							   aconnector->edid);
2555 			if (aconnector->dc_link->aux_mode)
2556 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2557 						    aconnector->edid);
2558 		}
2559 
2560 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2561 		update_connector_ext_caps(aconnector);
2562 	} else {
2563 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2564 		amdgpu_dm_update_freesync_caps(connector, NULL);
2565 		drm_connector_update_edid_property(connector, NULL);
2566 		aconnector->num_modes = 0;
2567 		dc_sink_release(aconnector->dc_sink);
2568 		aconnector->dc_sink = NULL;
2569 		aconnector->edid = NULL;
2570 #ifdef CONFIG_DRM_AMD_DC_HDCP
2571 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2572 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2573 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2574 #endif
2575 	}
2576 
2577 	mutex_unlock(&dev->mode_config.mutex);
2578 
2579 	update_subconnector_property(aconnector);
2580 
2581 	if (sink)
2582 		dc_sink_release(sink);
2583 }
2584 
2585 static void handle_hpd_irq(void *param)
2586 {
2587 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2588 	struct drm_connector *connector = &aconnector->base;
2589 	struct drm_device *dev = connector->dev;
2590 	enum dc_connection_type new_connection_type = dc_connection_none;
2591 	struct amdgpu_device *adev = drm_to_adev(dev);
2592 #ifdef CONFIG_DRM_AMD_DC_HDCP
2593 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2594 #endif
2595 
2596 	if (adev->dm.disable_hpd_irq)
2597 		return;
2598 
2599 	/*
2600 	 * In case of failure or MST no need to update connector status or notify the OS
2601 	 * since (for MST case) MST does this in its own context.
2602 	 */
2603 	mutex_lock(&aconnector->hpd_lock);
2604 
2605 #ifdef CONFIG_DRM_AMD_DC_HDCP
2606 	if (adev->dm.hdcp_workqueue) {
2607 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2608 		dm_con_state->update_hdcp = true;
2609 	}
2610 #endif
2611 	if (aconnector->fake_enable)
2612 		aconnector->fake_enable = false;
2613 
2614 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2615 		DRM_ERROR("KMS: Failed to detect connector\n");
2616 
2617 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2618 		emulated_link_detect(aconnector->dc_link);
2619 
2620 
2621 		drm_modeset_lock_all(dev);
2622 		dm_restore_drm_connector_state(dev, connector);
2623 		drm_modeset_unlock_all(dev);
2624 
2625 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2626 			drm_kms_helper_hotplug_event(dev);
2627 
2628 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2629 		if (new_connection_type == dc_connection_none &&
2630 		    aconnector->dc_link->type == dc_connection_none)
2631 			dm_set_dpms_off(aconnector->dc_link);
2632 
2633 		amdgpu_dm_update_connector_after_detect(aconnector);
2634 
2635 		drm_modeset_lock_all(dev);
2636 		dm_restore_drm_connector_state(dev, connector);
2637 		drm_modeset_unlock_all(dev);
2638 
2639 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2640 			drm_kms_helper_hotplug_event(dev);
2641 	}
2642 	mutex_unlock(&aconnector->hpd_lock);
2643 
2644 }
2645 
2646 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2647 {
2648 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2649 	uint8_t dret;
2650 	bool new_irq_handled = false;
2651 	int dpcd_addr;
2652 	int dpcd_bytes_to_read;
2653 
2654 	const int max_process_count = 30;
2655 	int process_count = 0;
2656 
2657 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2658 
2659 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2660 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2661 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2662 		dpcd_addr = DP_SINK_COUNT;
2663 	} else {
2664 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2665 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2666 		dpcd_addr = DP_SINK_COUNT_ESI;
2667 	}
2668 
2669 	dret = drm_dp_dpcd_read(
2670 		&aconnector->dm_dp_aux.aux,
2671 		dpcd_addr,
2672 		esi,
2673 		dpcd_bytes_to_read);
2674 
2675 	while (dret == dpcd_bytes_to_read &&
2676 		process_count < max_process_count) {
2677 		uint8_t retry;
2678 		dret = 0;
2679 
2680 		process_count++;
2681 
2682 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2683 		/* handle HPD short pulse irq */
2684 		if (aconnector->mst_mgr.mst_state)
2685 			drm_dp_mst_hpd_irq(
2686 				&aconnector->mst_mgr,
2687 				esi,
2688 				&new_irq_handled);
2689 
2690 		if (new_irq_handled) {
2691 			/* ACK at DPCD to notify down stream */
2692 			const int ack_dpcd_bytes_to_write =
2693 				dpcd_bytes_to_read - 1;
2694 
2695 			for (retry = 0; retry < 3; retry++) {
2696 				uint8_t wret;
2697 
2698 				wret = drm_dp_dpcd_write(
2699 					&aconnector->dm_dp_aux.aux,
2700 					dpcd_addr + 1,
2701 					&esi[1],
2702 					ack_dpcd_bytes_to_write);
2703 				if (wret == ack_dpcd_bytes_to_write)
2704 					break;
2705 			}
2706 
2707 			/* check if there is new irq to be handled */
2708 			dret = drm_dp_dpcd_read(
2709 				&aconnector->dm_dp_aux.aux,
2710 				dpcd_addr,
2711 				esi,
2712 				dpcd_bytes_to_read);
2713 
2714 			new_irq_handled = false;
2715 		} else {
2716 			break;
2717 		}
2718 	}
2719 
2720 	if (process_count == max_process_count)
2721 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2722 }
2723 
2724 static void handle_hpd_rx_irq(void *param)
2725 {
2726 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2727 	struct drm_connector *connector = &aconnector->base;
2728 	struct drm_device *dev = connector->dev;
2729 	struct dc_link *dc_link = aconnector->dc_link;
2730 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2731 	bool result = false;
2732 	enum dc_connection_type new_connection_type = dc_connection_none;
2733 	struct amdgpu_device *adev = drm_to_adev(dev);
2734 	union hpd_irq_data hpd_irq_data;
2735 
2736 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2737 
2738 	if (adev->dm.disable_hpd_irq)
2739 		return;
2740 
2741 
2742 	/*
2743 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2744 	 * conflict, after implement i2c helper, this mutex should be
2745 	 * retired.
2746 	 */
2747 	mutex_lock(&aconnector->hpd_lock);
2748 
2749 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2750 
2751 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2752 		(dc_link->type == dc_connection_mst_branch)) {
2753 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2754 			result = true;
2755 			dm_handle_hpd_rx_irq(aconnector);
2756 			goto out;
2757 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2758 			result = false;
2759 			dm_handle_hpd_rx_irq(aconnector);
2760 			goto out;
2761 		}
2762 	}
2763 
2764 	if (!amdgpu_in_reset(adev)) {
2765 		mutex_lock(&adev->dm.dc_lock);
2766 #ifdef CONFIG_DRM_AMD_DC_HDCP
2767 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2768 #else
2769 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2770 #endif
2771 		mutex_unlock(&adev->dm.dc_lock);
2772 	}
2773 
2774 out:
2775 	if (result && !is_mst_root_connector) {
2776 		/* Downstream Port status changed. */
2777 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2778 			DRM_ERROR("KMS: Failed to detect connector\n");
2779 
2780 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2781 			emulated_link_detect(dc_link);
2782 
2783 			if (aconnector->fake_enable)
2784 				aconnector->fake_enable = false;
2785 
2786 			amdgpu_dm_update_connector_after_detect(aconnector);
2787 
2788 
2789 			drm_modeset_lock_all(dev);
2790 			dm_restore_drm_connector_state(dev, connector);
2791 			drm_modeset_unlock_all(dev);
2792 
2793 			drm_kms_helper_hotplug_event(dev);
2794 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2795 
2796 			if (aconnector->fake_enable)
2797 				aconnector->fake_enable = false;
2798 
2799 			amdgpu_dm_update_connector_after_detect(aconnector);
2800 
2801 
2802 			drm_modeset_lock_all(dev);
2803 			dm_restore_drm_connector_state(dev, connector);
2804 			drm_modeset_unlock_all(dev);
2805 
2806 			drm_kms_helper_hotplug_event(dev);
2807 		}
2808 	}
2809 #ifdef CONFIG_DRM_AMD_DC_HDCP
2810 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2811 		if (adev->dm.hdcp_workqueue)
2812 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2813 	}
2814 #endif
2815 
2816 	if (dc_link->type != dc_connection_mst_branch)
2817 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2818 
2819 	mutex_unlock(&aconnector->hpd_lock);
2820 }
2821 
2822 static void register_hpd_handlers(struct amdgpu_device *adev)
2823 {
2824 	struct drm_device *dev = adev_to_drm(adev);
2825 	struct drm_connector *connector;
2826 	struct amdgpu_dm_connector *aconnector;
2827 	const struct dc_link *dc_link;
2828 	struct dc_interrupt_params int_params = {0};
2829 
2830 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2831 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2832 
2833 	list_for_each_entry(connector,
2834 			&dev->mode_config.connector_list, head)	{
2835 
2836 		aconnector = to_amdgpu_dm_connector(connector);
2837 		dc_link = aconnector->dc_link;
2838 
2839 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2840 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2841 			int_params.irq_source = dc_link->irq_source_hpd;
2842 
2843 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2844 					handle_hpd_irq,
2845 					(void *) aconnector);
2846 		}
2847 
2848 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2849 
2850 			/* Also register for DP short pulse (hpd_rx). */
2851 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2852 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2853 
2854 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2855 					handle_hpd_rx_irq,
2856 					(void *) aconnector);
2857 		}
2858 	}
2859 }
2860 
2861 #if defined(CONFIG_DRM_AMD_DC_SI)
2862 /* Register IRQ sources and initialize IRQ callbacks */
2863 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2864 {
2865 	struct dc *dc = adev->dm.dc;
2866 	struct common_irq_params *c_irq_params;
2867 	struct dc_interrupt_params int_params = {0};
2868 	int r;
2869 	int i;
2870 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2871 
2872 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2873 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2874 
2875 	/*
2876 	 * Actions of amdgpu_irq_add_id():
2877 	 * 1. Register a set() function with base driver.
2878 	 *    Base driver will call set() function to enable/disable an
2879 	 *    interrupt in DC hardware.
2880 	 * 2. Register amdgpu_dm_irq_handler().
2881 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2882 	 *    coming from DC hardware.
2883 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2884 	 *    for acknowledging and handling. */
2885 
2886 	/* Use VBLANK interrupt */
2887 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2888 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2889 		if (r) {
2890 			DRM_ERROR("Failed to add crtc irq id!\n");
2891 			return r;
2892 		}
2893 
2894 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2895 		int_params.irq_source =
2896 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2897 
2898 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2899 
2900 		c_irq_params->adev = adev;
2901 		c_irq_params->irq_src = int_params.irq_source;
2902 
2903 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2904 				dm_crtc_high_irq, c_irq_params);
2905 	}
2906 
2907 	/* Use GRPH_PFLIP interrupt */
2908 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2909 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2910 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2911 		if (r) {
2912 			DRM_ERROR("Failed to add page flip irq id!\n");
2913 			return r;
2914 		}
2915 
2916 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2917 		int_params.irq_source =
2918 			dc_interrupt_to_irq_source(dc, i, 0);
2919 
2920 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2921 
2922 		c_irq_params->adev = adev;
2923 		c_irq_params->irq_src = int_params.irq_source;
2924 
2925 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2926 				dm_pflip_high_irq, c_irq_params);
2927 
2928 	}
2929 
2930 	/* HPD */
2931 	r = amdgpu_irq_add_id(adev, client_id,
2932 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2933 	if (r) {
2934 		DRM_ERROR("Failed to add hpd irq id!\n");
2935 		return r;
2936 	}
2937 
2938 	register_hpd_handlers(adev);
2939 
2940 	return 0;
2941 }
2942 #endif
2943 
2944 /* Register IRQ sources and initialize IRQ callbacks */
2945 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2946 {
2947 	struct dc *dc = adev->dm.dc;
2948 	struct common_irq_params *c_irq_params;
2949 	struct dc_interrupt_params int_params = {0};
2950 	int r;
2951 	int i;
2952 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2953 
2954 	if (adev->asic_type >= CHIP_VEGA10)
2955 		client_id = SOC15_IH_CLIENTID_DCE;
2956 
2957 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2958 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2959 
2960 	/*
2961 	 * Actions of amdgpu_irq_add_id():
2962 	 * 1. Register a set() function with base driver.
2963 	 *    Base driver will call set() function to enable/disable an
2964 	 *    interrupt in DC hardware.
2965 	 * 2. Register amdgpu_dm_irq_handler().
2966 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2967 	 *    coming from DC hardware.
2968 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2969 	 *    for acknowledging and handling. */
2970 
2971 	/* Use VBLANK interrupt */
2972 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2973 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2974 		if (r) {
2975 			DRM_ERROR("Failed to add crtc irq id!\n");
2976 			return r;
2977 		}
2978 
2979 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2980 		int_params.irq_source =
2981 			dc_interrupt_to_irq_source(dc, i, 0);
2982 
2983 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2984 
2985 		c_irq_params->adev = adev;
2986 		c_irq_params->irq_src = int_params.irq_source;
2987 
2988 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2989 				dm_crtc_high_irq, c_irq_params);
2990 	}
2991 
2992 	/* Use VUPDATE interrupt */
2993 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2994 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2995 		if (r) {
2996 			DRM_ERROR("Failed to add vupdate irq id!\n");
2997 			return r;
2998 		}
2999 
3000 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3001 		int_params.irq_source =
3002 			dc_interrupt_to_irq_source(dc, i, 0);
3003 
3004 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3005 
3006 		c_irq_params->adev = adev;
3007 		c_irq_params->irq_src = int_params.irq_source;
3008 
3009 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3010 				dm_vupdate_high_irq, c_irq_params);
3011 	}
3012 
3013 	/* Use GRPH_PFLIP interrupt */
3014 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3015 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3016 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3017 		if (r) {
3018 			DRM_ERROR("Failed to add page flip irq id!\n");
3019 			return r;
3020 		}
3021 
3022 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3023 		int_params.irq_source =
3024 			dc_interrupt_to_irq_source(dc, i, 0);
3025 
3026 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3027 
3028 		c_irq_params->adev = adev;
3029 		c_irq_params->irq_src = int_params.irq_source;
3030 
3031 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3032 				dm_pflip_high_irq, c_irq_params);
3033 
3034 	}
3035 
3036 	/* HPD */
3037 	r = amdgpu_irq_add_id(adev, client_id,
3038 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3039 	if (r) {
3040 		DRM_ERROR("Failed to add hpd irq id!\n");
3041 		return r;
3042 	}
3043 
3044 	register_hpd_handlers(adev);
3045 
3046 	return 0;
3047 }
3048 
3049 #if defined(CONFIG_DRM_AMD_DC_DCN)
3050 /* Register IRQ sources and initialize IRQ callbacks */
3051 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3052 {
3053 	struct dc *dc = adev->dm.dc;
3054 	struct common_irq_params *c_irq_params;
3055 	struct dc_interrupt_params int_params = {0};
3056 	int r;
3057 	int i;
3058 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3059 	static const unsigned int vrtl_int_srcid[] = {
3060 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3061 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3062 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3063 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3064 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3065 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3066 	};
3067 #endif
3068 
3069 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3070 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3071 
3072 	/*
3073 	 * Actions of amdgpu_irq_add_id():
3074 	 * 1. Register a set() function with base driver.
3075 	 *    Base driver will call set() function to enable/disable an
3076 	 *    interrupt in DC hardware.
3077 	 * 2. Register amdgpu_dm_irq_handler().
3078 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3079 	 *    coming from DC hardware.
3080 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3081 	 *    for acknowledging and handling.
3082 	 */
3083 
3084 	/* Use VSTARTUP interrupt */
3085 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3086 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3087 			i++) {
3088 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3089 
3090 		if (r) {
3091 			DRM_ERROR("Failed to add crtc irq id!\n");
3092 			return r;
3093 		}
3094 
3095 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3096 		int_params.irq_source =
3097 			dc_interrupt_to_irq_source(dc, i, 0);
3098 
3099 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3100 
3101 		c_irq_params->adev = adev;
3102 		c_irq_params->irq_src = int_params.irq_source;
3103 
3104 		amdgpu_dm_irq_register_interrupt(
3105 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3106 	}
3107 
3108 	/* Use otg vertical line interrupt */
3109 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3110 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3111 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3112 				vrtl_int_srcid[i], &adev->vline0_irq);
3113 
3114 		if (r) {
3115 			DRM_ERROR("Failed to add vline0 irq id!\n");
3116 			return r;
3117 		}
3118 
3119 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3120 		int_params.irq_source =
3121 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3122 
3123 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3124 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3125 			break;
3126 		}
3127 
3128 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3129 					- DC_IRQ_SOURCE_DC1_VLINE0];
3130 
3131 		c_irq_params->adev = adev;
3132 		c_irq_params->irq_src = int_params.irq_source;
3133 
3134 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3135 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3136 	}
3137 #endif
3138 
3139 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3140 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3141 	 * to trigger at end of each vblank, regardless of state of the lock,
3142 	 * matching DCE behaviour.
3143 	 */
3144 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3145 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3146 	     i++) {
3147 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3148 
3149 		if (r) {
3150 			DRM_ERROR("Failed to add vupdate irq id!\n");
3151 			return r;
3152 		}
3153 
3154 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3155 		int_params.irq_source =
3156 			dc_interrupt_to_irq_source(dc, i, 0);
3157 
3158 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3159 
3160 		c_irq_params->adev = adev;
3161 		c_irq_params->irq_src = int_params.irq_source;
3162 
3163 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3164 				dm_vupdate_high_irq, c_irq_params);
3165 	}
3166 
3167 	/* Use GRPH_PFLIP interrupt */
3168 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3169 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3170 			i++) {
3171 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3172 		if (r) {
3173 			DRM_ERROR("Failed to add page flip irq id!\n");
3174 			return r;
3175 		}
3176 
3177 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3178 		int_params.irq_source =
3179 			dc_interrupt_to_irq_source(dc, i, 0);
3180 
3181 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3182 
3183 		c_irq_params->adev = adev;
3184 		c_irq_params->irq_src = int_params.irq_source;
3185 
3186 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3187 				dm_pflip_high_irq, c_irq_params);
3188 
3189 	}
3190 
3191 	/* HPD */
3192 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3193 			&adev->hpd_irq);
3194 	if (r) {
3195 		DRM_ERROR("Failed to add hpd irq id!\n");
3196 		return r;
3197 	}
3198 
3199 	register_hpd_handlers(adev);
3200 
3201 	return 0;
3202 }
3203 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3204 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3205 {
3206 	struct dc *dc = adev->dm.dc;
3207 	struct common_irq_params *c_irq_params;
3208 	struct dc_interrupt_params int_params = {0};
3209 	int r, i;
3210 
3211 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3212 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3213 
3214 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3215 			&adev->dmub_outbox_irq);
3216 	if (r) {
3217 		DRM_ERROR("Failed to add outbox irq id!\n");
3218 		return r;
3219 	}
3220 
3221 	if (dc->ctx->dmub_srv) {
3222 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3223 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3224 		int_params.irq_source =
3225 		dc_interrupt_to_irq_source(dc, i, 0);
3226 
3227 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3228 
3229 		c_irq_params->adev = adev;
3230 		c_irq_params->irq_src = int_params.irq_source;
3231 
3232 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3233 				dm_dmub_outbox1_low_irq, c_irq_params);
3234 	}
3235 
3236 	return 0;
3237 }
3238 #endif
3239 
3240 /*
3241  * Acquires the lock for the atomic state object and returns
3242  * the new atomic state.
3243  *
3244  * This should only be called during atomic check.
3245  */
3246 static int dm_atomic_get_state(struct drm_atomic_state *state,
3247 			       struct dm_atomic_state **dm_state)
3248 {
3249 	struct drm_device *dev = state->dev;
3250 	struct amdgpu_device *adev = drm_to_adev(dev);
3251 	struct amdgpu_display_manager *dm = &adev->dm;
3252 	struct drm_private_state *priv_state;
3253 
3254 	if (*dm_state)
3255 		return 0;
3256 
3257 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3258 	if (IS_ERR(priv_state))
3259 		return PTR_ERR(priv_state);
3260 
3261 	*dm_state = to_dm_atomic_state(priv_state);
3262 
3263 	return 0;
3264 }
3265 
3266 static struct dm_atomic_state *
3267 dm_atomic_get_new_state(struct drm_atomic_state *state)
3268 {
3269 	struct drm_device *dev = state->dev;
3270 	struct amdgpu_device *adev = drm_to_adev(dev);
3271 	struct amdgpu_display_manager *dm = &adev->dm;
3272 	struct drm_private_obj *obj;
3273 	struct drm_private_state *new_obj_state;
3274 	int i;
3275 
3276 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3277 		if (obj->funcs == dm->atomic_obj.funcs)
3278 			return to_dm_atomic_state(new_obj_state);
3279 	}
3280 
3281 	return NULL;
3282 }
3283 
3284 static struct drm_private_state *
3285 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3286 {
3287 	struct dm_atomic_state *old_state, *new_state;
3288 
3289 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3290 	if (!new_state)
3291 		return NULL;
3292 
3293 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3294 
3295 	old_state = to_dm_atomic_state(obj->state);
3296 
3297 	if (old_state && old_state->context)
3298 		new_state->context = dc_copy_state(old_state->context);
3299 
3300 	if (!new_state->context) {
3301 		kfree(new_state);
3302 		return NULL;
3303 	}
3304 
3305 	return &new_state->base;
3306 }
3307 
3308 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3309 				    struct drm_private_state *state)
3310 {
3311 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3312 
3313 	if (dm_state && dm_state->context)
3314 		dc_release_state(dm_state->context);
3315 
3316 	kfree(dm_state);
3317 }
3318 
3319 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3320 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3321 	.atomic_destroy_state = dm_atomic_destroy_state,
3322 };
3323 
3324 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3325 {
3326 	struct dm_atomic_state *state;
3327 	int r;
3328 
3329 	adev->mode_info.mode_config_initialized = true;
3330 
3331 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3332 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3333 
3334 	adev_to_drm(adev)->mode_config.max_width = 16384;
3335 	adev_to_drm(adev)->mode_config.max_height = 16384;
3336 
3337 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3338 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3339 	/* indicates support for immediate flip */
3340 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3341 
3342 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3343 
3344 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3345 	if (!state)
3346 		return -ENOMEM;
3347 
3348 	state->context = dc_create_state(adev->dm.dc);
3349 	if (!state->context) {
3350 		kfree(state);
3351 		return -ENOMEM;
3352 	}
3353 
3354 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3355 
3356 	drm_atomic_private_obj_init(adev_to_drm(adev),
3357 				    &adev->dm.atomic_obj,
3358 				    &state->base,
3359 				    &dm_atomic_state_funcs);
3360 
3361 	r = amdgpu_display_modeset_create_props(adev);
3362 	if (r) {
3363 		dc_release_state(state->context);
3364 		kfree(state);
3365 		return r;
3366 	}
3367 
3368 	r = amdgpu_dm_audio_init(adev);
3369 	if (r) {
3370 		dc_release_state(state->context);
3371 		kfree(state);
3372 		return r;
3373 	}
3374 
3375 	return 0;
3376 }
3377 
3378 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3379 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3380 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3381 
3382 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3383 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3384 
3385 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3386 {
3387 #if defined(CONFIG_ACPI)
3388 	struct amdgpu_dm_backlight_caps caps;
3389 
3390 	memset(&caps, 0, sizeof(caps));
3391 
3392 	if (dm->backlight_caps.caps_valid)
3393 		return;
3394 
3395 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3396 	if (caps.caps_valid) {
3397 		dm->backlight_caps.caps_valid = true;
3398 		if (caps.aux_support)
3399 			return;
3400 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3401 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3402 	} else {
3403 		dm->backlight_caps.min_input_signal =
3404 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3405 		dm->backlight_caps.max_input_signal =
3406 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3407 	}
3408 #else
3409 	if (dm->backlight_caps.aux_support)
3410 		return;
3411 
3412 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3413 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3414 #endif
3415 }
3416 
3417 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3418 				unsigned *min, unsigned *max)
3419 {
3420 	if (!caps)
3421 		return 0;
3422 
3423 	if (caps->aux_support) {
3424 		// Firmware limits are in nits, DC API wants millinits.
3425 		*max = 1000 * caps->aux_max_input_signal;
3426 		*min = 1000 * caps->aux_min_input_signal;
3427 	} else {
3428 		// Firmware limits are 8-bit, PWM control is 16-bit.
3429 		*max = 0x101 * caps->max_input_signal;
3430 		*min = 0x101 * caps->min_input_signal;
3431 	}
3432 	return 1;
3433 }
3434 
3435 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3436 					uint32_t brightness)
3437 {
3438 	unsigned min, max;
3439 
3440 	if (!get_brightness_range(caps, &min, &max))
3441 		return brightness;
3442 
3443 	// Rescale 0..255 to min..max
3444 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3445 				       AMDGPU_MAX_BL_LEVEL);
3446 }
3447 
3448 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3449 				      uint32_t brightness)
3450 {
3451 	unsigned min, max;
3452 
3453 	if (!get_brightness_range(caps, &min, &max))
3454 		return brightness;
3455 
3456 	if (brightness < min)
3457 		return 0;
3458 	// Rescale min..max to 0..255
3459 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3460 				 max - min);
3461 }
3462 
3463 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3464 {
3465 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3466 	struct amdgpu_dm_backlight_caps caps;
3467 	struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3468 	u32 brightness;
3469 	bool rc;
3470 	int i;
3471 
3472 	amdgpu_dm_update_backlight_caps(dm);
3473 	caps = dm->backlight_caps;
3474 
3475 	for (i = 0; i < dm->num_of_edps; i++)
3476 		link[i] = (struct dc_link *)dm->backlight_link[i];
3477 
3478 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3479 	// Change brightness based on AUX property
3480 	if (caps.aux_support) {
3481 		for (i = 0; i < dm->num_of_edps; i++) {
3482 			rc = dc_link_set_backlight_level_nits(link[i], true, brightness,
3483 				AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3484 			if (!rc) {
3485 				DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3486 				break;
3487 			}
3488 		}
3489 	} else {
3490 		for (i = 0; i < dm->num_of_edps; i++) {
3491 			rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness, 0);
3492 			if (!rc) {
3493 				DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
3494 				break;
3495 			}
3496 		}
3497 	}
3498 
3499 	return rc ? 0 : 1;
3500 }
3501 
3502 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3503 {
3504 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3505 	struct amdgpu_dm_backlight_caps caps;
3506 
3507 	amdgpu_dm_update_backlight_caps(dm);
3508 	caps = dm->backlight_caps;
3509 
3510 	if (caps.aux_support) {
3511 		struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3512 		u32 avg, peak;
3513 		bool rc;
3514 
3515 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3516 		if (!rc)
3517 			return bd->props.brightness;
3518 		return convert_brightness_to_user(&caps, avg);
3519 	} else {
3520 		int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3521 
3522 		if (ret == DC_ERROR_UNEXPECTED)
3523 			return bd->props.brightness;
3524 		return convert_brightness_to_user(&caps, ret);
3525 	}
3526 }
3527 
3528 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3529 	.options = BL_CORE_SUSPENDRESUME,
3530 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3531 	.update_status	= amdgpu_dm_backlight_update_status,
3532 };
3533 
3534 static void
3535 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3536 {
3537 	char bl_name[16];
3538 	struct backlight_properties props = { 0 };
3539 
3540 	amdgpu_dm_update_backlight_caps(dm);
3541 
3542 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3543 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3544 	props.type = BACKLIGHT_RAW;
3545 
3546 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3547 		 adev_to_drm(dm->adev)->primary->index);
3548 
3549 	dm->backlight_dev = backlight_device_register(bl_name,
3550 						      adev_to_drm(dm->adev)->dev,
3551 						      dm,
3552 						      &amdgpu_dm_backlight_ops,
3553 						      &props);
3554 
3555 	if (IS_ERR(dm->backlight_dev))
3556 		DRM_ERROR("DM: Backlight registration failed!\n");
3557 	else
3558 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3559 }
3560 
3561 #endif
3562 
3563 static int initialize_plane(struct amdgpu_display_manager *dm,
3564 			    struct amdgpu_mode_info *mode_info, int plane_id,
3565 			    enum drm_plane_type plane_type,
3566 			    const struct dc_plane_cap *plane_cap)
3567 {
3568 	struct drm_plane *plane;
3569 	unsigned long possible_crtcs;
3570 	int ret = 0;
3571 
3572 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3573 	if (!plane) {
3574 		DRM_ERROR("KMS: Failed to allocate plane\n");
3575 		return -ENOMEM;
3576 	}
3577 	plane->type = plane_type;
3578 
3579 	/*
3580 	 * HACK: IGT tests expect that the primary plane for a CRTC
3581 	 * can only have one possible CRTC. Only expose support for
3582 	 * any CRTC if they're not going to be used as a primary plane
3583 	 * for a CRTC - like overlay or underlay planes.
3584 	 */
3585 	possible_crtcs = 1 << plane_id;
3586 	if (plane_id >= dm->dc->caps.max_streams)
3587 		possible_crtcs = 0xff;
3588 
3589 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3590 
3591 	if (ret) {
3592 		DRM_ERROR("KMS: Failed to initialize plane\n");
3593 		kfree(plane);
3594 		return ret;
3595 	}
3596 
3597 	if (mode_info)
3598 		mode_info->planes[plane_id] = plane;
3599 
3600 	return ret;
3601 }
3602 
3603 
3604 static void register_backlight_device(struct amdgpu_display_manager *dm,
3605 				      struct dc_link *link)
3606 {
3607 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3608 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3609 
3610 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3611 	    link->type != dc_connection_none) {
3612 		/*
3613 		 * Event if registration failed, we should continue with
3614 		 * DM initialization because not having a backlight control
3615 		 * is better then a black screen.
3616 		 */
3617 		if (!dm->backlight_dev)
3618 			amdgpu_dm_register_backlight_device(dm);
3619 
3620 		if (dm->backlight_dev) {
3621 			dm->backlight_link[dm->num_of_edps] = link;
3622 			dm->num_of_edps++;
3623 		}
3624 	}
3625 #endif
3626 }
3627 
3628 
3629 /*
3630  * In this architecture, the association
3631  * connector -> encoder -> crtc
3632  * id not really requried. The crtc and connector will hold the
3633  * display_index as an abstraction to use with DAL component
3634  *
3635  * Returns 0 on success
3636  */
3637 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3638 {
3639 	struct amdgpu_display_manager *dm = &adev->dm;
3640 	int32_t i;
3641 	struct amdgpu_dm_connector *aconnector = NULL;
3642 	struct amdgpu_encoder *aencoder = NULL;
3643 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3644 	uint32_t link_cnt;
3645 	int32_t primary_planes;
3646 	enum dc_connection_type new_connection_type = dc_connection_none;
3647 	const struct dc_plane_cap *plane;
3648 
3649 	dm->display_indexes_num = dm->dc->caps.max_streams;
3650 	/* Update the actual used number of crtc */
3651 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3652 
3653 	link_cnt = dm->dc->caps.max_links;
3654 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3655 		DRM_ERROR("DM: Failed to initialize mode config\n");
3656 		return -EINVAL;
3657 	}
3658 
3659 	/* There is one primary plane per CRTC */
3660 	primary_planes = dm->dc->caps.max_streams;
3661 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3662 
3663 	/*
3664 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3665 	 * Order is reversed to match iteration order in atomic check.
3666 	 */
3667 	for (i = (primary_planes - 1); i >= 0; i--) {
3668 		plane = &dm->dc->caps.planes[i];
3669 
3670 		if (initialize_plane(dm, mode_info, i,
3671 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3672 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3673 			goto fail;
3674 		}
3675 	}
3676 
3677 	/*
3678 	 * Initialize overlay planes, index starting after primary planes.
3679 	 * These planes have a higher DRM index than the primary planes since
3680 	 * they should be considered as having a higher z-order.
3681 	 * Order is reversed to match iteration order in atomic check.
3682 	 *
3683 	 * Only support DCN for now, and only expose one so we don't encourage
3684 	 * userspace to use up all the pipes.
3685 	 */
3686 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3687 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3688 
3689 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3690 			continue;
3691 
3692 		if (!plane->blends_with_above || !plane->blends_with_below)
3693 			continue;
3694 
3695 		if (!plane->pixel_format_support.argb8888)
3696 			continue;
3697 
3698 		if (initialize_plane(dm, NULL, primary_planes + i,
3699 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3700 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3701 			goto fail;
3702 		}
3703 
3704 		/* Only create one overlay plane. */
3705 		break;
3706 	}
3707 
3708 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3709 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3710 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3711 			goto fail;
3712 		}
3713 
3714 #if defined(CONFIG_DRM_AMD_DC_DCN)
3715 	/* Use Outbox interrupt */
3716 	switch (adev->asic_type) {
3717 	case CHIP_SIENNA_CICHLID:
3718 	case CHIP_NAVY_FLOUNDER:
3719 	case CHIP_RENOIR:
3720 		if (register_outbox_irq_handlers(dm->adev)) {
3721 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3722 			goto fail;
3723 		}
3724 		break;
3725 	default:
3726 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3727 	}
3728 #endif
3729 
3730 	/* loops over all connectors on the board */
3731 	for (i = 0; i < link_cnt; i++) {
3732 		struct dc_link *link = NULL;
3733 
3734 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3735 			DRM_ERROR(
3736 				"KMS: Cannot support more than %d display indexes\n",
3737 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3738 			continue;
3739 		}
3740 
3741 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3742 		if (!aconnector)
3743 			goto fail;
3744 
3745 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3746 		if (!aencoder)
3747 			goto fail;
3748 
3749 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3750 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3751 			goto fail;
3752 		}
3753 
3754 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3755 			DRM_ERROR("KMS: Failed to initialize connector\n");
3756 			goto fail;
3757 		}
3758 
3759 		link = dc_get_link_at_index(dm->dc, i);
3760 
3761 		if (!dc_link_detect_sink(link, &new_connection_type))
3762 			DRM_ERROR("KMS: Failed to detect connector\n");
3763 
3764 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3765 			emulated_link_detect(link);
3766 			amdgpu_dm_update_connector_after_detect(aconnector);
3767 
3768 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3769 			amdgpu_dm_update_connector_after_detect(aconnector);
3770 			register_backlight_device(dm, link);
3771 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3772 				amdgpu_dm_set_psr_caps(link);
3773 		}
3774 
3775 
3776 	}
3777 
3778 	/* Software is initialized. Now we can register interrupt handlers. */
3779 	switch (adev->asic_type) {
3780 #if defined(CONFIG_DRM_AMD_DC_SI)
3781 	case CHIP_TAHITI:
3782 	case CHIP_PITCAIRN:
3783 	case CHIP_VERDE:
3784 	case CHIP_OLAND:
3785 		if (dce60_register_irq_handlers(dm->adev)) {
3786 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3787 			goto fail;
3788 		}
3789 		break;
3790 #endif
3791 	case CHIP_BONAIRE:
3792 	case CHIP_HAWAII:
3793 	case CHIP_KAVERI:
3794 	case CHIP_KABINI:
3795 	case CHIP_MULLINS:
3796 	case CHIP_TONGA:
3797 	case CHIP_FIJI:
3798 	case CHIP_CARRIZO:
3799 	case CHIP_STONEY:
3800 	case CHIP_POLARIS11:
3801 	case CHIP_POLARIS10:
3802 	case CHIP_POLARIS12:
3803 	case CHIP_VEGAM:
3804 	case CHIP_VEGA10:
3805 	case CHIP_VEGA12:
3806 	case CHIP_VEGA20:
3807 		if (dce110_register_irq_handlers(dm->adev)) {
3808 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3809 			goto fail;
3810 		}
3811 		break;
3812 #if defined(CONFIG_DRM_AMD_DC_DCN)
3813 	case CHIP_RAVEN:
3814 	case CHIP_NAVI12:
3815 	case CHIP_NAVI10:
3816 	case CHIP_NAVI14:
3817 	case CHIP_RENOIR:
3818 	case CHIP_SIENNA_CICHLID:
3819 	case CHIP_NAVY_FLOUNDER:
3820 	case CHIP_DIMGREY_CAVEFISH:
3821 	case CHIP_VANGOGH:
3822 		if (dcn10_register_irq_handlers(dm->adev)) {
3823 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3824 			goto fail;
3825 		}
3826 		break;
3827 #endif
3828 	default:
3829 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3830 		goto fail;
3831 	}
3832 
3833 	return 0;
3834 fail:
3835 	kfree(aencoder);
3836 	kfree(aconnector);
3837 
3838 	return -EINVAL;
3839 }
3840 
3841 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3842 {
3843 	drm_mode_config_cleanup(dm->ddev);
3844 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3845 	return;
3846 }
3847 
3848 /******************************************************************************
3849  * amdgpu_display_funcs functions
3850  *****************************************************************************/
3851 
3852 /*
3853  * dm_bandwidth_update - program display watermarks
3854  *
3855  * @adev: amdgpu_device pointer
3856  *
3857  * Calculate and program the display watermarks and line buffer allocation.
3858  */
3859 static void dm_bandwidth_update(struct amdgpu_device *adev)
3860 {
3861 	/* TODO: implement later */
3862 }
3863 
3864 static const struct amdgpu_display_funcs dm_display_funcs = {
3865 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3866 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3867 	.backlight_set_level = NULL, /* never called for DC */
3868 	.backlight_get_level = NULL, /* never called for DC */
3869 	.hpd_sense = NULL,/* called unconditionally */
3870 	.hpd_set_polarity = NULL, /* called unconditionally */
3871 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3872 	.page_flip_get_scanoutpos =
3873 		dm_crtc_get_scanoutpos,/* called unconditionally */
3874 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3875 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3876 };
3877 
3878 #if defined(CONFIG_DEBUG_KERNEL_DC)
3879 
3880 static ssize_t s3_debug_store(struct device *device,
3881 			      struct device_attribute *attr,
3882 			      const char *buf,
3883 			      size_t count)
3884 {
3885 	int ret;
3886 	int s3_state;
3887 	struct drm_device *drm_dev = dev_get_drvdata(device);
3888 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3889 
3890 	ret = kstrtoint(buf, 0, &s3_state);
3891 
3892 	if (ret == 0) {
3893 		if (s3_state) {
3894 			dm_resume(adev);
3895 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3896 		} else
3897 			dm_suspend(adev);
3898 	}
3899 
3900 	return ret == 0 ? count : 0;
3901 }
3902 
3903 DEVICE_ATTR_WO(s3_debug);
3904 
3905 #endif
3906 
3907 static int dm_early_init(void *handle)
3908 {
3909 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3910 
3911 	switch (adev->asic_type) {
3912 #if defined(CONFIG_DRM_AMD_DC_SI)
3913 	case CHIP_TAHITI:
3914 	case CHIP_PITCAIRN:
3915 	case CHIP_VERDE:
3916 		adev->mode_info.num_crtc = 6;
3917 		adev->mode_info.num_hpd = 6;
3918 		adev->mode_info.num_dig = 6;
3919 		break;
3920 	case CHIP_OLAND:
3921 		adev->mode_info.num_crtc = 2;
3922 		adev->mode_info.num_hpd = 2;
3923 		adev->mode_info.num_dig = 2;
3924 		break;
3925 #endif
3926 	case CHIP_BONAIRE:
3927 	case CHIP_HAWAII:
3928 		adev->mode_info.num_crtc = 6;
3929 		adev->mode_info.num_hpd = 6;
3930 		adev->mode_info.num_dig = 6;
3931 		break;
3932 	case CHIP_KAVERI:
3933 		adev->mode_info.num_crtc = 4;
3934 		adev->mode_info.num_hpd = 6;
3935 		adev->mode_info.num_dig = 7;
3936 		break;
3937 	case CHIP_KABINI:
3938 	case CHIP_MULLINS:
3939 		adev->mode_info.num_crtc = 2;
3940 		adev->mode_info.num_hpd = 6;
3941 		adev->mode_info.num_dig = 6;
3942 		break;
3943 	case CHIP_FIJI:
3944 	case CHIP_TONGA:
3945 		adev->mode_info.num_crtc = 6;
3946 		adev->mode_info.num_hpd = 6;
3947 		adev->mode_info.num_dig = 7;
3948 		break;
3949 	case CHIP_CARRIZO:
3950 		adev->mode_info.num_crtc = 3;
3951 		adev->mode_info.num_hpd = 6;
3952 		adev->mode_info.num_dig = 9;
3953 		break;
3954 	case CHIP_STONEY:
3955 		adev->mode_info.num_crtc = 2;
3956 		adev->mode_info.num_hpd = 6;
3957 		adev->mode_info.num_dig = 9;
3958 		break;
3959 	case CHIP_POLARIS11:
3960 	case CHIP_POLARIS12:
3961 		adev->mode_info.num_crtc = 5;
3962 		adev->mode_info.num_hpd = 5;
3963 		adev->mode_info.num_dig = 5;
3964 		break;
3965 	case CHIP_POLARIS10:
3966 	case CHIP_VEGAM:
3967 		adev->mode_info.num_crtc = 6;
3968 		adev->mode_info.num_hpd = 6;
3969 		adev->mode_info.num_dig = 6;
3970 		break;
3971 	case CHIP_VEGA10:
3972 	case CHIP_VEGA12:
3973 	case CHIP_VEGA20:
3974 		adev->mode_info.num_crtc = 6;
3975 		adev->mode_info.num_hpd = 6;
3976 		adev->mode_info.num_dig = 6;
3977 		break;
3978 #if defined(CONFIG_DRM_AMD_DC_DCN)
3979 	case CHIP_RAVEN:
3980 	case CHIP_RENOIR:
3981 	case CHIP_VANGOGH:
3982 		adev->mode_info.num_crtc = 4;
3983 		adev->mode_info.num_hpd = 4;
3984 		adev->mode_info.num_dig = 4;
3985 		break;
3986 	case CHIP_NAVI10:
3987 	case CHIP_NAVI12:
3988 	case CHIP_SIENNA_CICHLID:
3989 	case CHIP_NAVY_FLOUNDER:
3990 		adev->mode_info.num_crtc = 6;
3991 		adev->mode_info.num_hpd = 6;
3992 		adev->mode_info.num_dig = 6;
3993 		break;
3994 	case CHIP_NAVI14:
3995 	case CHIP_DIMGREY_CAVEFISH:
3996 		adev->mode_info.num_crtc = 5;
3997 		adev->mode_info.num_hpd = 5;
3998 		adev->mode_info.num_dig = 5;
3999 		break;
4000 #endif
4001 	default:
4002 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4003 		return -EINVAL;
4004 	}
4005 
4006 	amdgpu_dm_set_irq_funcs(adev);
4007 
4008 	if (adev->mode_info.funcs == NULL)
4009 		adev->mode_info.funcs = &dm_display_funcs;
4010 
4011 	/*
4012 	 * Note: Do NOT change adev->audio_endpt_rreg and
4013 	 * adev->audio_endpt_wreg because they are initialised in
4014 	 * amdgpu_device_init()
4015 	 */
4016 #if defined(CONFIG_DEBUG_KERNEL_DC)
4017 	device_create_file(
4018 		adev_to_drm(adev)->dev,
4019 		&dev_attr_s3_debug);
4020 #endif
4021 
4022 	return 0;
4023 }
4024 
4025 static bool modeset_required(struct drm_crtc_state *crtc_state,
4026 			     struct dc_stream_state *new_stream,
4027 			     struct dc_stream_state *old_stream)
4028 {
4029 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4030 }
4031 
4032 static bool modereset_required(struct drm_crtc_state *crtc_state)
4033 {
4034 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4035 }
4036 
4037 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4038 {
4039 	drm_encoder_cleanup(encoder);
4040 	kfree(encoder);
4041 }
4042 
4043 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4044 	.destroy = amdgpu_dm_encoder_destroy,
4045 };
4046 
4047 
4048 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4049 					 struct drm_framebuffer *fb,
4050 					 int *min_downscale, int *max_upscale)
4051 {
4052 	struct amdgpu_device *adev = drm_to_adev(dev);
4053 	struct dc *dc = adev->dm.dc;
4054 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4055 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4056 
4057 	switch (fb->format->format) {
4058 	case DRM_FORMAT_P010:
4059 	case DRM_FORMAT_NV12:
4060 	case DRM_FORMAT_NV21:
4061 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4062 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4063 		break;
4064 
4065 	case DRM_FORMAT_XRGB16161616F:
4066 	case DRM_FORMAT_ARGB16161616F:
4067 	case DRM_FORMAT_XBGR16161616F:
4068 	case DRM_FORMAT_ABGR16161616F:
4069 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4070 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4071 		break;
4072 
4073 	default:
4074 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4075 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4076 		break;
4077 	}
4078 
4079 	/*
4080 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4081 	 * scaling factor of 1.0 == 1000 units.
4082 	 */
4083 	if (*max_upscale == 1)
4084 		*max_upscale = 1000;
4085 
4086 	if (*min_downscale == 1)
4087 		*min_downscale = 1000;
4088 }
4089 
4090 
4091 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4092 				struct dc_scaling_info *scaling_info)
4093 {
4094 	int scale_w, scale_h, min_downscale, max_upscale;
4095 
4096 	memset(scaling_info, 0, sizeof(*scaling_info));
4097 
4098 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4099 	scaling_info->src_rect.x = state->src_x >> 16;
4100 	scaling_info->src_rect.y = state->src_y >> 16;
4101 
4102 	/*
4103 	 * For reasons we don't (yet) fully understand a non-zero
4104 	 * src_y coordinate into an NV12 buffer can cause a
4105 	 * system hang. To avoid hangs (and maybe be overly cautious)
4106 	 * let's reject both non-zero src_x and src_y.
4107 	 *
4108 	 * We currently know of only one use-case to reproduce a
4109 	 * scenario with non-zero src_x and src_y for NV12, which
4110 	 * is to gesture the YouTube Android app into full screen
4111 	 * on ChromeOS.
4112 	 */
4113 	if (state->fb &&
4114 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4115 	    (scaling_info->src_rect.x != 0 ||
4116 	     scaling_info->src_rect.y != 0))
4117 		return -EINVAL;
4118 
4119 	scaling_info->src_rect.width = state->src_w >> 16;
4120 	if (scaling_info->src_rect.width == 0)
4121 		return -EINVAL;
4122 
4123 	scaling_info->src_rect.height = state->src_h >> 16;
4124 	if (scaling_info->src_rect.height == 0)
4125 		return -EINVAL;
4126 
4127 	scaling_info->dst_rect.x = state->crtc_x;
4128 	scaling_info->dst_rect.y = state->crtc_y;
4129 
4130 	if (state->crtc_w == 0)
4131 		return -EINVAL;
4132 
4133 	scaling_info->dst_rect.width = state->crtc_w;
4134 
4135 	if (state->crtc_h == 0)
4136 		return -EINVAL;
4137 
4138 	scaling_info->dst_rect.height = state->crtc_h;
4139 
4140 	/* DRM doesn't specify clipping on destination output. */
4141 	scaling_info->clip_rect = scaling_info->dst_rect;
4142 
4143 	/* Validate scaling per-format with DC plane caps */
4144 	if (state->plane && state->plane->dev && state->fb) {
4145 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4146 					     &min_downscale, &max_upscale);
4147 	} else {
4148 		min_downscale = 250;
4149 		max_upscale = 16000;
4150 	}
4151 
4152 	scale_w = scaling_info->dst_rect.width * 1000 /
4153 		  scaling_info->src_rect.width;
4154 
4155 	if (scale_w < min_downscale || scale_w > max_upscale)
4156 		return -EINVAL;
4157 
4158 	scale_h = scaling_info->dst_rect.height * 1000 /
4159 		  scaling_info->src_rect.height;
4160 
4161 	if (scale_h < min_downscale || scale_h > max_upscale)
4162 		return -EINVAL;
4163 
4164 	/*
4165 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4166 	 * assume reasonable defaults based on the format.
4167 	 */
4168 
4169 	return 0;
4170 }
4171 
4172 static void
4173 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4174 				 uint64_t tiling_flags)
4175 {
4176 	/* Fill GFX8 params */
4177 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4178 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4179 
4180 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4181 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4182 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4183 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4184 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4185 
4186 		/* XXX fix me for VI */
4187 		tiling_info->gfx8.num_banks = num_banks;
4188 		tiling_info->gfx8.array_mode =
4189 				DC_ARRAY_2D_TILED_THIN1;
4190 		tiling_info->gfx8.tile_split = tile_split;
4191 		tiling_info->gfx8.bank_width = bankw;
4192 		tiling_info->gfx8.bank_height = bankh;
4193 		tiling_info->gfx8.tile_aspect = mtaspect;
4194 		tiling_info->gfx8.tile_mode =
4195 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4196 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4197 			== DC_ARRAY_1D_TILED_THIN1) {
4198 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4199 	}
4200 
4201 	tiling_info->gfx8.pipe_config =
4202 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4203 }
4204 
4205 static void
4206 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4207 				  union dc_tiling_info *tiling_info)
4208 {
4209 	tiling_info->gfx9.num_pipes =
4210 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4211 	tiling_info->gfx9.num_banks =
4212 		adev->gfx.config.gb_addr_config_fields.num_banks;
4213 	tiling_info->gfx9.pipe_interleave =
4214 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4215 	tiling_info->gfx9.num_shader_engines =
4216 		adev->gfx.config.gb_addr_config_fields.num_se;
4217 	tiling_info->gfx9.max_compressed_frags =
4218 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4219 	tiling_info->gfx9.num_rb_per_se =
4220 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4221 	tiling_info->gfx9.shaderEnable = 1;
4222 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4223 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4224 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4225 	    adev->asic_type == CHIP_VANGOGH)
4226 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4227 }
4228 
4229 static int
4230 validate_dcc(struct amdgpu_device *adev,
4231 	     const enum surface_pixel_format format,
4232 	     const enum dc_rotation_angle rotation,
4233 	     const union dc_tiling_info *tiling_info,
4234 	     const struct dc_plane_dcc_param *dcc,
4235 	     const struct dc_plane_address *address,
4236 	     const struct plane_size *plane_size)
4237 {
4238 	struct dc *dc = adev->dm.dc;
4239 	struct dc_dcc_surface_param input;
4240 	struct dc_surface_dcc_cap output;
4241 
4242 	memset(&input, 0, sizeof(input));
4243 	memset(&output, 0, sizeof(output));
4244 
4245 	if (!dcc->enable)
4246 		return 0;
4247 
4248 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4249 	    !dc->cap_funcs.get_dcc_compression_cap)
4250 		return -EINVAL;
4251 
4252 	input.format = format;
4253 	input.surface_size.width = plane_size->surface_size.width;
4254 	input.surface_size.height = plane_size->surface_size.height;
4255 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4256 
4257 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4258 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4259 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4260 		input.scan = SCAN_DIRECTION_VERTICAL;
4261 
4262 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4263 		return -EINVAL;
4264 
4265 	if (!output.capable)
4266 		return -EINVAL;
4267 
4268 	if (dcc->independent_64b_blks == 0 &&
4269 	    output.grph.rgb.independent_64b_blks != 0)
4270 		return -EINVAL;
4271 
4272 	return 0;
4273 }
4274 
4275 static bool
4276 modifier_has_dcc(uint64_t modifier)
4277 {
4278 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4279 }
4280 
4281 static unsigned
4282 modifier_gfx9_swizzle_mode(uint64_t modifier)
4283 {
4284 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4285 		return 0;
4286 
4287 	return AMD_FMT_MOD_GET(TILE, modifier);
4288 }
4289 
4290 static const struct drm_format_info *
4291 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4292 {
4293 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4294 }
4295 
4296 static void
4297 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4298 				    union dc_tiling_info *tiling_info,
4299 				    uint64_t modifier)
4300 {
4301 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4302 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4303 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4304 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4305 
4306 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4307 
4308 	if (!IS_AMD_FMT_MOD(modifier))
4309 		return;
4310 
4311 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4312 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4313 
4314 	if (adev->family >= AMDGPU_FAMILY_NV) {
4315 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4316 	} else {
4317 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4318 
4319 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4320 	}
4321 }
4322 
4323 enum dm_micro_swizzle {
4324 	MICRO_SWIZZLE_Z = 0,
4325 	MICRO_SWIZZLE_S = 1,
4326 	MICRO_SWIZZLE_D = 2,
4327 	MICRO_SWIZZLE_R = 3
4328 };
4329 
4330 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4331 					  uint32_t format,
4332 					  uint64_t modifier)
4333 {
4334 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4335 	const struct drm_format_info *info = drm_format_info(format);
4336 	int i;
4337 
4338 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4339 
4340 	if (!info)
4341 		return false;
4342 
4343 	/*
4344 	 * We always have to allow these modifiers:
4345 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4346 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4347 	 */
4348 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4349 	    modifier == DRM_FORMAT_MOD_INVALID) {
4350 		return true;
4351 	}
4352 
4353 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4354 	for (i = 0; i < plane->modifier_count; i++) {
4355 		if (modifier == plane->modifiers[i])
4356 			break;
4357 	}
4358 	if (i == plane->modifier_count)
4359 		return false;
4360 
4361 	/*
4362 	 * For D swizzle the canonical modifier depends on the bpp, so check
4363 	 * it here.
4364 	 */
4365 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4366 	    adev->family >= AMDGPU_FAMILY_NV) {
4367 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4368 			return false;
4369 	}
4370 
4371 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4372 	    info->cpp[0] < 8)
4373 		return false;
4374 
4375 	if (modifier_has_dcc(modifier)) {
4376 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4377 		if (info->cpp[0] != 4)
4378 			return false;
4379 		/* We support multi-planar formats, but not when combined with
4380 		 * additional DCC metadata planes. */
4381 		if (info->num_planes > 1)
4382 			return false;
4383 	}
4384 
4385 	return true;
4386 }
4387 
4388 static void
4389 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4390 {
4391 	if (!*mods)
4392 		return;
4393 
4394 	if (*cap - *size < 1) {
4395 		uint64_t new_cap = *cap * 2;
4396 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4397 
4398 		if (!new_mods) {
4399 			kfree(*mods);
4400 			*mods = NULL;
4401 			return;
4402 		}
4403 
4404 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4405 		kfree(*mods);
4406 		*mods = new_mods;
4407 		*cap = new_cap;
4408 	}
4409 
4410 	(*mods)[*size] = mod;
4411 	*size += 1;
4412 }
4413 
4414 static void
4415 add_gfx9_modifiers(const struct amdgpu_device *adev,
4416 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4417 {
4418 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4419 	int pipe_xor_bits = min(8, pipes +
4420 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4421 	int bank_xor_bits = min(8 - pipe_xor_bits,
4422 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4423 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4424 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4425 
4426 
4427 	if (adev->family == AMDGPU_FAMILY_RV) {
4428 		/* Raven2 and later */
4429 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4430 
4431 		/*
4432 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4433 		 * doesn't support _D on DCN
4434 		 */
4435 
4436 		if (has_constant_encode) {
4437 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4438 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4439 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4440 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4441 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4442 				    AMD_FMT_MOD_SET(DCC, 1) |
4443 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4444 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4445 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4446 		}
4447 
4448 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4449 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4450 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4451 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4452 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4453 			    AMD_FMT_MOD_SET(DCC, 1) |
4454 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4455 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4456 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4457 
4458 		if (has_constant_encode) {
4459 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4460 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4461 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4462 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4463 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4464 				    AMD_FMT_MOD_SET(DCC, 1) |
4465 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4466 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4467 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4468 
4469 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4470 				    AMD_FMT_MOD_SET(RB, rb) |
4471 				    AMD_FMT_MOD_SET(PIPE, pipes));
4472 		}
4473 
4474 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4475 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4476 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4477 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4478 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4479 			    AMD_FMT_MOD_SET(DCC, 1) |
4480 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4481 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4482 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4483 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4484 			    AMD_FMT_MOD_SET(RB, rb) |
4485 			    AMD_FMT_MOD_SET(PIPE, pipes));
4486 	}
4487 
4488 	/*
4489 	 * Only supported for 64bpp on Raven, will be filtered on format in
4490 	 * dm_plane_format_mod_supported.
4491 	 */
4492 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4493 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4494 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4495 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4496 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4497 
4498 	if (adev->family == AMDGPU_FAMILY_RV) {
4499 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4500 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4501 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4502 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4503 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4504 	}
4505 
4506 	/*
4507 	 * Only supported for 64bpp on Raven, will be filtered on format in
4508 	 * dm_plane_format_mod_supported.
4509 	 */
4510 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4511 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4512 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4513 
4514 	if (adev->family == AMDGPU_FAMILY_RV) {
4515 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4516 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4517 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4518 	}
4519 }
4520 
4521 static void
4522 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4523 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4524 {
4525 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4526 
4527 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4528 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4529 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4530 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4531 		    AMD_FMT_MOD_SET(DCC, 1) |
4532 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4533 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4534 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4535 
4536 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4537 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4538 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4539 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4540 		    AMD_FMT_MOD_SET(DCC, 1) |
4541 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4542 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4543 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4544 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4545 
4546 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4547 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4548 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4549 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4550 
4551 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4552 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4553 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4554 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4555 
4556 
4557 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4558 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4559 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4560 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4561 
4562 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4563 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4564 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4565 }
4566 
4567 static void
4568 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4569 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4570 {
4571 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4572 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4573 
4574 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4575 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4576 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4577 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4578 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4579 		    AMD_FMT_MOD_SET(DCC, 1) |
4580 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4581 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4582 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4583 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4584 
4585 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4586 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4587 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4588 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4589 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4590 		    AMD_FMT_MOD_SET(DCC, 1) |
4591 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4592 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4593 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4594 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4595 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4596 
4597 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4598 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4599 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4600 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4601 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4602 
4603 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4604 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4605 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4606 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4607 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4608 
4609 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4610 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4611 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4612 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4613 
4614 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4615 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4616 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4617 }
4618 
4619 static int
4620 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4621 {
4622 	uint64_t size = 0, capacity = 128;
4623 	*mods = NULL;
4624 
4625 	/* We have not hooked up any pre-GFX9 modifiers. */
4626 	if (adev->family < AMDGPU_FAMILY_AI)
4627 		return 0;
4628 
4629 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4630 
4631 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4632 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4633 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4634 		return *mods ? 0 : -ENOMEM;
4635 	}
4636 
4637 	switch (adev->family) {
4638 	case AMDGPU_FAMILY_AI:
4639 	case AMDGPU_FAMILY_RV:
4640 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4641 		break;
4642 	case AMDGPU_FAMILY_NV:
4643 	case AMDGPU_FAMILY_VGH:
4644 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4645 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4646 		else
4647 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4648 		break;
4649 	}
4650 
4651 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4652 
4653 	/* INVALID marks the end of the list. */
4654 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4655 
4656 	if (!*mods)
4657 		return -ENOMEM;
4658 
4659 	return 0;
4660 }
4661 
4662 static int
4663 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4664 					  const struct amdgpu_framebuffer *afb,
4665 					  const enum surface_pixel_format format,
4666 					  const enum dc_rotation_angle rotation,
4667 					  const struct plane_size *plane_size,
4668 					  union dc_tiling_info *tiling_info,
4669 					  struct dc_plane_dcc_param *dcc,
4670 					  struct dc_plane_address *address,
4671 					  const bool force_disable_dcc)
4672 {
4673 	const uint64_t modifier = afb->base.modifier;
4674 	int ret;
4675 
4676 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4677 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4678 
4679 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4680 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4681 
4682 		dcc->enable = 1;
4683 		dcc->meta_pitch = afb->base.pitches[1];
4684 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4685 
4686 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4687 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4688 	}
4689 
4690 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4691 	if (ret)
4692 		return ret;
4693 
4694 	return 0;
4695 }
4696 
4697 static int
4698 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4699 			     const struct amdgpu_framebuffer *afb,
4700 			     const enum surface_pixel_format format,
4701 			     const enum dc_rotation_angle rotation,
4702 			     const uint64_t tiling_flags,
4703 			     union dc_tiling_info *tiling_info,
4704 			     struct plane_size *plane_size,
4705 			     struct dc_plane_dcc_param *dcc,
4706 			     struct dc_plane_address *address,
4707 			     bool tmz_surface,
4708 			     bool force_disable_dcc)
4709 {
4710 	const struct drm_framebuffer *fb = &afb->base;
4711 	int ret;
4712 
4713 	memset(tiling_info, 0, sizeof(*tiling_info));
4714 	memset(plane_size, 0, sizeof(*plane_size));
4715 	memset(dcc, 0, sizeof(*dcc));
4716 	memset(address, 0, sizeof(*address));
4717 
4718 	address->tmz_surface = tmz_surface;
4719 
4720 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4721 		uint64_t addr = afb->address + fb->offsets[0];
4722 
4723 		plane_size->surface_size.x = 0;
4724 		plane_size->surface_size.y = 0;
4725 		plane_size->surface_size.width = fb->width;
4726 		plane_size->surface_size.height = fb->height;
4727 		plane_size->surface_pitch =
4728 			fb->pitches[0] / fb->format->cpp[0];
4729 
4730 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4731 		address->grph.addr.low_part = lower_32_bits(addr);
4732 		address->grph.addr.high_part = upper_32_bits(addr);
4733 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4734 		uint64_t luma_addr = afb->address + fb->offsets[0];
4735 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4736 
4737 		plane_size->surface_size.x = 0;
4738 		plane_size->surface_size.y = 0;
4739 		plane_size->surface_size.width = fb->width;
4740 		plane_size->surface_size.height = fb->height;
4741 		plane_size->surface_pitch =
4742 			fb->pitches[0] / fb->format->cpp[0];
4743 
4744 		plane_size->chroma_size.x = 0;
4745 		plane_size->chroma_size.y = 0;
4746 		/* TODO: set these based on surface format */
4747 		plane_size->chroma_size.width = fb->width / 2;
4748 		plane_size->chroma_size.height = fb->height / 2;
4749 
4750 		plane_size->chroma_pitch =
4751 			fb->pitches[1] / fb->format->cpp[1];
4752 
4753 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4754 		address->video_progressive.luma_addr.low_part =
4755 			lower_32_bits(luma_addr);
4756 		address->video_progressive.luma_addr.high_part =
4757 			upper_32_bits(luma_addr);
4758 		address->video_progressive.chroma_addr.low_part =
4759 			lower_32_bits(chroma_addr);
4760 		address->video_progressive.chroma_addr.high_part =
4761 			upper_32_bits(chroma_addr);
4762 	}
4763 
4764 	if (adev->family >= AMDGPU_FAMILY_AI) {
4765 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4766 								rotation, plane_size,
4767 								tiling_info, dcc,
4768 								address,
4769 								force_disable_dcc);
4770 		if (ret)
4771 			return ret;
4772 	} else {
4773 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4774 	}
4775 
4776 	return 0;
4777 }
4778 
4779 static void
4780 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4781 			       bool *per_pixel_alpha, bool *global_alpha,
4782 			       int *global_alpha_value)
4783 {
4784 	*per_pixel_alpha = false;
4785 	*global_alpha = false;
4786 	*global_alpha_value = 0xff;
4787 
4788 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4789 		return;
4790 
4791 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4792 		static const uint32_t alpha_formats[] = {
4793 			DRM_FORMAT_ARGB8888,
4794 			DRM_FORMAT_RGBA8888,
4795 			DRM_FORMAT_ABGR8888,
4796 		};
4797 		uint32_t format = plane_state->fb->format->format;
4798 		unsigned int i;
4799 
4800 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4801 			if (format == alpha_formats[i]) {
4802 				*per_pixel_alpha = true;
4803 				break;
4804 			}
4805 		}
4806 	}
4807 
4808 	if (plane_state->alpha < 0xffff) {
4809 		*global_alpha = true;
4810 		*global_alpha_value = plane_state->alpha >> 8;
4811 	}
4812 }
4813 
4814 static int
4815 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4816 			    const enum surface_pixel_format format,
4817 			    enum dc_color_space *color_space)
4818 {
4819 	bool full_range;
4820 
4821 	*color_space = COLOR_SPACE_SRGB;
4822 
4823 	/* DRM color properties only affect non-RGB formats. */
4824 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4825 		return 0;
4826 
4827 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4828 
4829 	switch (plane_state->color_encoding) {
4830 	case DRM_COLOR_YCBCR_BT601:
4831 		if (full_range)
4832 			*color_space = COLOR_SPACE_YCBCR601;
4833 		else
4834 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4835 		break;
4836 
4837 	case DRM_COLOR_YCBCR_BT709:
4838 		if (full_range)
4839 			*color_space = COLOR_SPACE_YCBCR709;
4840 		else
4841 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4842 		break;
4843 
4844 	case DRM_COLOR_YCBCR_BT2020:
4845 		if (full_range)
4846 			*color_space = COLOR_SPACE_2020_YCBCR;
4847 		else
4848 			return -EINVAL;
4849 		break;
4850 
4851 	default:
4852 		return -EINVAL;
4853 	}
4854 
4855 	return 0;
4856 }
4857 
4858 static int
4859 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4860 			    const struct drm_plane_state *plane_state,
4861 			    const uint64_t tiling_flags,
4862 			    struct dc_plane_info *plane_info,
4863 			    struct dc_plane_address *address,
4864 			    bool tmz_surface,
4865 			    bool force_disable_dcc)
4866 {
4867 	const struct drm_framebuffer *fb = plane_state->fb;
4868 	const struct amdgpu_framebuffer *afb =
4869 		to_amdgpu_framebuffer(plane_state->fb);
4870 	int ret;
4871 
4872 	memset(plane_info, 0, sizeof(*plane_info));
4873 
4874 	switch (fb->format->format) {
4875 	case DRM_FORMAT_C8:
4876 		plane_info->format =
4877 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4878 		break;
4879 	case DRM_FORMAT_RGB565:
4880 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4881 		break;
4882 	case DRM_FORMAT_XRGB8888:
4883 	case DRM_FORMAT_ARGB8888:
4884 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4885 		break;
4886 	case DRM_FORMAT_XRGB2101010:
4887 	case DRM_FORMAT_ARGB2101010:
4888 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4889 		break;
4890 	case DRM_FORMAT_XBGR2101010:
4891 	case DRM_FORMAT_ABGR2101010:
4892 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4893 		break;
4894 	case DRM_FORMAT_XBGR8888:
4895 	case DRM_FORMAT_ABGR8888:
4896 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4897 		break;
4898 	case DRM_FORMAT_NV21:
4899 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4900 		break;
4901 	case DRM_FORMAT_NV12:
4902 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4903 		break;
4904 	case DRM_FORMAT_P010:
4905 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4906 		break;
4907 	case DRM_FORMAT_XRGB16161616F:
4908 	case DRM_FORMAT_ARGB16161616F:
4909 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4910 		break;
4911 	case DRM_FORMAT_XBGR16161616F:
4912 	case DRM_FORMAT_ABGR16161616F:
4913 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4914 		break;
4915 	default:
4916 		DRM_ERROR(
4917 			"Unsupported screen format %p4cc\n",
4918 			&fb->format->format);
4919 		return -EINVAL;
4920 	}
4921 
4922 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4923 	case DRM_MODE_ROTATE_0:
4924 		plane_info->rotation = ROTATION_ANGLE_0;
4925 		break;
4926 	case DRM_MODE_ROTATE_90:
4927 		plane_info->rotation = ROTATION_ANGLE_90;
4928 		break;
4929 	case DRM_MODE_ROTATE_180:
4930 		plane_info->rotation = ROTATION_ANGLE_180;
4931 		break;
4932 	case DRM_MODE_ROTATE_270:
4933 		plane_info->rotation = ROTATION_ANGLE_270;
4934 		break;
4935 	default:
4936 		plane_info->rotation = ROTATION_ANGLE_0;
4937 		break;
4938 	}
4939 
4940 	plane_info->visible = true;
4941 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4942 
4943 	plane_info->layer_index = 0;
4944 
4945 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4946 					  &plane_info->color_space);
4947 	if (ret)
4948 		return ret;
4949 
4950 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4951 					   plane_info->rotation, tiling_flags,
4952 					   &plane_info->tiling_info,
4953 					   &plane_info->plane_size,
4954 					   &plane_info->dcc, address, tmz_surface,
4955 					   force_disable_dcc);
4956 	if (ret)
4957 		return ret;
4958 
4959 	fill_blending_from_plane_state(
4960 		plane_state, &plane_info->per_pixel_alpha,
4961 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4962 
4963 	return 0;
4964 }
4965 
4966 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4967 				    struct dc_plane_state *dc_plane_state,
4968 				    struct drm_plane_state *plane_state,
4969 				    struct drm_crtc_state *crtc_state)
4970 {
4971 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4972 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4973 	struct dc_scaling_info scaling_info;
4974 	struct dc_plane_info plane_info;
4975 	int ret;
4976 	bool force_disable_dcc = false;
4977 
4978 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4979 	if (ret)
4980 		return ret;
4981 
4982 	dc_plane_state->src_rect = scaling_info.src_rect;
4983 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4984 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4985 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4986 
4987 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4988 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4989 					  afb->tiling_flags,
4990 					  &plane_info,
4991 					  &dc_plane_state->address,
4992 					  afb->tmz_surface,
4993 					  force_disable_dcc);
4994 	if (ret)
4995 		return ret;
4996 
4997 	dc_plane_state->format = plane_info.format;
4998 	dc_plane_state->color_space = plane_info.color_space;
4999 	dc_plane_state->format = plane_info.format;
5000 	dc_plane_state->plane_size = plane_info.plane_size;
5001 	dc_plane_state->rotation = plane_info.rotation;
5002 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5003 	dc_plane_state->stereo_format = plane_info.stereo_format;
5004 	dc_plane_state->tiling_info = plane_info.tiling_info;
5005 	dc_plane_state->visible = plane_info.visible;
5006 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5007 	dc_plane_state->global_alpha = plane_info.global_alpha;
5008 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5009 	dc_plane_state->dcc = plane_info.dcc;
5010 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5011 	dc_plane_state->flip_int_enabled = true;
5012 
5013 	/*
5014 	 * Always set input transfer function, since plane state is refreshed
5015 	 * every time.
5016 	 */
5017 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5018 	if (ret)
5019 		return ret;
5020 
5021 	return 0;
5022 }
5023 
5024 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5025 					   const struct dm_connector_state *dm_state,
5026 					   struct dc_stream_state *stream)
5027 {
5028 	enum amdgpu_rmx_type rmx_type;
5029 
5030 	struct rect src = { 0 }; /* viewport in composition space*/
5031 	struct rect dst = { 0 }; /* stream addressable area */
5032 
5033 	/* no mode. nothing to be done */
5034 	if (!mode)
5035 		return;
5036 
5037 	/* Full screen scaling by default */
5038 	src.width = mode->hdisplay;
5039 	src.height = mode->vdisplay;
5040 	dst.width = stream->timing.h_addressable;
5041 	dst.height = stream->timing.v_addressable;
5042 
5043 	if (dm_state) {
5044 		rmx_type = dm_state->scaling;
5045 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5046 			if (src.width * dst.height <
5047 					src.height * dst.width) {
5048 				/* height needs less upscaling/more downscaling */
5049 				dst.width = src.width *
5050 						dst.height / src.height;
5051 			} else {
5052 				/* width needs less upscaling/more downscaling */
5053 				dst.height = src.height *
5054 						dst.width / src.width;
5055 			}
5056 		} else if (rmx_type == RMX_CENTER) {
5057 			dst = src;
5058 		}
5059 
5060 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5061 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5062 
5063 		if (dm_state->underscan_enable) {
5064 			dst.x += dm_state->underscan_hborder / 2;
5065 			dst.y += dm_state->underscan_vborder / 2;
5066 			dst.width -= dm_state->underscan_hborder;
5067 			dst.height -= dm_state->underscan_vborder;
5068 		}
5069 	}
5070 
5071 	stream->src = src;
5072 	stream->dst = dst;
5073 
5074 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5075 		      dst.x, dst.y, dst.width, dst.height);
5076 
5077 }
5078 
5079 static enum dc_color_depth
5080 convert_color_depth_from_display_info(const struct drm_connector *connector,
5081 				      bool is_y420, int requested_bpc)
5082 {
5083 	uint8_t bpc;
5084 
5085 	if (is_y420) {
5086 		bpc = 8;
5087 
5088 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5089 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5090 			bpc = 16;
5091 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5092 			bpc = 12;
5093 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5094 			bpc = 10;
5095 	} else {
5096 		bpc = (uint8_t)connector->display_info.bpc;
5097 		/* Assume 8 bpc by default if no bpc is specified. */
5098 		bpc = bpc ? bpc : 8;
5099 	}
5100 
5101 	if (requested_bpc > 0) {
5102 		/*
5103 		 * Cap display bpc based on the user requested value.
5104 		 *
5105 		 * The value for state->max_bpc may not correctly updated
5106 		 * depending on when the connector gets added to the state
5107 		 * or if this was called outside of atomic check, so it
5108 		 * can't be used directly.
5109 		 */
5110 		bpc = min_t(u8, bpc, requested_bpc);
5111 
5112 		/* Round down to the nearest even number. */
5113 		bpc = bpc - (bpc & 1);
5114 	}
5115 
5116 	switch (bpc) {
5117 	case 0:
5118 		/*
5119 		 * Temporary Work around, DRM doesn't parse color depth for
5120 		 * EDID revision before 1.4
5121 		 * TODO: Fix edid parsing
5122 		 */
5123 		return COLOR_DEPTH_888;
5124 	case 6:
5125 		return COLOR_DEPTH_666;
5126 	case 8:
5127 		return COLOR_DEPTH_888;
5128 	case 10:
5129 		return COLOR_DEPTH_101010;
5130 	case 12:
5131 		return COLOR_DEPTH_121212;
5132 	case 14:
5133 		return COLOR_DEPTH_141414;
5134 	case 16:
5135 		return COLOR_DEPTH_161616;
5136 	default:
5137 		return COLOR_DEPTH_UNDEFINED;
5138 	}
5139 }
5140 
5141 static enum dc_aspect_ratio
5142 get_aspect_ratio(const struct drm_display_mode *mode_in)
5143 {
5144 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5145 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5146 }
5147 
5148 static enum dc_color_space
5149 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5150 {
5151 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5152 
5153 	switch (dc_crtc_timing->pixel_encoding)	{
5154 	case PIXEL_ENCODING_YCBCR422:
5155 	case PIXEL_ENCODING_YCBCR444:
5156 	case PIXEL_ENCODING_YCBCR420:
5157 	{
5158 		/*
5159 		 * 27030khz is the separation point between HDTV and SDTV
5160 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5161 		 * respectively
5162 		 */
5163 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5164 			if (dc_crtc_timing->flags.Y_ONLY)
5165 				color_space =
5166 					COLOR_SPACE_YCBCR709_LIMITED;
5167 			else
5168 				color_space = COLOR_SPACE_YCBCR709;
5169 		} else {
5170 			if (dc_crtc_timing->flags.Y_ONLY)
5171 				color_space =
5172 					COLOR_SPACE_YCBCR601_LIMITED;
5173 			else
5174 				color_space = COLOR_SPACE_YCBCR601;
5175 		}
5176 
5177 	}
5178 	break;
5179 	case PIXEL_ENCODING_RGB:
5180 		color_space = COLOR_SPACE_SRGB;
5181 		break;
5182 
5183 	default:
5184 		WARN_ON(1);
5185 		break;
5186 	}
5187 
5188 	return color_space;
5189 }
5190 
5191 static bool adjust_colour_depth_from_display_info(
5192 	struct dc_crtc_timing *timing_out,
5193 	const struct drm_display_info *info)
5194 {
5195 	enum dc_color_depth depth = timing_out->display_color_depth;
5196 	int normalized_clk;
5197 	do {
5198 		normalized_clk = timing_out->pix_clk_100hz / 10;
5199 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5200 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5201 			normalized_clk /= 2;
5202 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5203 		switch (depth) {
5204 		case COLOR_DEPTH_888:
5205 			break;
5206 		case COLOR_DEPTH_101010:
5207 			normalized_clk = (normalized_clk * 30) / 24;
5208 			break;
5209 		case COLOR_DEPTH_121212:
5210 			normalized_clk = (normalized_clk * 36) / 24;
5211 			break;
5212 		case COLOR_DEPTH_161616:
5213 			normalized_clk = (normalized_clk * 48) / 24;
5214 			break;
5215 		default:
5216 			/* The above depths are the only ones valid for HDMI. */
5217 			return false;
5218 		}
5219 		if (normalized_clk <= info->max_tmds_clock) {
5220 			timing_out->display_color_depth = depth;
5221 			return true;
5222 		}
5223 	} while (--depth > COLOR_DEPTH_666);
5224 	return false;
5225 }
5226 
5227 static void fill_stream_properties_from_drm_display_mode(
5228 	struct dc_stream_state *stream,
5229 	const struct drm_display_mode *mode_in,
5230 	const struct drm_connector *connector,
5231 	const struct drm_connector_state *connector_state,
5232 	const struct dc_stream_state *old_stream,
5233 	int requested_bpc)
5234 {
5235 	struct dc_crtc_timing *timing_out = &stream->timing;
5236 	const struct drm_display_info *info = &connector->display_info;
5237 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5238 	struct hdmi_vendor_infoframe hv_frame;
5239 	struct hdmi_avi_infoframe avi_frame;
5240 
5241 	memset(&hv_frame, 0, sizeof(hv_frame));
5242 	memset(&avi_frame, 0, sizeof(avi_frame));
5243 
5244 	timing_out->h_border_left = 0;
5245 	timing_out->h_border_right = 0;
5246 	timing_out->v_border_top = 0;
5247 	timing_out->v_border_bottom = 0;
5248 	/* TODO: un-hardcode */
5249 	if (drm_mode_is_420_only(info, mode_in)
5250 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5251 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5252 	else if (drm_mode_is_420_also(info, mode_in)
5253 			&& aconnector->force_yuv420_output)
5254 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5255 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5256 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5257 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5258 	else
5259 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5260 
5261 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5262 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5263 		connector,
5264 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5265 		requested_bpc);
5266 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5267 	timing_out->hdmi_vic = 0;
5268 
5269 	if(old_stream) {
5270 		timing_out->vic = old_stream->timing.vic;
5271 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5272 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5273 	} else {
5274 		timing_out->vic = drm_match_cea_mode(mode_in);
5275 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5276 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5277 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5278 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5279 	}
5280 
5281 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5282 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5283 		timing_out->vic = avi_frame.video_code;
5284 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5285 		timing_out->hdmi_vic = hv_frame.vic;
5286 	}
5287 
5288 	if (is_freesync_video_mode(mode_in, aconnector)) {
5289 		timing_out->h_addressable = mode_in->hdisplay;
5290 		timing_out->h_total = mode_in->htotal;
5291 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5292 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5293 		timing_out->v_total = mode_in->vtotal;
5294 		timing_out->v_addressable = mode_in->vdisplay;
5295 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5296 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5297 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5298 	} else {
5299 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5300 		timing_out->h_total = mode_in->crtc_htotal;
5301 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5302 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5303 		timing_out->v_total = mode_in->crtc_vtotal;
5304 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5305 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5306 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5307 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5308 	}
5309 
5310 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5311 
5312 	stream->output_color_space = get_output_color_space(timing_out);
5313 
5314 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5315 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5316 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5317 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5318 		    drm_mode_is_420_also(info, mode_in) &&
5319 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5320 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5321 			adjust_colour_depth_from_display_info(timing_out, info);
5322 		}
5323 	}
5324 }
5325 
5326 static void fill_audio_info(struct audio_info *audio_info,
5327 			    const struct drm_connector *drm_connector,
5328 			    const struct dc_sink *dc_sink)
5329 {
5330 	int i = 0;
5331 	int cea_revision = 0;
5332 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5333 
5334 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5335 	audio_info->product_id = edid_caps->product_id;
5336 
5337 	cea_revision = drm_connector->display_info.cea_rev;
5338 
5339 	strscpy(audio_info->display_name,
5340 		edid_caps->display_name,
5341 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5342 
5343 	if (cea_revision >= 3) {
5344 		audio_info->mode_count = edid_caps->audio_mode_count;
5345 
5346 		for (i = 0; i < audio_info->mode_count; ++i) {
5347 			audio_info->modes[i].format_code =
5348 					(enum audio_format_code)
5349 					(edid_caps->audio_modes[i].format_code);
5350 			audio_info->modes[i].channel_count =
5351 					edid_caps->audio_modes[i].channel_count;
5352 			audio_info->modes[i].sample_rates.all =
5353 					edid_caps->audio_modes[i].sample_rate;
5354 			audio_info->modes[i].sample_size =
5355 					edid_caps->audio_modes[i].sample_size;
5356 		}
5357 	}
5358 
5359 	audio_info->flags.all = edid_caps->speaker_flags;
5360 
5361 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5362 	if (drm_connector->latency_present[0]) {
5363 		audio_info->video_latency = drm_connector->video_latency[0];
5364 		audio_info->audio_latency = drm_connector->audio_latency[0];
5365 	}
5366 
5367 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5368 
5369 }
5370 
5371 static void
5372 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5373 				      struct drm_display_mode *dst_mode)
5374 {
5375 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5376 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5377 	dst_mode->crtc_clock = src_mode->crtc_clock;
5378 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5379 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5380 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5381 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5382 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5383 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5384 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5385 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5386 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5387 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5388 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5389 }
5390 
5391 static void
5392 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5393 					const struct drm_display_mode *native_mode,
5394 					bool scale_enabled)
5395 {
5396 	if (scale_enabled) {
5397 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5398 	} else if (native_mode->clock == drm_mode->clock &&
5399 			native_mode->htotal == drm_mode->htotal &&
5400 			native_mode->vtotal == drm_mode->vtotal) {
5401 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5402 	} else {
5403 		/* no scaling nor amdgpu inserted, no need to patch */
5404 	}
5405 }
5406 
5407 static struct dc_sink *
5408 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5409 {
5410 	struct dc_sink_init_data sink_init_data = { 0 };
5411 	struct dc_sink *sink = NULL;
5412 	sink_init_data.link = aconnector->dc_link;
5413 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5414 
5415 	sink = dc_sink_create(&sink_init_data);
5416 	if (!sink) {
5417 		DRM_ERROR("Failed to create sink!\n");
5418 		return NULL;
5419 	}
5420 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5421 
5422 	return sink;
5423 }
5424 
5425 static void set_multisync_trigger_params(
5426 		struct dc_stream_state *stream)
5427 {
5428 	struct dc_stream_state *master = NULL;
5429 
5430 	if (stream->triggered_crtc_reset.enabled) {
5431 		master = stream->triggered_crtc_reset.event_source;
5432 		stream->triggered_crtc_reset.event =
5433 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5434 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5435 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5436 	}
5437 }
5438 
5439 static void set_master_stream(struct dc_stream_state *stream_set[],
5440 			      int stream_count)
5441 {
5442 	int j, highest_rfr = 0, master_stream = 0;
5443 
5444 	for (j = 0;  j < stream_count; j++) {
5445 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5446 			int refresh_rate = 0;
5447 
5448 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5449 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5450 			if (refresh_rate > highest_rfr) {
5451 				highest_rfr = refresh_rate;
5452 				master_stream = j;
5453 			}
5454 		}
5455 	}
5456 	for (j = 0;  j < stream_count; j++) {
5457 		if (stream_set[j])
5458 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5459 	}
5460 }
5461 
5462 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5463 {
5464 	int i = 0;
5465 	struct dc_stream_state *stream;
5466 
5467 	if (context->stream_count < 2)
5468 		return;
5469 	for (i = 0; i < context->stream_count ; i++) {
5470 		if (!context->streams[i])
5471 			continue;
5472 		/*
5473 		 * TODO: add a function to read AMD VSDB bits and set
5474 		 * crtc_sync_master.multi_sync_enabled flag
5475 		 * For now it's set to false
5476 		 */
5477 	}
5478 
5479 	set_master_stream(context->streams, context->stream_count);
5480 
5481 	for (i = 0; i < context->stream_count ; i++) {
5482 		stream = context->streams[i];
5483 
5484 		if (!stream)
5485 			continue;
5486 
5487 		set_multisync_trigger_params(stream);
5488 	}
5489 }
5490 
5491 static struct drm_display_mode *
5492 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5493 			  bool use_probed_modes)
5494 {
5495 	struct drm_display_mode *m, *m_pref = NULL;
5496 	u16 current_refresh, highest_refresh;
5497 	struct list_head *list_head = use_probed_modes ?
5498 						    &aconnector->base.probed_modes :
5499 						    &aconnector->base.modes;
5500 
5501 	if (aconnector->freesync_vid_base.clock != 0)
5502 		return &aconnector->freesync_vid_base;
5503 
5504 	/* Find the preferred mode */
5505 	list_for_each_entry (m, list_head, head) {
5506 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5507 			m_pref = m;
5508 			break;
5509 		}
5510 	}
5511 
5512 	if (!m_pref) {
5513 		/* Probably an EDID with no preferred mode. Fallback to first entry */
5514 		m_pref = list_first_entry_or_null(
5515 			&aconnector->base.modes, struct drm_display_mode, head);
5516 		if (!m_pref) {
5517 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5518 			return NULL;
5519 		}
5520 	}
5521 
5522 	highest_refresh = drm_mode_vrefresh(m_pref);
5523 
5524 	/*
5525 	 * Find the mode with highest refresh rate with same resolution.
5526 	 * For some monitors, preferred mode is not the mode with highest
5527 	 * supported refresh rate.
5528 	 */
5529 	list_for_each_entry (m, list_head, head) {
5530 		current_refresh  = drm_mode_vrefresh(m);
5531 
5532 		if (m->hdisplay == m_pref->hdisplay &&
5533 		    m->vdisplay == m_pref->vdisplay &&
5534 		    highest_refresh < current_refresh) {
5535 			highest_refresh = current_refresh;
5536 			m_pref = m;
5537 		}
5538 	}
5539 
5540 	aconnector->freesync_vid_base = *m_pref;
5541 	return m_pref;
5542 }
5543 
5544 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5545 				   struct amdgpu_dm_connector *aconnector)
5546 {
5547 	struct drm_display_mode *high_mode;
5548 	int timing_diff;
5549 
5550 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5551 	if (!high_mode || !mode)
5552 		return false;
5553 
5554 	timing_diff = high_mode->vtotal - mode->vtotal;
5555 
5556 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5557 	    high_mode->hdisplay != mode->hdisplay ||
5558 	    high_mode->vdisplay != mode->vdisplay ||
5559 	    high_mode->hsync_start != mode->hsync_start ||
5560 	    high_mode->hsync_end != mode->hsync_end ||
5561 	    high_mode->htotal != mode->htotal ||
5562 	    high_mode->hskew != mode->hskew ||
5563 	    high_mode->vscan != mode->vscan ||
5564 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5565 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5566 		return false;
5567 	else
5568 		return true;
5569 }
5570 
5571 static struct dc_stream_state *
5572 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5573 		       const struct drm_display_mode *drm_mode,
5574 		       const struct dm_connector_state *dm_state,
5575 		       const struct dc_stream_state *old_stream,
5576 		       int requested_bpc)
5577 {
5578 	struct drm_display_mode *preferred_mode = NULL;
5579 	struct drm_connector *drm_connector;
5580 	const struct drm_connector_state *con_state =
5581 		dm_state ? &dm_state->base : NULL;
5582 	struct dc_stream_state *stream = NULL;
5583 	struct drm_display_mode mode = *drm_mode;
5584 	struct drm_display_mode saved_mode;
5585 	struct drm_display_mode *freesync_mode = NULL;
5586 	bool native_mode_found = false;
5587 	bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5588 	int mode_refresh;
5589 	int preferred_refresh = 0;
5590 #if defined(CONFIG_DRM_AMD_DC_DCN)
5591 	struct dsc_dec_dpcd_caps dsc_caps;
5592 	uint32_t link_bandwidth_kbps;
5593 #endif
5594 	struct dc_sink *sink = NULL;
5595 
5596 	memset(&saved_mode, 0, sizeof(saved_mode));
5597 
5598 	if (aconnector == NULL) {
5599 		DRM_ERROR("aconnector is NULL!\n");
5600 		return stream;
5601 	}
5602 
5603 	drm_connector = &aconnector->base;
5604 
5605 	if (!aconnector->dc_sink) {
5606 		sink = create_fake_sink(aconnector);
5607 		if (!sink)
5608 			return stream;
5609 	} else {
5610 		sink = aconnector->dc_sink;
5611 		dc_sink_retain(sink);
5612 	}
5613 
5614 	stream = dc_create_stream_for_sink(sink);
5615 
5616 	if (stream == NULL) {
5617 		DRM_ERROR("Failed to create stream for sink!\n");
5618 		goto finish;
5619 	}
5620 
5621 	stream->dm_stream_context = aconnector;
5622 
5623 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5624 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5625 
5626 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5627 		/* Search for preferred mode */
5628 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5629 			native_mode_found = true;
5630 			break;
5631 		}
5632 	}
5633 	if (!native_mode_found)
5634 		preferred_mode = list_first_entry_or_null(
5635 				&aconnector->base.modes,
5636 				struct drm_display_mode,
5637 				head);
5638 
5639 	mode_refresh = drm_mode_vrefresh(&mode);
5640 
5641 	if (preferred_mode == NULL) {
5642 		/*
5643 		 * This may not be an error, the use case is when we have no
5644 		 * usermode calls to reset and set mode upon hotplug. In this
5645 		 * case, we call set mode ourselves to restore the previous mode
5646 		 * and the modelist may not be filled in in time.
5647 		 */
5648 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5649 	} else {
5650 		recalculate_timing |= amdgpu_freesync_vid_mode &&
5651 				 is_freesync_video_mode(&mode, aconnector);
5652 		if (recalculate_timing) {
5653 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5654 			saved_mode = mode;
5655 			mode = *freesync_mode;
5656 		} else {
5657 			decide_crtc_timing_for_drm_display_mode(
5658 				&mode, preferred_mode,
5659 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5660 		}
5661 
5662 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5663 	}
5664 
5665 	if (recalculate_timing)
5666 		drm_mode_set_crtcinfo(&saved_mode, 0);
5667 	else if (!dm_state)
5668 		drm_mode_set_crtcinfo(&mode, 0);
5669 
5670        /*
5671 	* If scaling is enabled and refresh rate didn't change
5672 	* we copy the vic and polarities of the old timings
5673 	*/
5674 	if (!recalculate_timing || mode_refresh != preferred_refresh)
5675 		fill_stream_properties_from_drm_display_mode(
5676 			stream, &mode, &aconnector->base, con_state, NULL,
5677 			requested_bpc);
5678 	else
5679 		fill_stream_properties_from_drm_display_mode(
5680 			stream, &mode, &aconnector->base, con_state, old_stream,
5681 			requested_bpc);
5682 
5683 	stream->timing.flags.DSC = 0;
5684 
5685 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5686 #if defined(CONFIG_DRM_AMD_DC_DCN)
5687 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5688 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5689 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5690 				      &dsc_caps);
5691 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5692 							     dc_link_get_link_cap(aconnector->dc_link));
5693 
5694 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5695 			/* Set DSC policy according to dsc_clock_en */
5696 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5697 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5698 
5699 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5700 						  &dsc_caps,
5701 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5702 						  0,
5703 						  link_bandwidth_kbps,
5704 						  &stream->timing,
5705 						  &stream->timing.dsc_cfg))
5706 				stream->timing.flags.DSC = 1;
5707 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5708 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5709 				stream->timing.flags.DSC = 1;
5710 
5711 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5712 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5713 
5714 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5715 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5716 
5717 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5718 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5719 		}
5720 #endif
5721 	}
5722 
5723 	update_stream_scaling_settings(&mode, dm_state, stream);
5724 
5725 	fill_audio_info(
5726 		&stream->audio_info,
5727 		drm_connector,
5728 		sink);
5729 
5730 	update_stream_signal(stream, sink);
5731 
5732 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5733 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5734 
5735 	if (stream->link->psr_settings.psr_feature_enabled) {
5736 		//
5737 		// should decide stream support vsc sdp colorimetry capability
5738 		// before building vsc info packet
5739 		//
5740 		stream->use_vsc_sdp_for_colorimetry = false;
5741 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5742 			stream->use_vsc_sdp_for_colorimetry =
5743 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5744 		} else {
5745 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5746 				stream->use_vsc_sdp_for_colorimetry = true;
5747 		}
5748 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5749 	}
5750 finish:
5751 	dc_sink_release(sink);
5752 
5753 	return stream;
5754 }
5755 
5756 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5757 {
5758 	drm_crtc_cleanup(crtc);
5759 	kfree(crtc);
5760 }
5761 
5762 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5763 				  struct drm_crtc_state *state)
5764 {
5765 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5766 
5767 	/* TODO Destroy dc_stream objects are stream object is flattened */
5768 	if (cur->stream)
5769 		dc_stream_release(cur->stream);
5770 
5771 
5772 	__drm_atomic_helper_crtc_destroy_state(state);
5773 
5774 
5775 	kfree(state);
5776 }
5777 
5778 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5779 {
5780 	struct dm_crtc_state *state;
5781 
5782 	if (crtc->state)
5783 		dm_crtc_destroy_state(crtc, crtc->state);
5784 
5785 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5786 	if (WARN_ON(!state))
5787 		return;
5788 
5789 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5790 }
5791 
5792 static struct drm_crtc_state *
5793 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5794 {
5795 	struct dm_crtc_state *state, *cur;
5796 
5797 	cur = to_dm_crtc_state(crtc->state);
5798 
5799 	if (WARN_ON(!crtc->state))
5800 		return NULL;
5801 
5802 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5803 	if (!state)
5804 		return NULL;
5805 
5806 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5807 
5808 	if (cur->stream) {
5809 		state->stream = cur->stream;
5810 		dc_stream_retain(state->stream);
5811 	}
5812 
5813 	state->active_planes = cur->active_planes;
5814 	state->vrr_infopacket = cur->vrr_infopacket;
5815 	state->abm_level = cur->abm_level;
5816 	state->vrr_supported = cur->vrr_supported;
5817 	state->freesync_config = cur->freesync_config;
5818 	state->cm_has_degamma = cur->cm_has_degamma;
5819 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5820 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
5821 
5822 	return &state->base;
5823 }
5824 
5825 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5826 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5827 {
5828 	crtc_debugfs_init(crtc);
5829 
5830 	return 0;
5831 }
5832 #endif
5833 
5834 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5835 {
5836 	enum dc_irq_source irq_source;
5837 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5838 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5839 	int rc;
5840 
5841 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5842 
5843 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5844 
5845 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5846 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
5847 	return rc;
5848 }
5849 
5850 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5851 {
5852 	enum dc_irq_source irq_source;
5853 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5854 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5855 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5856 #if defined(CONFIG_DRM_AMD_DC_DCN)
5857 	struct amdgpu_display_manager *dm = &adev->dm;
5858 	unsigned long flags;
5859 #endif
5860 	int rc = 0;
5861 
5862 	if (enable) {
5863 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5864 		if (amdgpu_dm_vrr_active(acrtc_state))
5865 			rc = dm_set_vupdate_irq(crtc, true);
5866 	} else {
5867 		/* vblank irq off -> vupdate irq off */
5868 		rc = dm_set_vupdate_irq(crtc, false);
5869 	}
5870 
5871 	if (rc)
5872 		return rc;
5873 
5874 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5875 
5876 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5877 		return -EBUSY;
5878 
5879 	if (amdgpu_in_reset(adev))
5880 		return 0;
5881 
5882 #if defined(CONFIG_DRM_AMD_DC_DCN)
5883 	spin_lock_irqsave(&dm->vblank_lock, flags);
5884 	dm->vblank_workqueue->dm = dm;
5885 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5886 	dm->vblank_workqueue->enable = enable;
5887 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
5888 	schedule_work(&dm->vblank_workqueue->mall_work);
5889 #endif
5890 
5891 	return 0;
5892 }
5893 
5894 static int dm_enable_vblank(struct drm_crtc *crtc)
5895 {
5896 	return dm_set_vblank(crtc, true);
5897 }
5898 
5899 static void dm_disable_vblank(struct drm_crtc *crtc)
5900 {
5901 	dm_set_vblank(crtc, false);
5902 }
5903 
5904 /* Implemented only the options currently availible for the driver */
5905 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5906 	.reset = dm_crtc_reset_state,
5907 	.destroy = amdgpu_dm_crtc_destroy,
5908 	.set_config = drm_atomic_helper_set_config,
5909 	.page_flip = drm_atomic_helper_page_flip,
5910 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5911 	.atomic_destroy_state = dm_crtc_destroy_state,
5912 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5913 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5914 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5915 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5916 	.enable_vblank = dm_enable_vblank,
5917 	.disable_vblank = dm_disable_vblank,
5918 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5919 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5920 	.late_register = amdgpu_dm_crtc_late_register,
5921 #endif
5922 };
5923 
5924 static enum drm_connector_status
5925 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5926 {
5927 	bool connected;
5928 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5929 
5930 	/*
5931 	 * Notes:
5932 	 * 1. This interface is NOT called in context of HPD irq.
5933 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5934 	 * makes it a bad place for *any* MST-related activity.
5935 	 */
5936 
5937 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5938 	    !aconnector->fake_enable)
5939 		connected = (aconnector->dc_sink != NULL);
5940 	else
5941 		connected = (aconnector->base.force == DRM_FORCE_ON);
5942 
5943 	update_subconnector_property(aconnector);
5944 
5945 	return (connected ? connector_status_connected :
5946 			connector_status_disconnected);
5947 }
5948 
5949 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5950 					    struct drm_connector_state *connector_state,
5951 					    struct drm_property *property,
5952 					    uint64_t val)
5953 {
5954 	struct drm_device *dev = connector->dev;
5955 	struct amdgpu_device *adev = drm_to_adev(dev);
5956 	struct dm_connector_state *dm_old_state =
5957 		to_dm_connector_state(connector->state);
5958 	struct dm_connector_state *dm_new_state =
5959 		to_dm_connector_state(connector_state);
5960 
5961 	int ret = -EINVAL;
5962 
5963 	if (property == dev->mode_config.scaling_mode_property) {
5964 		enum amdgpu_rmx_type rmx_type;
5965 
5966 		switch (val) {
5967 		case DRM_MODE_SCALE_CENTER:
5968 			rmx_type = RMX_CENTER;
5969 			break;
5970 		case DRM_MODE_SCALE_ASPECT:
5971 			rmx_type = RMX_ASPECT;
5972 			break;
5973 		case DRM_MODE_SCALE_FULLSCREEN:
5974 			rmx_type = RMX_FULL;
5975 			break;
5976 		case DRM_MODE_SCALE_NONE:
5977 		default:
5978 			rmx_type = RMX_OFF;
5979 			break;
5980 		}
5981 
5982 		if (dm_old_state->scaling == rmx_type)
5983 			return 0;
5984 
5985 		dm_new_state->scaling = rmx_type;
5986 		ret = 0;
5987 	} else if (property == adev->mode_info.underscan_hborder_property) {
5988 		dm_new_state->underscan_hborder = val;
5989 		ret = 0;
5990 	} else if (property == adev->mode_info.underscan_vborder_property) {
5991 		dm_new_state->underscan_vborder = val;
5992 		ret = 0;
5993 	} else if (property == adev->mode_info.underscan_property) {
5994 		dm_new_state->underscan_enable = val;
5995 		ret = 0;
5996 	} else if (property == adev->mode_info.abm_level_property) {
5997 		dm_new_state->abm_level = val;
5998 		ret = 0;
5999 	}
6000 
6001 	return ret;
6002 }
6003 
6004 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6005 					    const struct drm_connector_state *state,
6006 					    struct drm_property *property,
6007 					    uint64_t *val)
6008 {
6009 	struct drm_device *dev = connector->dev;
6010 	struct amdgpu_device *adev = drm_to_adev(dev);
6011 	struct dm_connector_state *dm_state =
6012 		to_dm_connector_state(state);
6013 	int ret = -EINVAL;
6014 
6015 	if (property == dev->mode_config.scaling_mode_property) {
6016 		switch (dm_state->scaling) {
6017 		case RMX_CENTER:
6018 			*val = DRM_MODE_SCALE_CENTER;
6019 			break;
6020 		case RMX_ASPECT:
6021 			*val = DRM_MODE_SCALE_ASPECT;
6022 			break;
6023 		case RMX_FULL:
6024 			*val = DRM_MODE_SCALE_FULLSCREEN;
6025 			break;
6026 		case RMX_OFF:
6027 		default:
6028 			*val = DRM_MODE_SCALE_NONE;
6029 			break;
6030 		}
6031 		ret = 0;
6032 	} else if (property == adev->mode_info.underscan_hborder_property) {
6033 		*val = dm_state->underscan_hborder;
6034 		ret = 0;
6035 	} else if (property == adev->mode_info.underscan_vborder_property) {
6036 		*val = dm_state->underscan_vborder;
6037 		ret = 0;
6038 	} else if (property == adev->mode_info.underscan_property) {
6039 		*val = dm_state->underscan_enable;
6040 		ret = 0;
6041 	} else if (property == adev->mode_info.abm_level_property) {
6042 		*val = dm_state->abm_level;
6043 		ret = 0;
6044 	}
6045 
6046 	return ret;
6047 }
6048 
6049 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6050 {
6051 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6052 
6053 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6054 }
6055 
6056 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6057 {
6058 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6059 	const struct dc_link *link = aconnector->dc_link;
6060 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6061 	struct amdgpu_display_manager *dm = &adev->dm;
6062 
6063 	/*
6064 	 * Call only if mst_mgr was iniitalized before since it's not done
6065 	 * for all connector types.
6066 	 */
6067 	if (aconnector->mst_mgr.dev)
6068 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6069 
6070 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6071 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6072 
6073 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6074 	    link->type != dc_connection_none &&
6075 	    dm->backlight_dev) {
6076 		backlight_device_unregister(dm->backlight_dev);
6077 		dm->backlight_dev = NULL;
6078 	}
6079 #endif
6080 
6081 	if (aconnector->dc_em_sink)
6082 		dc_sink_release(aconnector->dc_em_sink);
6083 	aconnector->dc_em_sink = NULL;
6084 	if (aconnector->dc_sink)
6085 		dc_sink_release(aconnector->dc_sink);
6086 	aconnector->dc_sink = NULL;
6087 
6088 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6089 	drm_connector_unregister(connector);
6090 	drm_connector_cleanup(connector);
6091 	if (aconnector->i2c) {
6092 		i2c_del_adapter(&aconnector->i2c->base);
6093 		kfree(aconnector->i2c);
6094 	}
6095 	kfree(aconnector->dm_dp_aux.aux.name);
6096 
6097 	kfree(connector);
6098 }
6099 
6100 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6101 {
6102 	struct dm_connector_state *state =
6103 		to_dm_connector_state(connector->state);
6104 
6105 	if (connector->state)
6106 		__drm_atomic_helper_connector_destroy_state(connector->state);
6107 
6108 	kfree(state);
6109 
6110 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6111 
6112 	if (state) {
6113 		state->scaling = RMX_OFF;
6114 		state->underscan_enable = false;
6115 		state->underscan_hborder = 0;
6116 		state->underscan_vborder = 0;
6117 		state->base.max_requested_bpc = 8;
6118 		state->vcpi_slots = 0;
6119 		state->pbn = 0;
6120 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6121 			state->abm_level = amdgpu_dm_abm_level;
6122 
6123 		__drm_atomic_helper_connector_reset(connector, &state->base);
6124 	}
6125 }
6126 
6127 struct drm_connector_state *
6128 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6129 {
6130 	struct dm_connector_state *state =
6131 		to_dm_connector_state(connector->state);
6132 
6133 	struct dm_connector_state *new_state =
6134 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6135 
6136 	if (!new_state)
6137 		return NULL;
6138 
6139 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6140 
6141 	new_state->freesync_capable = state->freesync_capable;
6142 	new_state->abm_level = state->abm_level;
6143 	new_state->scaling = state->scaling;
6144 	new_state->underscan_enable = state->underscan_enable;
6145 	new_state->underscan_hborder = state->underscan_hborder;
6146 	new_state->underscan_vborder = state->underscan_vborder;
6147 	new_state->vcpi_slots = state->vcpi_slots;
6148 	new_state->pbn = state->pbn;
6149 	return &new_state->base;
6150 }
6151 
6152 static int
6153 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6154 {
6155 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6156 		to_amdgpu_dm_connector(connector);
6157 	int r;
6158 
6159 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6160 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6161 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6162 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6163 		if (r)
6164 			return r;
6165 	}
6166 
6167 #if defined(CONFIG_DEBUG_FS)
6168 	connector_debugfs_init(amdgpu_dm_connector);
6169 #endif
6170 
6171 	return 0;
6172 }
6173 
6174 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6175 	.reset = amdgpu_dm_connector_funcs_reset,
6176 	.detect = amdgpu_dm_connector_detect,
6177 	.fill_modes = drm_helper_probe_single_connector_modes,
6178 	.destroy = amdgpu_dm_connector_destroy,
6179 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6180 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6181 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6182 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6183 	.late_register = amdgpu_dm_connector_late_register,
6184 	.early_unregister = amdgpu_dm_connector_unregister
6185 };
6186 
6187 static int get_modes(struct drm_connector *connector)
6188 {
6189 	return amdgpu_dm_connector_get_modes(connector);
6190 }
6191 
6192 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6193 {
6194 	struct dc_sink_init_data init_params = {
6195 			.link = aconnector->dc_link,
6196 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6197 	};
6198 	struct edid *edid;
6199 
6200 	if (!aconnector->base.edid_blob_ptr) {
6201 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6202 				aconnector->base.name);
6203 
6204 		aconnector->base.force = DRM_FORCE_OFF;
6205 		aconnector->base.override_edid = false;
6206 		return;
6207 	}
6208 
6209 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6210 
6211 	aconnector->edid = edid;
6212 
6213 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6214 		aconnector->dc_link,
6215 		(uint8_t *)edid,
6216 		(edid->extensions + 1) * EDID_LENGTH,
6217 		&init_params);
6218 
6219 	if (aconnector->base.force == DRM_FORCE_ON) {
6220 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6221 		aconnector->dc_link->local_sink :
6222 		aconnector->dc_em_sink;
6223 		dc_sink_retain(aconnector->dc_sink);
6224 	}
6225 }
6226 
6227 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6228 {
6229 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6230 
6231 	/*
6232 	 * In case of headless boot with force on for DP managed connector
6233 	 * Those settings have to be != 0 to get initial modeset
6234 	 */
6235 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6236 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6237 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6238 	}
6239 
6240 
6241 	aconnector->base.override_edid = true;
6242 	create_eml_sink(aconnector);
6243 }
6244 
6245 static struct dc_stream_state *
6246 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6247 				const struct drm_display_mode *drm_mode,
6248 				const struct dm_connector_state *dm_state,
6249 				const struct dc_stream_state *old_stream)
6250 {
6251 	struct drm_connector *connector = &aconnector->base;
6252 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6253 	struct dc_stream_state *stream;
6254 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6255 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6256 	enum dc_status dc_result = DC_OK;
6257 
6258 	do {
6259 		stream = create_stream_for_sink(aconnector, drm_mode,
6260 						dm_state, old_stream,
6261 						requested_bpc);
6262 		if (stream == NULL) {
6263 			DRM_ERROR("Failed to create stream for sink!\n");
6264 			break;
6265 		}
6266 
6267 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6268 
6269 		if (dc_result != DC_OK) {
6270 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6271 				      drm_mode->hdisplay,
6272 				      drm_mode->vdisplay,
6273 				      drm_mode->clock,
6274 				      dc_result,
6275 				      dc_status_to_str(dc_result));
6276 
6277 			dc_stream_release(stream);
6278 			stream = NULL;
6279 			requested_bpc -= 2; /* lower bpc to retry validation */
6280 		}
6281 
6282 	} while (stream == NULL && requested_bpc >= 6);
6283 
6284 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6285 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6286 
6287 		aconnector->force_yuv420_output = true;
6288 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6289 						dm_state, old_stream);
6290 		aconnector->force_yuv420_output = false;
6291 	}
6292 
6293 	return stream;
6294 }
6295 
6296 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6297 				   struct drm_display_mode *mode)
6298 {
6299 	int result = MODE_ERROR;
6300 	struct dc_sink *dc_sink;
6301 	/* TODO: Unhardcode stream count */
6302 	struct dc_stream_state *stream;
6303 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6304 
6305 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6306 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6307 		return result;
6308 
6309 	/*
6310 	 * Only run this the first time mode_valid is called to initilialize
6311 	 * EDID mgmt
6312 	 */
6313 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6314 		!aconnector->dc_em_sink)
6315 		handle_edid_mgmt(aconnector);
6316 
6317 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6318 
6319 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6320 				aconnector->base.force != DRM_FORCE_ON) {
6321 		DRM_ERROR("dc_sink is NULL!\n");
6322 		goto fail;
6323 	}
6324 
6325 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6326 	if (stream) {
6327 		dc_stream_release(stream);
6328 		result = MODE_OK;
6329 	}
6330 
6331 fail:
6332 	/* TODO: error handling*/
6333 	return result;
6334 }
6335 
6336 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6337 				struct dc_info_packet *out)
6338 {
6339 	struct hdmi_drm_infoframe frame;
6340 	unsigned char buf[30]; /* 26 + 4 */
6341 	ssize_t len;
6342 	int ret, i;
6343 
6344 	memset(out, 0, sizeof(*out));
6345 
6346 	if (!state->hdr_output_metadata)
6347 		return 0;
6348 
6349 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6350 	if (ret)
6351 		return ret;
6352 
6353 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6354 	if (len < 0)
6355 		return (int)len;
6356 
6357 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6358 	if (len != 30)
6359 		return -EINVAL;
6360 
6361 	/* Prepare the infopacket for DC. */
6362 	switch (state->connector->connector_type) {
6363 	case DRM_MODE_CONNECTOR_HDMIA:
6364 		out->hb0 = 0x87; /* type */
6365 		out->hb1 = 0x01; /* version */
6366 		out->hb2 = 0x1A; /* length */
6367 		out->sb[0] = buf[3]; /* checksum */
6368 		i = 1;
6369 		break;
6370 
6371 	case DRM_MODE_CONNECTOR_DisplayPort:
6372 	case DRM_MODE_CONNECTOR_eDP:
6373 		out->hb0 = 0x00; /* sdp id, zero */
6374 		out->hb1 = 0x87; /* type */
6375 		out->hb2 = 0x1D; /* payload len - 1 */
6376 		out->hb3 = (0x13 << 2); /* sdp version */
6377 		out->sb[0] = 0x01; /* version */
6378 		out->sb[1] = 0x1A; /* length */
6379 		i = 2;
6380 		break;
6381 
6382 	default:
6383 		return -EINVAL;
6384 	}
6385 
6386 	memcpy(&out->sb[i], &buf[4], 26);
6387 	out->valid = true;
6388 
6389 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6390 		       sizeof(out->sb), false);
6391 
6392 	return 0;
6393 }
6394 
6395 static bool
6396 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6397 			  const struct drm_connector_state *new_state)
6398 {
6399 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6400 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6401 
6402 	if (old_blob != new_blob) {
6403 		if (old_blob && new_blob &&
6404 		    old_blob->length == new_blob->length)
6405 			return memcmp(old_blob->data, new_blob->data,
6406 				      old_blob->length);
6407 
6408 		return true;
6409 	}
6410 
6411 	return false;
6412 }
6413 
6414 static int
6415 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6416 				 struct drm_atomic_state *state)
6417 {
6418 	struct drm_connector_state *new_con_state =
6419 		drm_atomic_get_new_connector_state(state, conn);
6420 	struct drm_connector_state *old_con_state =
6421 		drm_atomic_get_old_connector_state(state, conn);
6422 	struct drm_crtc *crtc = new_con_state->crtc;
6423 	struct drm_crtc_state *new_crtc_state;
6424 	int ret;
6425 
6426 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6427 
6428 	if (!crtc)
6429 		return 0;
6430 
6431 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6432 		struct dc_info_packet hdr_infopacket;
6433 
6434 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6435 		if (ret)
6436 			return ret;
6437 
6438 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6439 		if (IS_ERR(new_crtc_state))
6440 			return PTR_ERR(new_crtc_state);
6441 
6442 		/*
6443 		 * DC considers the stream backends changed if the
6444 		 * static metadata changes. Forcing the modeset also
6445 		 * gives a simple way for userspace to switch from
6446 		 * 8bpc to 10bpc when setting the metadata to enter
6447 		 * or exit HDR.
6448 		 *
6449 		 * Changing the static metadata after it's been
6450 		 * set is permissible, however. So only force a
6451 		 * modeset if we're entering or exiting HDR.
6452 		 */
6453 		new_crtc_state->mode_changed =
6454 			!old_con_state->hdr_output_metadata ||
6455 			!new_con_state->hdr_output_metadata;
6456 	}
6457 
6458 	return 0;
6459 }
6460 
6461 static const struct drm_connector_helper_funcs
6462 amdgpu_dm_connector_helper_funcs = {
6463 	/*
6464 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6465 	 * modes will be filtered by drm_mode_validate_size(), and those modes
6466 	 * are missing after user start lightdm. So we need to renew modes list.
6467 	 * in get_modes call back, not just return the modes count
6468 	 */
6469 	.get_modes = get_modes,
6470 	.mode_valid = amdgpu_dm_connector_mode_valid,
6471 	.atomic_check = amdgpu_dm_connector_atomic_check,
6472 };
6473 
6474 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6475 {
6476 }
6477 
6478 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6479 {
6480 	struct drm_atomic_state *state = new_crtc_state->state;
6481 	struct drm_plane *plane;
6482 	int num_active = 0;
6483 
6484 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6485 		struct drm_plane_state *new_plane_state;
6486 
6487 		/* Cursor planes are "fake". */
6488 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6489 			continue;
6490 
6491 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6492 
6493 		if (!new_plane_state) {
6494 			/*
6495 			 * The plane is enable on the CRTC and hasn't changed
6496 			 * state. This means that it previously passed
6497 			 * validation and is therefore enabled.
6498 			 */
6499 			num_active += 1;
6500 			continue;
6501 		}
6502 
6503 		/* We need a framebuffer to be considered enabled. */
6504 		num_active += (new_plane_state->fb != NULL);
6505 	}
6506 
6507 	return num_active;
6508 }
6509 
6510 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6511 					 struct drm_crtc_state *new_crtc_state)
6512 {
6513 	struct dm_crtc_state *dm_new_crtc_state =
6514 		to_dm_crtc_state(new_crtc_state);
6515 
6516 	dm_new_crtc_state->active_planes = 0;
6517 
6518 	if (!dm_new_crtc_state->stream)
6519 		return;
6520 
6521 	dm_new_crtc_state->active_planes =
6522 		count_crtc_active_planes(new_crtc_state);
6523 }
6524 
6525 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6526 				       struct drm_atomic_state *state)
6527 {
6528 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6529 									  crtc);
6530 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6531 	struct dc *dc = adev->dm.dc;
6532 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6533 	int ret = -EINVAL;
6534 
6535 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6536 
6537 	dm_update_crtc_active_planes(crtc, crtc_state);
6538 
6539 	if (unlikely(!dm_crtc_state->stream &&
6540 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6541 		WARN_ON(1);
6542 		return ret;
6543 	}
6544 
6545 	/*
6546 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6547 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6548 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6549 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6550 	 */
6551 	if (crtc_state->enable &&
6552 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6553 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6554 		return -EINVAL;
6555 	}
6556 
6557 	/* In some use cases, like reset, no stream is attached */
6558 	if (!dm_crtc_state->stream)
6559 		return 0;
6560 
6561 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6562 		return 0;
6563 
6564 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6565 	return ret;
6566 }
6567 
6568 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6569 				      const struct drm_display_mode *mode,
6570 				      struct drm_display_mode *adjusted_mode)
6571 {
6572 	return true;
6573 }
6574 
6575 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6576 	.disable = dm_crtc_helper_disable,
6577 	.atomic_check = dm_crtc_helper_atomic_check,
6578 	.mode_fixup = dm_crtc_helper_mode_fixup,
6579 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6580 };
6581 
6582 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6583 {
6584 
6585 }
6586 
6587 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6588 {
6589 	switch (display_color_depth) {
6590 		case COLOR_DEPTH_666:
6591 			return 6;
6592 		case COLOR_DEPTH_888:
6593 			return 8;
6594 		case COLOR_DEPTH_101010:
6595 			return 10;
6596 		case COLOR_DEPTH_121212:
6597 			return 12;
6598 		case COLOR_DEPTH_141414:
6599 			return 14;
6600 		case COLOR_DEPTH_161616:
6601 			return 16;
6602 		default:
6603 			break;
6604 		}
6605 	return 0;
6606 }
6607 
6608 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6609 					  struct drm_crtc_state *crtc_state,
6610 					  struct drm_connector_state *conn_state)
6611 {
6612 	struct drm_atomic_state *state = crtc_state->state;
6613 	struct drm_connector *connector = conn_state->connector;
6614 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6615 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6616 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6617 	struct drm_dp_mst_topology_mgr *mst_mgr;
6618 	struct drm_dp_mst_port *mst_port;
6619 	enum dc_color_depth color_depth;
6620 	int clock, bpp = 0;
6621 	bool is_y420 = false;
6622 
6623 	if (!aconnector->port || !aconnector->dc_sink)
6624 		return 0;
6625 
6626 	mst_port = aconnector->port;
6627 	mst_mgr = &aconnector->mst_port->mst_mgr;
6628 
6629 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6630 		return 0;
6631 
6632 	if (!state->duplicated) {
6633 		int max_bpc = conn_state->max_requested_bpc;
6634 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6635 				aconnector->force_yuv420_output;
6636 		color_depth = convert_color_depth_from_display_info(connector,
6637 								    is_y420,
6638 								    max_bpc);
6639 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6640 		clock = adjusted_mode->clock;
6641 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6642 	}
6643 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6644 									   mst_mgr,
6645 									   mst_port,
6646 									   dm_new_connector_state->pbn,
6647 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6648 	if (dm_new_connector_state->vcpi_slots < 0) {
6649 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6650 		return dm_new_connector_state->vcpi_slots;
6651 	}
6652 	return 0;
6653 }
6654 
6655 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6656 	.disable = dm_encoder_helper_disable,
6657 	.atomic_check = dm_encoder_helper_atomic_check
6658 };
6659 
6660 #if defined(CONFIG_DRM_AMD_DC_DCN)
6661 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6662 					    struct dc_state *dc_state)
6663 {
6664 	struct dc_stream_state *stream = NULL;
6665 	struct drm_connector *connector;
6666 	struct drm_connector_state *new_con_state;
6667 	struct amdgpu_dm_connector *aconnector;
6668 	struct dm_connector_state *dm_conn_state;
6669 	int i, j, clock, bpp;
6670 	int vcpi, pbn_div, pbn = 0;
6671 
6672 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6673 
6674 		aconnector = to_amdgpu_dm_connector(connector);
6675 
6676 		if (!aconnector->port)
6677 			continue;
6678 
6679 		if (!new_con_state || !new_con_state->crtc)
6680 			continue;
6681 
6682 		dm_conn_state = to_dm_connector_state(new_con_state);
6683 
6684 		for (j = 0; j < dc_state->stream_count; j++) {
6685 			stream = dc_state->streams[j];
6686 			if (!stream)
6687 				continue;
6688 
6689 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6690 				break;
6691 
6692 			stream = NULL;
6693 		}
6694 
6695 		if (!stream)
6696 			continue;
6697 
6698 		if (stream->timing.flags.DSC != 1) {
6699 			drm_dp_mst_atomic_enable_dsc(state,
6700 						     aconnector->port,
6701 						     dm_conn_state->pbn,
6702 						     0,
6703 						     false);
6704 			continue;
6705 		}
6706 
6707 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6708 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6709 		clock = stream->timing.pix_clk_100hz / 10;
6710 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6711 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6712 						    aconnector->port,
6713 						    pbn, pbn_div,
6714 						    true);
6715 		if (vcpi < 0)
6716 			return vcpi;
6717 
6718 		dm_conn_state->pbn = pbn;
6719 		dm_conn_state->vcpi_slots = vcpi;
6720 	}
6721 	return 0;
6722 }
6723 #endif
6724 
6725 static void dm_drm_plane_reset(struct drm_plane *plane)
6726 {
6727 	struct dm_plane_state *amdgpu_state = NULL;
6728 
6729 	if (plane->state)
6730 		plane->funcs->atomic_destroy_state(plane, plane->state);
6731 
6732 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6733 	WARN_ON(amdgpu_state == NULL);
6734 
6735 	if (amdgpu_state)
6736 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6737 }
6738 
6739 static struct drm_plane_state *
6740 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6741 {
6742 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6743 
6744 	old_dm_plane_state = to_dm_plane_state(plane->state);
6745 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6746 	if (!dm_plane_state)
6747 		return NULL;
6748 
6749 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6750 
6751 	if (old_dm_plane_state->dc_state) {
6752 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6753 		dc_plane_state_retain(dm_plane_state->dc_state);
6754 	}
6755 
6756 	return &dm_plane_state->base;
6757 }
6758 
6759 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6760 				struct drm_plane_state *state)
6761 {
6762 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6763 
6764 	if (dm_plane_state->dc_state)
6765 		dc_plane_state_release(dm_plane_state->dc_state);
6766 
6767 	drm_atomic_helper_plane_destroy_state(plane, state);
6768 }
6769 
6770 static const struct drm_plane_funcs dm_plane_funcs = {
6771 	.update_plane	= drm_atomic_helper_update_plane,
6772 	.disable_plane	= drm_atomic_helper_disable_plane,
6773 	.destroy	= drm_primary_helper_destroy,
6774 	.reset = dm_drm_plane_reset,
6775 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6776 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6777 	.format_mod_supported = dm_plane_format_mod_supported,
6778 };
6779 
6780 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6781 				      struct drm_plane_state *new_state)
6782 {
6783 	struct amdgpu_framebuffer *afb;
6784 	struct drm_gem_object *obj;
6785 	struct amdgpu_device *adev;
6786 	struct amdgpu_bo *rbo;
6787 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6788 	struct list_head list;
6789 	struct ttm_validate_buffer tv;
6790 	struct ww_acquire_ctx ticket;
6791 	uint32_t domain;
6792 	int r;
6793 
6794 	if (!new_state->fb) {
6795 		DRM_DEBUG_KMS("No FB bound\n");
6796 		return 0;
6797 	}
6798 
6799 	afb = to_amdgpu_framebuffer(new_state->fb);
6800 	obj = new_state->fb->obj[0];
6801 	rbo = gem_to_amdgpu_bo(obj);
6802 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6803 	INIT_LIST_HEAD(&list);
6804 
6805 	tv.bo = &rbo->tbo;
6806 	tv.num_shared = 1;
6807 	list_add(&tv.head, &list);
6808 
6809 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6810 	if (r) {
6811 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6812 		return r;
6813 	}
6814 
6815 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6816 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6817 	else
6818 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6819 
6820 	r = amdgpu_bo_pin(rbo, domain);
6821 	if (unlikely(r != 0)) {
6822 		if (r != -ERESTARTSYS)
6823 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6824 		ttm_eu_backoff_reservation(&ticket, &list);
6825 		return r;
6826 	}
6827 
6828 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6829 	if (unlikely(r != 0)) {
6830 		amdgpu_bo_unpin(rbo);
6831 		ttm_eu_backoff_reservation(&ticket, &list);
6832 		DRM_ERROR("%p bind failed\n", rbo);
6833 		return r;
6834 	}
6835 
6836 	ttm_eu_backoff_reservation(&ticket, &list);
6837 
6838 	afb->address = amdgpu_bo_gpu_offset(rbo);
6839 
6840 	amdgpu_bo_ref(rbo);
6841 
6842 	/**
6843 	 * We don't do surface updates on planes that have been newly created,
6844 	 * but we also don't have the afb->address during atomic check.
6845 	 *
6846 	 * Fill in buffer attributes depending on the address here, but only on
6847 	 * newly created planes since they're not being used by DC yet and this
6848 	 * won't modify global state.
6849 	 */
6850 	dm_plane_state_old = to_dm_plane_state(plane->state);
6851 	dm_plane_state_new = to_dm_plane_state(new_state);
6852 
6853 	if (dm_plane_state_new->dc_state &&
6854 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6855 		struct dc_plane_state *plane_state =
6856 			dm_plane_state_new->dc_state;
6857 		bool force_disable_dcc = !plane_state->dcc.enable;
6858 
6859 		fill_plane_buffer_attributes(
6860 			adev, afb, plane_state->format, plane_state->rotation,
6861 			afb->tiling_flags,
6862 			&plane_state->tiling_info, &plane_state->plane_size,
6863 			&plane_state->dcc, &plane_state->address,
6864 			afb->tmz_surface, force_disable_dcc);
6865 	}
6866 
6867 	return 0;
6868 }
6869 
6870 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6871 				       struct drm_plane_state *old_state)
6872 {
6873 	struct amdgpu_bo *rbo;
6874 	int r;
6875 
6876 	if (!old_state->fb)
6877 		return;
6878 
6879 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6880 	r = amdgpu_bo_reserve(rbo, false);
6881 	if (unlikely(r)) {
6882 		DRM_ERROR("failed to reserve rbo before unpin\n");
6883 		return;
6884 	}
6885 
6886 	amdgpu_bo_unpin(rbo);
6887 	amdgpu_bo_unreserve(rbo);
6888 	amdgpu_bo_unref(&rbo);
6889 }
6890 
6891 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6892 				       struct drm_crtc_state *new_crtc_state)
6893 {
6894 	struct drm_framebuffer *fb = state->fb;
6895 	int min_downscale, max_upscale;
6896 	int min_scale = 0;
6897 	int max_scale = INT_MAX;
6898 
6899 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6900 	if (fb && state->crtc) {
6901 		/* Validate viewport to cover the case when only the position changes */
6902 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6903 			int viewport_width = state->crtc_w;
6904 			int viewport_height = state->crtc_h;
6905 
6906 			if (state->crtc_x < 0)
6907 				viewport_width += state->crtc_x;
6908 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6909 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6910 
6911 			if (state->crtc_y < 0)
6912 				viewport_height += state->crtc_y;
6913 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6914 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6915 
6916 			if (viewport_width < 0 || viewport_height < 0) {
6917 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6918 				return -EINVAL;
6919 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6920 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6921 				return -EINVAL;
6922 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
6923 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6924 				return -EINVAL;
6925 			}
6926 
6927 		}
6928 
6929 		/* Get min/max allowed scaling factors from plane caps. */
6930 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6931 					     &min_downscale, &max_upscale);
6932 		/*
6933 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6934 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6935 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6936 		 */
6937 		min_scale = (1000 << 16) / max_upscale;
6938 		max_scale = (1000 << 16) / min_downscale;
6939 	}
6940 
6941 	return drm_atomic_helper_check_plane_state(
6942 		state, new_crtc_state, min_scale, max_scale, true, true);
6943 }
6944 
6945 static int dm_plane_atomic_check(struct drm_plane *plane,
6946 				 struct drm_atomic_state *state)
6947 {
6948 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6949 										 plane);
6950 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6951 	struct dc *dc = adev->dm.dc;
6952 	struct dm_plane_state *dm_plane_state;
6953 	struct dc_scaling_info scaling_info;
6954 	struct drm_crtc_state *new_crtc_state;
6955 	int ret;
6956 
6957 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6958 
6959 	dm_plane_state = to_dm_plane_state(new_plane_state);
6960 
6961 	if (!dm_plane_state->dc_state)
6962 		return 0;
6963 
6964 	new_crtc_state =
6965 		drm_atomic_get_new_crtc_state(state,
6966 					      new_plane_state->crtc);
6967 	if (!new_crtc_state)
6968 		return -EINVAL;
6969 
6970 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6971 	if (ret)
6972 		return ret;
6973 
6974 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6975 	if (ret)
6976 		return ret;
6977 
6978 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6979 		return 0;
6980 
6981 	return -EINVAL;
6982 }
6983 
6984 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6985 				       struct drm_atomic_state *state)
6986 {
6987 	/* Only support async updates on cursor planes. */
6988 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6989 		return -EINVAL;
6990 
6991 	return 0;
6992 }
6993 
6994 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6995 					 struct drm_atomic_state *state)
6996 {
6997 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6998 									   plane);
6999 	struct drm_plane_state *old_state =
7000 		drm_atomic_get_old_plane_state(state, plane);
7001 
7002 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7003 
7004 	swap(plane->state->fb, new_state->fb);
7005 
7006 	plane->state->src_x = new_state->src_x;
7007 	plane->state->src_y = new_state->src_y;
7008 	plane->state->src_w = new_state->src_w;
7009 	plane->state->src_h = new_state->src_h;
7010 	plane->state->crtc_x = new_state->crtc_x;
7011 	plane->state->crtc_y = new_state->crtc_y;
7012 	plane->state->crtc_w = new_state->crtc_w;
7013 	plane->state->crtc_h = new_state->crtc_h;
7014 
7015 	handle_cursor_update(plane, old_state);
7016 }
7017 
7018 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7019 	.prepare_fb = dm_plane_helper_prepare_fb,
7020 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7021 	.atomic_check = dm_plane_atomic_check,
7022 	.atomic_async_check = dm_plane_atomic_async_check,
7023 	.atomic_async_update = dm_plane_atomic_async_update
7024 };
7025 
7026 /*
7027  * TODO: these are currently initialized to rgb formats only.
7028  * For future use cases we should either initialize them dynamically based on
7029  * plane capabilities, or initialize this array to all formats, so internal drm
7030  * check will succeed, and let DC implement proper check
7031  */
7032 static const uint32_t rgb_formats[] = {
7033 	DRM_FORMAT_XRGB8888,
7034 	DRM_FORMAT_ARGB8888,
7035 	DRM_FORMAT_RGBA8888,
7036 	DRM_FORMAT_XRGB2101010,
7037 	DRM_FORMAT_XBGR2101010,
7038 	DRM_FORMAT_ARGB2101010,
7039 	DRM_FORMAT_ABGR2101010,
7040 	DRM_FORMAT_XBGR8888,
7041 	DRM_FORMAT_ABGR8888,
7042 	DRM_FORMAT_RGB565,
7043 };
7044 
7045 static const uint32_t overlay_formats[] = {
7046 	DRM_FORMAT_XRGB8888,
7047 	DRM_FORMAT_ARGB8888,
7048 	DRM_FORMAT_RGBA8888,
7049 	DRM_FORMAT_XBGR8888,
7050 	DRM_FORMAT_ABGR8888,
7051 	DRM_FORMAT_RGB565
7052 };
7053 
7054 static const u32 cursor_formats[] = {
7055 	DRM_FORMAT_ARGB8888
7056 };
7057 
7058 static int get_plane_formats(const struct drm_plane *plane,
7059 			     const struct dc_plane_cap *plane_cap,
7060 			     uint32_t *formats, int max_formats)
7061 {
7062 	int i, num_formats = 0;
7063 
7064 	/*
7065 	 * TODO: Query support for each group of formats directly from
7066 	 * DC plane caps. This will require adding more formats to the
7067 	 * caps list.
7068 	 */
7069 
7070 	switch (plane->type) {
7071 	case DRM_PLANE_TYPE_PRIMARY:
7072 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7073 			if (num_formats >= max_formats)
7074 				break;
7075 
7076 			formats[num_formats++] = rgb_formats[i];
7077 		}
7078 
7079 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7080 			formats[num_formats++] = DRM_FORMAT_NV12;
7081 		if (plane_cap && plane_cap->pixel_format_support.p010)
7082 			formats[num_formats++] = DRM_FORMAT_P010;
7083 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7084 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7085 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7086 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7087 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7088 		}
7089 		break;
7090 
7091 	case DRM_PLANE_TYPE_OVERLAY:
7092 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7093 			if (num_formats >= max_formats)
7094 				break;
7095 
7096 			formats[num_formats++] = overlay_formats[i];
7097 		}
7098 		break;
7099 
7100 	case DRM_PLANE_TYPE_CURSOR:
7101 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7102 			if (num_formats >= max_formats)
7103 				break;
7104 
7105 			formats[num_formats++] = cursor_formats[i];
7106 		}
7107 		break;
7108 	}
7109 
7110 	return num_formats;
7111 }
7112 
7113 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7114 				struct drm_plane *plane,
7115 				unsigned long possible_crtcs,
7116 				const struct dc_plane_cap *plane_cap)
7117 {
7118 	uint32_t formats[32];
7119 	int num_formats;
7120 	int res = -EPERM;
7121 	unsigned int supported_rotations;
7122 	uint64_t *modifiers = NULL;
7123 
7124 	num_formats = get_plane_formats(plane, plane_cap, formats,
7125 					ARRAY_SIZE(formats));
7126 
7127 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7128 	if (res)
7129 		return res;
7130 
7131 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7132 				       &dm_plane_funcs, formats, num_formats,
7133 				       modifiers, plane->type, NULL);
7134 	kfree(modifiers);
7135 	if (res)
7136 		return res;
7137 
7138 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7139 	    plane_cap && plane_cap->per_pixel_alpha) {
7140 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7141 					  BIT(DRM_MODE_BLEND_PREMULTI);
7142 
7143 		drm_plane_create_alpha_property(plane);
7144 		drm_plane_create_blend_mode_property(plane, blend_caps);
7145 	}
7146 
7147 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7148 	    plane_cap &&
7149 	    (plane_cap->pixel_format_support.nv12 ||
7150 	     plane_cap->pixel_format_support.p010)) {
7151 		/* This only affects YUV formats. */
7152 		drm_plane_create_color_properties(
7153 			plane,
7154 			BIT(DRM_COLOR_YCBCR_BT601) |
7155 			BIT(DRM_COLOR_YCBCR_BT709) |
7156 			BIT(DRM_COLOR_YCBCR_BT2020),
7157 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7158 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7159 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7160 	}
7161 
7162 	supported_rotations =
7163 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7164 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7165 
7166 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7167 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7168 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7169 						   supported_rotations);
7170 
7171 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7172 
7173 	/* Create (reset) the plane state */
7174 	if (plane->funcs->reset)
7175 		plane->funcs->reset(plane);
7176 
7177 	return 0;
7178 }
7179 
7180 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7181 			       struct drm_plane *plane,
7182 			       uint32_t crtc_index)
7183 {
7184 	struct amdgpu_crtc *acrtc = NULL;
7185 	struct drm_plane *cursor_plane;
7186 
7187 	int res = -ENOMEM;
7188 
7189 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7190 	if (!cursor_plane)
7191 		goto fail;
7192 
7193 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7194 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7195 
7196 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7197 	if (!acrtc)
7198 		goto fail;
7199 
7200 	res = drm_crtc_init_with_planes(
7201 			dm->ddev,
7202 			&acrtc->base,
7203 			plane,
7204 			cursor_plane,
7205 			&amdgpu_dm_crtc_funcs, NULL);
7206 
7207 	if (res)
7208 		goto fail;
7209 
7210 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7211 
7212 	/* Create (reset) the plane state */
7213 	if (acrtc->base.funcs->reset)
7214 		acrtc->base.funcs->reset(&acrtc->base);
7215 
7216 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7217 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7218 
7219 	acrtc->crtc_id = crtc_index;
7220 	acrtc->base.enabled = false;
7221 	acrtc->otg_inst = -1;
7222 
7223 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7224 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7225 				   true, MAX_COLOR_LUT_ENTRIES);
7226 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7227 
7228 	return 0;
7229 
7230 fail:
7231 	kfree(acrtc);
7232 	kfree(cursor_plane);
7233 	return res;
7234 }
7235 
7236 
7237 static int to_drm_connector_type(enum signal_type st)
7238 {
7239 	switch (st) {
7240 	case SIGNAL_TYPE_HDMI_TYPE_A:
7241 		return DRM_MODE_CONNECTOR_HDMIA;
7242 	case SIGNAL_TYPE_EDP:
7243 		return DRM_MODE_CONNECTOR_eDP;
7244 	case SIGNAL_TYPE_LVDS:
7245 		return DRM_MODE_CONNECTOR_LVDS;
7246 	case SIGNAL_TYPE_RGB:
7247 		return DRM_MODE_CONNECTOR_VGA;
7248 	case SIGNAL_TYPE_DISPLAY_PORT:
7249 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7250 		return DRM_MODE_CONNECTOR_DisplayPort;
7251 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7252 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7253 		return DRM_MODE_CONNECTOR_DVID;
7254 	case SIGNAL_TYPE_VIRTUAL:
7255 		return DRM_MODE_CONNECTOR_VIRTUAL;
7256 
7257 	default:
7258 		return DRM_MODE_CONNECTOR_Unknown;
7259 	}
7260 }
7261 
7262 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7263 {
7264 	struct drm_encoder *encoder;
7265 
7266 	/* There is only one encoder per connector */
7267 	drm_connector_for_each_possible_encoder(connector, encoder)
7268 		return encoder;
7269 
7270 	return NULL;
7271 }
7272 
7273 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7274 {
7275 	struct drm_encoder *encoder;
7276 	struct amdgpu_encoder *amdgpu_encoder;
7277 
7278 	encoder = amdgpu_dm_connector_to_encoder(connector);
7279 
7280 	if (encoder == NULL)
7281 		return;
7282 
7283 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7284 
7285 	amdgpu_encoder->native_mode.clock = 0;
7286 
7287 	if (!list_empty(&connector->probed_modes)) {
7288 		struct drm_display_mode *preferred_mode = NULL;
7289 
7290 		list_for_each_entry(preferred_mode,
7291 				    &connector->probed_modes,
7292 				    head) {
7293 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7294 				amdgpu_encoder->native_mode = *preferred_mode;
7295 
7296 			break;
7297 		}
7298 
7299 	}
7300 }
7301 
7302 static struct drm_display_mode *
7303 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7304 			     char *name,
7305 			     int hdisplay, int vdisplay)
7306 {
7307 	struct drm_device *dev = encoder->dev;
7308 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7309 	struct drm_display_mode *mode = NULL;
7310 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7311 
7312 	mode = drm_mode_duplicate(dev, native_mode);
7313 
7314 	if (mode == NULL)
7315 		return NULL;
7316 
7317 	mode->hdisplay = hdisplay;
7318 	mode->vdisplay = vdisplay;
7319 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7320 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7321 
7322 	return mode;
7323 
7324 }
7325 
7326 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7327 						 struct drm_connector *connector)
7328 {
7329 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7330 	struct drm_display_mode *mode = NULL;
7331 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7332 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7333 				to_amdgpu_dm_connector(connector);
7334 	int i;
7335 	int n;
7336 	struct mode_size {
7337 		char name[DRM_DISPLAY_MODE_LEN];
7338 		int w;
7339 		int h;
7340 	} common_modes[] = {
7341 		{  "640x480",  640,  480},
7342 		{  "800x600",  800,  600},
7343 		{ "1024x768", 1024,  768},
7344 		{ "1280x720", 1280,  720},
7345 		{ "1280x800", 1280,  800},
7346 		{"1280x1024", 1280, 1024},
7347 		{ "1440x900", 1440,  900},
7348 		{"1680x1050", 1680, 1050},
7349 		{"1600x1200", 1600, 1200},
7350 		{"1920x1080", 1920, 1080},
7351 		{"1920x1200", 1920, 1200}
7352 	};
7353 
7354 	n = ARRAY_SIZE(common_modes);
7355 
7356 	for (i = 0; i < n; i++) {
7357 		struct drm_display_mode *curmode = NULL;
7358 		bool mode_existed = false;
7359 
7360 		if (common_modes[i].w > native_mode->hdisplay ||
7361 		    common_modes[i].h > native_mode->vdisplay ||
7362 		   (common_modes[i].w == native_mode->hdisplay &&
7363 		    common_modes[i].h == native_mode->vdisplay))
7364 			continue;
7365 
7366 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7367 			if (common_modes[i].w == curmode->hdisplay &&
7368 			    common_modes[i].h == curmode->vdisplay) {
7369 				mode_existed = true;
7370 				break;
7371 			}
7372 		}
7373 
7374 		if (mode_existed)
7375 			continue;
7376 
7377 		mode = amdgpu_dm_create_common_mode(encoder,
7378 				common_modes[i].name, common_modes[i].w,
7379 				common_modes[i].h);
7380 		drm_mode_probed_add(connector, mode);
7381 		amdgpu_dm_connector->num_modes++;
7382 	}
7383 }
7384 
7385 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7386 					      struct edid *edid)
7387 {
7388 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7389 			to_amdgpu_dm_connector(connector);
7390 
7391 	if (edid) {
7392 		/* empty probed_modes */
7393 		INIT_LIST_HEAD(&connector->probed_modes);
7394 		amdgpu_dm_connector->num_modes =
7395 				drm_add_edid_modes(connector, edid);
7396 
7397 		/* sorting the probed modes before calling function
7398 		 * amdgpu_dm_get_native_mode() since EDID can have
7399 		 * more than one preferred mode. The modes that are
7400 		 * later in the probed mode list could be of higher
7401 		 * and preferred resolution. For example, 3840x2160
7402 		 * resolution in base EDID preferred timing and 4096x2160
7403 		 * preferred resolution in DID extension block later.
7404 		 */
7405 		drm_mode_sort(&connector->probed_modes);
7406 		amdgpu_dm_get_native_mode(connector);
7407 
7408 		/* Freesync capabilities are reset by calling
7409 		 * drm_add_edid_modes() and need to be
7410 		 * restored here.
7411 		 */
7412 		amdgpu_dm_update_freesync_caps(connector, edid);
7413 	} else {
7414 		amdgpu_dm_connector->num_modes = 0;
7415 	}
7416 }
7417 
7418 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7419 			      struct drm_display_mode *mode)
7420 {
7421 	struct drm_display_mode *m;
7422 
7423 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7424 		if (drm_mode_equal(m, mode))
7425 			return true;
7426 	}
7427 
7428 	return false;
7429 }
7430 
7431 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7432 {
7433 	const struct drm_display_mode *m;
7434 	struct drm_display_mode *new_mode;
7435 	uint i;
7436 	uint32_t new_modes_count = 0;
7437 
7438 	/* Standard FPS values
7439 	 *
7440 	 * 23.976   - TV/NTSC
7441 	 * 24 	    - Cinema
7442 	 * 25 	    - TV/PAL
7443 	 * 29.97    - TV/NTSC
7444 	 * 30 	    - TV/NTSC
7445 	 * 48 	    - Cinema HFR
7446 	 * 50 	    - TV/PAL
7447 	 * 60 	    - Commonly used
7448 	 * 48,72,96 - Multiples of 24
7449 	 */
7450 	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7451 					 48000, 50000, 60000, 72000, 96000 };
7452 
7453 	/*
7454 	 * Find mode with highest refresh rate with the same resolution
7455 	 * as the preferred mode. Some monitors report a preferred mode
7456 	 * with lower resolution than the highest refresh rate supported.
7457 	 */
7458 
7459 	m = get_highest_refresh_rate_mode(aconnector, true);
7460 	if (!m)
7461 		return 0;
7462 
7463 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7464 		uint64_t target_vtotal, target_vtotal_diff;
7465 		uint64_t num, den;
7466 
7467 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7468 			continue;
7469 
7470 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7471 		    common_rates[i] > aconnector->max_vfreq * 1000)
7472 			continue;
7473 
7474 		num = (unsigned long long)m->clock * 1000 * 1000;
7475 		den = common_rates[i] * (unsigned long long)m->htotal;
7476 		target_vtotal = div_u64(num, den);
7477 		target_vtotal_diff = target_vtotal - m->vtotal;
7478 
7479 		/* Check for illegal modes */
7480 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7481 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7482 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7483 			continue;
7484 
7485 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7486 		if (!new_mode)
7487 			goto out;
7488 
7489 		new_mode->vtotal += (u16)target_vtotal_diff;
7490 		new_mode->vsync_start += (u16)target_vtotal_diff;
7491 		new_mode->vsync_end += (u16)target_vtotal_diff;
7492 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7493 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7494 
7495 		if (!is_duplicate_mode(aconnector, new_mode)) {
7496 			drm_mode_probed_add(&aconnector->base, new_mode);
7497 			new_modes_count += 1;
7498 		} else
7499 			drm_mode_destroy(aconnector->base.dev, new_mode);
7500 	}
7501  out:
7502 	return new_modes_count;
7503 }
7504 
7505 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7506 						   struct edid *edid)
7507 {
7508 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7509 		to_amdgpu_dm_connector(connector);
7510 
7511 	if (!(amdgpu_freesync_vid_mode && edid))
7512 		return;
7513 
7514 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7515 		amdgpu_dm_connector->num_modes +=
7516 			add_fs_modes(amdgpu_dm_connector);
7517 }
7518 
7519 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7520 {
7521 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7522 			to_amdgpu_dm_connector(connector);
7523 	struct drm_encoder *encoder;
7524 	struct edid *edid = amdgpu_dm_connector->edid;
7525 
7526 	encoder = amdgpu_dm_connector_to_encoder(connector);
7527 
7528 	if (!drm_edid_is_valid(edid)) {
7529 		amdgpu_dm_connector->num_modes =
7530 				drm_add_modes_noedid(connector, 640, 480);
7531 	} else {
7532 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7533 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7534 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7535 	}
7536 	amdgpu_dm_fbc_init(connector);
7537 
7538 	return amdgpu_dm_connector->num_modes;
7539 }
7540 
7541 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7542 				     struct amdgpu_dm_connector *aconnector,
7543 				     int connector_type,
7544 				     struct dc_link *link,
7545 				     int link_index)
7546 {
7547 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7548 
7549 	/*
7550 	 * Some of the properties below require access to state, like bpc.
7551 	 * Allocate some default initial connector state with our reset helper.
7552 	 */
7553 	if (aconnector->base.funcs->reset)
7554 		aconnector->base.funcs->reset(&aconnector->base);
7555 
7556 	aconnector->connector_id = link_index;
7557 	aconnector->dc_link = link;
7558 	aconnector->base.interlace_allowed = false;
7559 	aconnector->base.doublescan_allowed = false;
7560 	aconnector->base.stereo_allowed = false;
7561 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7562 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7563 	aconnector->audio_inst = -1;
7564 	mutex_init(&aconnector->hpd_lock);
7565 
7566 	/*
7567 	 * configure support HPD hot plug connector_>polled default value is 0
7568 	 * which means HPD hot plug not supported
7569 	 */
7570 	switch (connector_type) {
7571 	case DRM_MODE_CONNECTOR_HDMIA:
7572 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7573 		aconnector->base.ycbcr_420_allowed =
7574 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7575 		break;
7576 	case DRM_MODE_CONNECTOR_DisplayPort:
7577 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7578 		aconnector->base.ycbcr_420_allowed =
7579 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
7580 		break;
7581 	case DRM_MODE_CONNECTOR_DVID:
7582 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7583 		break;
7584 	default:
7585 		break;
7586 	}
7587 
7588 	drm_object_attach_property(&aconnector->base.base,
7589 				dm->ddev->mode_config.scaling_mode_property,
7590 				DRM_MODE_SCALE_NONE);
7591 
7592 	drm_object_attach_property(&aconnector->base.base,
7593 				adev->mode_info.underscan_property,
7594 				UNDERSCAN_OFF);
7595 	drm_object_attach_property(&aconnector->base.base,
7596 				adev->mode_info.underscan_hborder_property,
7597 				0);
7598 	drm_object_attach_property(&aconnector->base.base,
7599 				adev->mode_info.underscan_vborder_property,
7600 				0);
7601 
7602 	if (!aconnector->mst_port)
7603 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7604 
7605 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7606 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7607 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7608 
7609 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7610 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7611 		drm_object_attach_property(&aconnector->base.base,
7612 				adev->mode_info.abm_level_property, 0);
7613 	}
7614 
7615 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7616 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7617 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7618 		drm_object_attach_property(
7619 			&aconnector->base.base,
7620 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7621 
7622 		if (!aconnector->mst_port)
7623 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7624 
7625 #ifdef CONFIG_DRM_AMD_DC_HDCP
7626 		if (adev->dm.hdcp_workqueue)
7627 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7628 #endif
7629 	}
7630 }
7631 
7632 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7633 			      struct i2c_msg *msgs, int num)
7634 {
7635 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7636 	struct ddc_service *ddc_service = i2c->ddc_service;
7637 	struct i2c_command cmd;
7638 	int i;
7639 	int result = -EIO;
7640 
7641 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7642 
7643 	if (!cmd.payloads)
7644 		return result;
7645 
7646 	cmd.number_of_payloads = num;
7647 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7648 	cmd.speed = 100;
7649 
7650 	for (i = 0; i < num; i++) {
7651 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7652 		cmd.payloads[i].address = msgs[i].addr;
7653 		cmd.payloads[i].length = msgs[i].len;
7654 		cmd.payloads[i].data = msgs[i].buf;
7655 	}
7656 
7657 	if (dc_submit_i2c(
7658 			ddc_service->ctx->dc,
7659 			ddc_service->ddc_pin->hw_info.ddc_channel,
7660 			&cmd))
7661 		result = num;
7662 
7663 	kfree(cmd.payloads);
7664 	return result;
7665 }
7666 
7667 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7668 {
7669 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7670 }
7671 
7672 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7673 	.master_xfer = amdgpu_dm_i2c_xfer,
7674 	.functionality = amdgpu_dm_i2c_func,
7675 };
7676 
7677 static struct amdgpu_i2c_adapter *
7678 create_i2c(struct ddc_service *ddc_service,
7679 	   int link_index,
7680 	   int *res)
7681 {
7682 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7683 	struct amdgpu_i2c_adapter *i2c;
7684 
7685 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7686 	if (!i2c)
7687 		return NULL;
7688 	i2c->base.owner = THIS_MODULE;
7689 	i2c->base.class = I2C_CLASS_DDC;
7690 	i2c->base.dev.parent = &adev->pdev->dev;
7691 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7692 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7693 	i2c_set_adapdata(&i2c->base, i2c);
7694 	i2c->ddc_service = ddc_service;
7695 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7696 
7697 	return i2c;
7698 }
7699 
7700 
7701 /*
7702  * Note: this function assumes that dc_link_detect() was called for the
7703  * dc_link which will be represented by this aconnector.
7704  */
7705 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7706 				    struct amdgpu_dm_connector *aconnector,
7707 				    uint32_t link_index,
7708 				    struct amdgpu_encoder *aencoder)
7709 {
7710 	int res = 0;
7711 	int connector_type;
7712 	struct dc *dc = dm->dc;
7713 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7714 	struct amdgpu_i2c_adapter *i2c;
7715 
7716 	link->priv = aconnector;
7717 
7718 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7719 
7720 	i2c = create_i2c(link->ddc, link->link_index, &res);
7721 	if (!i2c) {
7722 		DRM_ERROR("Failed to create i2c adapter data\n");
7723 		return -ENOMEM;
7724 	}
7725 
7726 	aconnector->i2c = i2c;
7727 	res = i2c_add_adapter(&i2c->base);
7728 
7729 	if (res) {
7730 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7731 		goto out_free;
7732 	}
7733 
7734 	connector_type = to_drm_connector_type(link->connector_signal);
7735 
7736 	res = drm_connector_init_with_ddc(
7737 			dm->ddev,
7738 			&aconnector->base,
7739 			&amdgpu_dm_connector_funcs,
7740 			connector_type,
7741 			&i2c->base);
7742 
7743 	if (res) {
7744 		DRM_ERROR("connector_init failed\n");
7745 		aconnector->connector_id = -1;
7746 		goto out_free;
7747 	}
7748 
7749 	drm_connector_helper_add(
7750 			&aconnector->base,
7751 			&amdgpu_dm_connector_helper_funcs);
7752 
7753 	amdgpu_dm_connector_init_helper(
7754 		dm,
7755 		aconnector,
7756 		connector_type,
7757 		link,
7758 		link_index);
7759 
7760 	drm_connector_attach_encoder(
7761 		&aconnector->base, &aencoder->base);
7762 
7763 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7764 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7765 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7766 
7767 out_free:
7768 	if (res) {
7769 		kfree(i2c);
7770 		aconnector->i2c = NULL;
7771 	}
7772 	return res;
7773 }
7774 
7775 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7776 {
7777 	switch (adev->mode_info.num_crtc) {
7778 	case 1:
7779 		return 0x1;
7780 	case 2:
7781 		return 0x3;
7782 	case 3:
7783 		return 0x7;
7784 	case 4:
7785 		return 0xf;
7786 	case 5:
7787 		return 0x1f;
7788 	case 6:
7789 	default:
7790 		return 0x3f;
7791 	}
7792 }
7793 
7794 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7795 				  struct amdgpu_encoder *aencoder,
7796 				  uint32_t link_index)
7797 {
7798 	struct amdgpu_device *adev = drm_to_adev(dev);
7799 
7800 	int res = drm_encoder_init(dev,
7801 				   &aencoder->base,
7802 				   &amdgpu_dm_encoder_funcs,
7803 				   DRM_MODE_ENCODER_TMDS,
7804 				   NULL);
7805 
7806 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7807 
7808 	if (!res)
7809 		aencoder->encoder_id = link_index;
7810 	else
7811 		aencoder->encoder_id = -1;
7812 
7813 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7814 
7815 	return res;
7816 }
7817 
7818 static void manage_dm_interrupts(struct amdgpu_device *adev,
7819 				 struct amdgpu_crtc *acrtc,
7820 				 bool enable)
7821 {
7822 	/*
7823 	 * We have no guarantee that the frontend index maps to the same
7824 	 * backend index - some even map to more than one.
7825 	 *
7826 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7827 	 */
7828 	int irq_type =
7829 		amdgpu_display_crtc_idx_to_irq_type(
7830 			adev,
7831 			acrtc->crtc_id);
7832 
7833 	if (enable) {
7834 		drm_crtc_vblank_on(&acrtc->base);
7835 		amdgpu_irq_get(
7836 			adev,
7837 			&adev->pageflip_irq,
7838 			irq_type);
7839 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7840 		amdgpu_irq_get(
7841 			adev,
7842 			&adev->vline0_irq,
7843 			irq_type);
7844 #endif
7845 	} else {
7846 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7847 		amdgpu_irq_put(
7848 			adev,
7849 			&adev->vline0_irq,
7850 			irq_type);
7851 #endif
7852 		amdgpu_irq_put(
7853 			adev,
7854 			&adev->pageflip_irq,
7855 			irq_type);
7856 		drm_crtc_vblank_off(&acrtc->base);
7857 	}
7858 }
7859 
7860 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7861 				      struct amdgpu_crtc *acrtc)
7862 {
7863 	int irq_type =
7864 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7865 
7866 	/**
7867 	 * This reads the current state for the IRQ and force reapplies
7868 	 * the setting to hardware.
7869 	 */
7870 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7871 }
7872 
7873 static bool
7874 is_scaling_state_different(const struct dm_connector_state *dm_state,
7875 			   const struct dm_connector_state *old_dm_state)
7876 {
7877 	if (dm_state->scaling != old_dm_state->scaling)
7878 		return true;
7879 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7880 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7881 			return true;
7882 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7883 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7884 			return true;
7885 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7886 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7887 		return true;
7888 	return false;
7889 }
7890 
7891 #ifdef CONFIG_DRM_AMD_DC_HDCP
7892 static bool is_content_protection_different(struct drm_connector_state *state,
7893 					    const struct drm_connector_state *old_state,
7894 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7895 {
7896 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7897 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7898 
7899 	/* Handle: Type0/1 change */
7900 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7901 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7902 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7903 		return true;
7904 	}
7905 
7906 	/* CP is being re enabled, ignore this
7907 	 *
7908 	 * Handles:	ENABLED -> DESIRED
7909 	 */
7910 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7911 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7912 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7913 		return false;
7914 	}
7915 
7916 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7917 	 *
7918 	 * Handles:	UNDESIRED -> ENABLED
7919 	 */
7920 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7921 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7922 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7923 
7924 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
7925 	 * hot-plug, headless s3, dpms
7926 	 *
7927 	 * Handles:	DESIRED -> DESIRED (Special case)
7928 	 */
7929 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7930 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7931 		dm_con_state->update_hdcp = false;
7932 		return true;
7933 	}
7934 
7935 	/*
7936 	 * Handles:	UNDESIRED -> UNDESIRED
7937 	 *		DESIRED -> DESIRED
7938 	 *		ENABLED -> ENABLED
7939 	 */
7940 	if (old_state->content_protection == state->content_protection)
7941 		return false;
7942 
7943 	/*
7944 	 * Handles:	UNDESIRED -> DESIRED
7945 	 *		DESIRED -> UNDESIRED
7946 	 *		ENABLED -> UNDESIRED
7947 	 */
7948 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7949 		return true;
7950 
7951 	/*
7952 	 * Handles:	DESIRED -> ENABLED
7953 	 */
7954 	return false;
7955 }
7956 
7957 #endif
7958 static void remove_stream(struct amdgpu_device *adev,
7959 			  struct amdgpu_crtc *acrtc,
7960 			  struct dc_stream_state *stream)
7961 {
7962 	/* this is the update mode case */
7963 
7964 	acrtc->otg_inst = -1;
7965 	acrtc->enabled = false;
7966 }
7967 
7968 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7969 			       struct dc_cursor_position *position)
7970 {
7971 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7972 	int x, y;
7973 	int xorigin = 0, yorigin = 0;
7974 
7975 	if (!crtc || !plane->state->fb)
7976 		return 0;
7977 
7978 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7979 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7980 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7981 			  __func__,
7982 			  plane->state->crtc_w,
7983 			  plane->state->crtc_h);
7984 		return -EINVAL;
7985 	}
7986 
7987 	x = plane->state->crtc_x;
7988 	y = plane->state->crtc_y;
7989 
7990 	if (x <= -amdgpu_crtc->max_cursor_width ||
7991 	    y <= -amdgpu_crtc->max_cursor_height)
7992 		return 0;
7993 
7994 	if (x < 0) {
7995 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7996 		x = 0;
7997 	}
7998 	if (y < 0) {
7999 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8000 		y = 0;
8001 	}
8002 	position->enable = true;
8003 	position->translate_by_source = true;
8004 	position->x = x;
8005 	position->y = y;
8006 	position->x_hotspot = xorigin;
8007 	position->y_hotspot = yorigin;
8008 
8009 	return 0;
8010 }
8011 
8012 static void handle_cursor_update(struct drm_plane *plane,
8013 				 struct drm_plane_state *old_plane_state)
8014 {
8015 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8016 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8017 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8018 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8019 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8020 	uint64_t address = afb ? afb->address : 0;
8021 	struct dc_cursor_position position = {0};
8022 	struct dc_cursor_attributes attributes;
8023 	int ret;
8024 
8025 	if (!plane->state->fb && !old_plane_state->fb)
8026 		return;
8027 
8028 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8029 		      __func__,
8030 		      amdgpu_crtc->crtc_id,
8031 		      plane->state->crtc_w,
8032 		      plane->state->crtc_h);
8033 
8034 	ret = get_cursor_position(plane, crtc, &position);
8035 	if (ret)
8036 		return;
8037 
8038 	if (!position.enable) {
8039 		/* turn off cursor */
8040 		if (crtc_state && crtc_state->stream) {
8041 			mutex_lock(&adev->dm.dc_lock);
8042 			dc_stream_set_cursor_position(crtc_state->stream,
8043 						      &position);
8044 			mutex_unlock(&adev->dm.dc_lock);
8045 		}
8046 		return;
8047 	}
8048 
8049 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8050 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8051 
8052 	memset(&attributes, 0, sizeof(attributes));
8053 	attributes.address.high_part = upper_32_bits(address);
8054 	attributes.address.low_part  = lower_32_bits(address);
8055 	attributes.width             = plane->state->crtc_w;
8056 	attributes.height            = plane->state->crtc_h;
8057 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8058 	attributes.rotation_angle    = 0;
8059 	attributes.attribute_flags.value = 0;
8060 
8061 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8062 
8063 	if (crtc_state->stream) {
8064 		mutex_lock(&adev->dm.dc_lock);
8065 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8066 							 &attributes))
8067 			DRM_ERROR("DC failed to set cursor attributes\n");
8068 
8069 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8070 						   &position))
8071 			DRM_ERROR("DC failed to set cursor position\n");
8072 		mutex_unlock(&adev->dm.dc_lock);
8073 	}
8074 }
8075 
8076 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8077 {
8078 
8079 	assert_spin_locked(&acrtc->base.dev->event_lock);
8080 	WARN_ON(acrtc->event);
8081 
8082 	acrtc->event = acrtc->base.state->event;
8083 
8084 	/* Set the flip status */
8085 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8086 
8087 	/* Mark this event as consumed */
8088 	acrtc->base.state->event = NULL;
8089 
8090 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8091 		     acrtc->crtc_id);
8092 }
8093 
8094 static void update_freesync_state_on_stream(
8095 	struct amdgpu_display_manager *dm,
8096 	struct dm_crtc_state *new_crtc_state,
8097 	struct dc_stream_state *new_stream,
8098 	struct dc_plane_state *surface,
8099 	u32 flip_timestamp_in_us)
8100 {
8101 	struct mod_vrr_params vrr_params;
8102 	struct dc_info_packet vrr_infopacket = {0};
8103 	struct amdgpu_device *adev = dm->adev;
8104 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8105 	unsigned long flags;
8106 	bool pack_sdp_v1_3 = false;
8107 
8108 	if (!new_stream)
8109 		return;
8110 
8111 	/*
8112 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8113 	 * For now it's sufficient to just guard against these conditions.
8114 	 */
8115 
8116 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8117 		return;
8118 
8119 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8120         vrr_params = acrtc->dm_irq_params.vrr_params;
8121 
8122 	if (surface) {
8123 		mod_freesync_handle_preflip(
8124 			dm->freesync_module,
8125 			surface,
8126 			new_stream,
8127 			flip_timestamp_in_us,
8128 			&vrr_params);
8129 
8130 		if (adev->family < AMDGPU_FAMILY_AI &&
8131 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8132 			mod_freesync_handle_v_update(dm->freesync_module,
8133 						     new_stream, &vrr_params);
8134 
8135 			/* Need to call this before the frame ends. */
8136 			dc_stream_adjust_vmin_vmax(dm->dc,
8137 						   new_crtc_state->stream,
8138 						   &vrr_params.adjust);
8139 		}
8140 	}
8141 
8142 	mod_freesync_build_vrr_infopacket(
8143 		dm->freesync_module,
8144 		new_stream,
8145 		&vrr_params,
8146 		PACKET_TYPE_VRR,
8147 		TRANSFER_FUNC_UNKNOWN,
8148 		&vrr_infopacket,
8149 		pack_sdp_v1_3);
8150 
8151 	new_crtc_state->freesync_timing_changed |=
8152 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8153 			&vrr_params.adjust,
8154 			sizeof(vrr_params.adjust)) != 0);
8155 
8156 	new_crtc_state->freesync_vrr_info_changed |=
8157 		(memcmp(&new_crtc_state->vrr_infopacket,
8158 			&vrr_infopacket,
8159 			sizeof(vrr_infopacket)) != 0);
8160 
8161 	acrtc->dm_irq_params.vrr_params = vrr_params;
8162 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8163 
8164 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8165 	new_stream->vrr_infopacket = vrr_infopacket;
8166 
8167 	if (new_crtc_state->freesync_vrr_info_changed)
8168 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8169 			      new_crtc_state->base.crtc->base.id,
8170 			      (int)new_crtc_state->base.vrr_enabled,
8171 			      (int)vrr_params.state);
8172 
8173 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8174 }
8175 
8176 static void update_stream_irq_parameters(
8177 	struct amdgpu_display_manager *dm,
8178 	struct dm_crtc_state *new_crtc_state)
8179 {
8180 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8181 	struct mod_vrr_params vrr_params;
8182 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8183 	struct amdgpu_device *adev = dm->adev;
8184 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8185 	unsigned long flags;
8186 
8187 	if (!new_stream)
8188 		return;
8189 
8190 	/*
8191 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8192 	 * For now it's sufficient to just guard against these conditions.
8193 	 */
8194 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8195 		return;
8196 
8197 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8198 	vrr_params = acrtc->dm_irq_params.vrr_params;
8199 
8200 	if (new_crtc_state->vrr_supported &&
8201 	    config.min_refresh_in_uhz &&
8202 	    config.max_refresh_in_uhz) {
8203 		/*
8204 		 * if freesync compatible mode was set, config.state will be set
8205 		 * in atomic check
8206 		 */
8207 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8208 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8209 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8210 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8211 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8212 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8213 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8214 		} else {
8215 			config.state = new_crtc_state->base.vrr_enabled ?
8216 						     VRR_STATE_ACTIVE_VARIABLE :
8217 						     VRR_STATE_INACTIVE;
8218 		}
8219 	} else {
8220 		config.state = VRR_STATE_UNSUPPORTED;
8221 	}
8222 
8223 	mod_freesync_build_vrr_params(dm->freesync_module,
8224 				      new_stream,
8225 				      &config, &vrr_params);
8226 
8227 	new_crtc_state->freesync_timing_changed |=
8228 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8229 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8230 
8231 	new_crtc_state->freesync_config = config;
8232 	/* Copy state for access from DM IRQ handler */
8233 	acrtc->dm_irq_params.freesync_config = config;
8234 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8235 	acrtc->dm_irq_params.vrr_params = vrr_params;
8236 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8237 }
8238 
8239 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8240 					    struct dm_crtc_state *new_state)
8241 {
8242 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8243 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8244 
8245 	if (!old_vrr_active && new_vrr_active) {
8246 		/* Transition VRR inactive -> active:
8247 		 * While VRR is active, we must not disable vblank irq, as a
8248 		 * reenable after disable would compute bogus vblank/pflip
8249 		 * timestamps if it likely happened inside display front-porch.
8250 		 *
8251 		 * We also need vupdate irq for the actual core vblank handling
8252 		 * at end of vblank.
8253 		 */
8254 		dm_set_vupdate_irq(new_state->base.crtc, true);
8255 		drm_crtc_vblank_get(new_state->base.crtc);
8256 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8257 				 __func__, new_state->base.crtc->base.id);
8258 	} else if (old_vrr_active && !new_vrr_active) {
8259 		/* Transition VRR active -> inactive:
8260 		 * Allow vblank irq disable again for fixed refresh rate.
8261 		 */
8262 		dm_set_vupdate_irq(new_state->base.crtc, false);
8263 		drm_crtc_vblank_put(new_state->base.crtc);
8264 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8265 				 __func__, new_state->base.crtc->base.id);
8266 	}
8267 }
8268 
8269 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8270 {
8271 	struct drm_plane *plane;
8272 	struct drm_plane_state *old_plane_state;
8273 	int i;
8274 
8275 	/*
8276 	 * TODO: Make this per-stream so we don't issue redundant updates for
8277 	 * commits with multiple streams.
8278 	 */
8279 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8280 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8281 			handle_cursor_update(plane, old_plane_state);
8282 }
8283 
8284 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8285 				    struct dc_state *dc_state,
8286 				    struct drm_device *dev,
8287 				    struct amdgpu_display_manager *dm,
8288 				    struct drm_crtc *pcrtc,
8289 				    bool wait_for_vblank)
8290 {
8291 	uint32_t i;
8292 	uint64_t timestamp_ns;
8293 	struct drm_plane *plane;
8294 	struct drm_plane_state *old_plane_state, *new_plane_state;
8295 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8296 	struct drm_crtc_state *new_pcrtc_state =
8297 			drm_atomic_get_new_crtc_state(state, pcrtc);
8298 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8299 	struct dm_crtc_state *dm_old_crtc_state =
8300 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8301 	int planes_count = 0, vpos, hpos;
8302 	long r;
8303 	unsigned long flags;
8304 	struct amdgpu_bo *abo;
8305 	uint32_t target_vblank, last_flip_vblank;
8306 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8307 	bool pflip_present = false;
8308 	struct {
8309 		struct dc_surface_update surface_updates[MAX_SURFACES];
8310 		struct dc_plane_info plane_infos[MAX_SURFACES];
8311 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8312 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8313 		struct dc_stream_update stream_update;
8314 	} *bundle;
8315 
8316 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8317 
8318 	if (!bundle) {
8319 		dm_error("Failed to allocate update bundle\n");
8320 		goto cleanup;
8321 	}
8322 
8323 	/*
8324 	 * Disable the cursor first if we're disabling all the planes.
8325 	 * It'll remain on the screen after the planes are re-enabled
8326 	 * if we don't.
8327 	 */
8328 	if (acrtc_state->active_planes == 0)
8329 		amdgpu_dm_commit_cursors(state);
8330 
8331 	/* update planes when needed */
8332 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8333 		struct drm_crtc *crtc = new_plane_state->crtc;
8334 		struct drm_crtc_state *new_crtc_state;
8335 		struct drm_framebuffer *fb = new_plane_state->fb;
8336 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8337 		bool plane_needs_flip;
8338 		struct dc_plane_state *dc_plane;
8339 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8340 
8341 		/* Cursor plane is handled after stream updates */
8342 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8343 			continue;
8344 
8345 		if (!fb || !crtc || pcrtc != crtc)
8346 			continue;
8347 
8348 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8349 		if (!new_crtc_state->active)
8350 			continue;
8351 
8352 		dc_plane = dm_new_plane_state->dc_state;
8353 
8354 		bundle->surface_updates[planes_count].surface = dc_plane;
8355 		if (new_pcrtc_state->color_mgmt_changed) {
8356 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8357 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8358 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8359 		}
8360 
8361 		fill_dc_scaling_info(new_plane_state,
8362 				     &bundle->scaling_infos[planes_count]);
8363 
8364 		bundle->surface_updates[planes_count].scaling_info =
8365 			&bundle->scaling_infos[planes_count];
8366 
8367 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8368 
8369 		pflip_present = pflip_present || plane_needs_flip;
8370 
8371 		if (!plane_needs_flip) {
8372 			planes_count += 1;
8373 			continue;
8374 		}
8375 
8376 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8377 
8378 		/*
8379 		 * Wait for all fences on this FB. Do limited wait to avoid
8380 		 * deadlock during GPU reset when this fence will not signal
8381 		 * but we hold reservation lock for the BO.
8382 		 */
8383 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8384 							false,
8385 							msecs_to_jiffies(5000));
8386 		if (unlikely(r <= 0))
8387 			DRM_ERROR("Waiting for fences timed out!");
8388 
8389 		fill_dc_plane_info_and_addr(
8390 			dm->adev, new_plane_state,
8391 			afb->tiling_flags,
8392 			&bundle->plane_infos[planes_count],
8393 			&bundle->flip_addrs[planes_count].address,
8394 			afb->tmz_surface, false);
8395 
8396 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8397 				 new_plane_state->plane->index,
8398 				 bundle->plane_infos[planes_count].dcc.enable);
8399 
8400 		bundle->surface_updates[planes_count].plane_info =
8401 			&bundle->plane_infos[planes_count];
8402 
8403 		/*
8404 		 * Only allow immediate flips for fast updates that don't
8405 		 * change FB pitch, DCC state, rotation or mirroing.
8406 		 */
8407 		bundle->flip_addrs[planes_count].flip_immediate =
8408 			crtc->state->async_flip &&
8409 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8410 
8411 		timestamp_ns = ktime_get_ns();
8412 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8413 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8414 		bundle->surface_updates[planes_count].surface = dc_plane;
8415 
8416 		if (!bundle->surface_updates[planes_count].surface) {
8417 			DRM_ERROR("No surface for CRTC: id=%d\n",
8418 					acrtc_attach->crtc_id);
8419 			continue;
8420 		}
8421 
8422 		if (plane == pcrtc->primary)
8423 			update_freesync_state_on_stream(
8424 				dm,
8425 				acrtc_state,
8426 				acrtc_state->stream,
8427 				dc_plane,
8428 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8429 
8430 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8431 				 __func__,
8432 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8433 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8434 
8435 		planes_count += 1;
8436 
8437 	}
8438 
8439 	if (pflip_present) {
8440 		if (!vrr_active) {
8441 			/* Use old throttling in non-vrr fixed refresh rate mode
8442 			 * to keep flip scheduling based on target vblank counts
8443 			 * working in a backwards compatible way, e.g., for
8444 			 * clients using the GLX_OML_sync_control extension or
8445 			 * DRI3/Present extension with defined target_msc.
8446 			 */
8447 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8448 		}
8449 		else {
8450 			/* For variable refresh rate mode only:
8451 			 * Get vblank of last completed flip to avoid > 1 vrr
8452 			 * flips per video frame by use of throttling, but allow
8453 			 * flip programming anywhere in the possibly large
8454 			 * variable vrr vblank interval for fine-grained flip
8455 			 * timing control and more opportunity to avoid stutter
8456 			 * on late submission of flips.
8457 			 */
8458 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8459 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8460 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8461 		}
8462 
8463 		target_vblank = last_flip_vblank + wait_for_vblank;
8464 
8465 		/*
8466 		 * Wait until we're out of the vertical blank period before the one
8467 		 * targeted by the flip
8468 		 */
8469 		while ((acrtc_attach->enabled &&
8470 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8471 							    0, &vpos, &hpos, NULL,
8472 							    NULL, &pcrtc->hwmode)
8473 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8474 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8475 			(int)(target_vblank -
8476 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8477 			usleep_range(1000, 1100);
8478 		}
8479 
8480 		/**
8481 		 * Prepare the flip event for the pageflip interrupt to handle.
8482 		 *
8483 		 * This only works in the case where we've already turned on the
8484 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
8485 		 * from 0 -> n planes we have to skip a hardware generated event
8486 		 * and rely on sending it from software.
8487 		 */
8488 		if (acrtc_attach->base.state->event &&
8489 		    acrtc_state->active_planes > 0) {
8490 			drm_crtc_vblank_get(pcrtc);
8491 
8492 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8493 
8494 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8495 			prepare_flip_isr(acrtc_attach);
8496 
8497 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8498 		}
8499 
8500 		if (acrtc_state->stream) {
8501 			if (acrtc_state->freesync_vrr_info_changed)
8502 				bundle->stream_update.vrr_infopacket =
8503 					&acrtc_state->stream->vrr_infopacket;
8504 		}
8505 	}
8506 
8507 	/* Update the planes if changed or disable if we don't have any. */
8508 	if ((planes_count || acrtc_state->active_planes == 0) &&
8509 		acrtc_state->stream) {
8510 		bundle->stream_update.stream = acrtc_state->stream;
8511 		if (new_pcrtc_state->mode_changed) {
8512 			bundle->stream_update.src = acrtc_state->stream->src;
8513 			bundle->stream_update.dst = acrtc_state->stream->dst;
8514 		}
8515 
8516 		if (new_pcrtc_state->color_mgmt_changed) {
8517 			/*
8518 			 * TODO: This isn't fully correct since we've actually
8519 			 * already modified the stream in place.
8520 			 */
8521 			bundle->stream_update.gamut_remap =
8522 				&acrtc_state->stream->gamut_remap_matrix;
8523 			bundle->stream_update.output_csc_transform =
8524 				&acrtc_state->stream->csc_color_matrix;
8525 			bundle->stream_update.out_transfer_func =
8526 				acrtc_state->stream->out_transfer_func;
8527 		}
8528 
8529 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8530 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8531 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8532 
8533 		/*
8534 		 * If FreeSync state on the stream has changed then we need to
8535 		 * re-adjust the min/max bounds now that DC doesn't handle this
8536 		 * as part of commit.
8537 		 */
8538 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8539 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8540 			dc_stream_adjust_vmin_vmax(
8541 				dm->dc, acrtc_state->stream,
8542 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8543 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8544 		}
8545 		mutex_lock(&dm->dc_lock);
8546 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8547 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8548 			amdgpu_dm_psr_disable(acrtc_state->stream);
8549 
8550 		dc_commit_updates_for_stream(dm->dc,
8551 						     bundle->surface_updates,
8552 						     planes_count,
8553 						     acrtc_state->stream,
8554 						     &bundle->stream_update,
8555 						     dc_state);
8556 
8557 		/**
8558 		 * Enable or disable the interrupts on the backend.
8559 		 *
8560 		 * Most pipes are put into power gating when unused.
8561 		 *
8562 		 * When power gating is enabled on a pipe we lose the
8563 		 * interrupt enablement state when power gating is disabled.
8564 		 *
8565 		 * So we need to update the IRQ control state in hardware
8566 		 * whenever the pipe turns on (since it could be previously
8567 		 * power gated) or off (since some pipes can't be power gated
8568 		 * on some ASICs).
8569 		 */
8570 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8571 			dm_update_pflip_irq_state(drm_to_adev(dev),
8572 						  acrtc_attach);
8573 
8574 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8575 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8576 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8577 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8578 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8579 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8580 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8581 			amdgpu_dm_psr_enable(acrtc_state->stream);
8582 		}
8583 
8584 		mutex_unlock(&dm->dc_lock);
8585 	}
8586 
8587 	/*
8588 	 * Update cursor state *after* programming all the planes.
8589 	 * This avoids redundant programming in the case where we're going
8590 	 * to be disabling a single plane - those pipes are being disabled.
8591 	 */
8592 	if (acrtc_state->active_planes)
8593 		amdgpu_dm_commit_cursors(state);
8594 
8595 cleanup:
8596 	kfree(bundle);
8597 }
8598 
8599 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8600 				   struct drm_atomic_state *state)
8601 {
8602 	struct amdgpu_device *adev = drm_to_adev(dev);
8603 	struct amdgpu_dm_connector *aconnector;
8604 	struct drm_connector *connector;
8605 	struct drm_connector_state *old_con_state, *new_con_state;
8606 	struct drm_crtc_state *new_crtc_state;
8607 	struct dm_crtc_state *new_dm_crtc_state;
8608 	const struct dc_stream_status *status;
8609 	int i, inst;
8610 
8611 	/* Notify device removals. */
8612 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8613 		if (old_con_state->crtc != new_con_state->crtc) {
8614 			/* CRTC changes require notification. */
8615 			goto notify;
8616 		}
8617 
8618 		if (!new_con_state->crtc)
8619 			continue;
8620 
8621 		new_crtc_state = drm_atomic_get_new_crtc_state(
8622 			state, new_con_state->crtc);
8623 
8624 		if (!new_crtc_state)
8625 			continue;
8626 
8627 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8628 			continue;
8629 
8630 	notify:
8631 		aconnector = to_amdgpu_dm_connector(connector);
8632 
8633 		mutex_lock(&adev->dm.audio_lock);
8634 		inst = aconnector->audio_inst;
8635 		aconnector->audio_inst = -1;
8636 		mutex_unlock(&adev->dm.audio_lock);
8637 
8638 		amdgpu_dm_audio_eld_notify(adev, inst);
8639 	}
8640 
8641 	/* Notify audio device additions. */
8642 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8643 		if (!new_con_state->crtc)
8644 			continue;
8645 
8646 		new_crtc_state = drm_atomic_get_new_crtc_state(
8647 			state, new_con_state->crtc);
8648 
8649 		if (!new_crtc_state)
8650 			continue;
8651 
8652 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8653 			continue;
8654 
8655 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8656 		if (!new_dm_crtc_state->stream)
8657 			continue;
8658 
8659 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8660 		if (!status)
8661 			continue;
8662 
8663 		aconnector = to_amdgpu_dm_connector(connector);
8664 
8665 		mutex_lock(&adev->dm.audio_lock);
8666 		inst = status->audio_inst;
8667 		aconnector->audio_inst = inst;
8668 		mutex_unlock(&adev->dm.audio_lock);
8669 
8670 		amdgpu_dm_audio_eld_notify(adev, inst);
8671 	}
8672 }
8673 
8674 /*
8675  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8676  * @crtc_state: the DRM CRTC state
8677  * @stream_state: the DC stream state.
8678  *
8679  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8680  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8681  */
8682 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8683 						struct dc_stream_state *stream_state)
8684 {
8685 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8686 }
8687 
8688 /**
8689  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8690  * @state: The atomic state to commit
8691  *
8692  * This will tell DC to commit the constructed DC state from atomic_check,
8693  * programming the hardware. Any failures here implies a hardware failure, since
8694  * atomic check should have filtered anything non-kosher.
8695  */
8696 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8697 {
8698 	struct drm_device *dev = state->dev;
8699 	struct amdgpu_device *adev = drm_to_adev(dev);
8700 	struct amdgpu_display_manager *dm = &adev->dm;
8701 	struct dm_atomic_state *dm_state;
8702 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8703 	uint32_t i, j;
8704 	struct drm_crtc *crtc;
8705 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8706 	unsigned long flags;
8707 	bool wait_for_vblank = true;
8708 	struct drm_connector *connector;
8709 	struct drm_connector_state *old_con_state, *new_con_state;
8710 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8711 	int crtc_disable_count = 0;
8712 	bool mode_set_reset_required = false;
8713 
8714 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8715 
8716 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8717 
8718 	dm_state = dm_atomic_get_new_state(state);
8719 	if (dm_state && dm_state->context) {
8720 		dc_state = dm_state->context;
8721 	} else {
8722 		/* No state changes, retain current state. */
8723 		dc_state_temp = dc_create_state(dm->dc);
8724 		ASSERT(dc_state_temp);
8725 		dc_state = dc_state_temp;
8726 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8727 	}
8728 
8729 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8730 				       new_crtc_state, i) {
8731 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8732 
8733 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8734 
8735 		if (old_crtc_state->active &&
8736 		    (!new_crtc_state->active ||
8737 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8738 			manage_dm_interrupts(adev, acrtc, false);
8739 			dc_stream_release(dm_old_crtc_state->stream);
8740 		}
8741 	}
8742 
8743 	drm_atomic_helper_calc_timestamping_constants(state);
8744 
8745 	/* update changed items */
8746 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8747 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8748 
8749 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8750 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8751 
8752 		DRM_DEBUG_ATOMIC(
8753 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8754 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8755 			"connectors_changed:%d\n",
8756 			acrtc->crtc_id,
8757 			new_crtc_state->enable,
8758 			new_crtc_state->active,
8759 			new_crtc_state->planes_changed,
8760 			new_crtc_state->mode_changed,
8761 			new_crtc_state->active_changed,
8762 			new_crtc_state->connectors_changed);
8763 
8764 		/* Disable cursor if disabling crtc */
8765 		if (old_crtc_state->active && !new_crtc_state->active) {
8766 			struct dc_cursor_position position;
8767 
8768 			memset(&position, 0, sizeof(position));
8769 			mutex_lock(&dm->dc_lock);
8770 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8771 			mutex_unlock(&dm->dc_lock);
8772 		}
8773 
8774 		/* Copy all transient state flags into dc state */
8775 		if (dm_new_crtc_state->stream) {
8776 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8777 							    dm_new_crtc_state->stream);
8778 		}
8779 
8780 		/* handles headless hotplug case, updating new_state and
8781 		 * aconnector as needed
8782 		 */
8783 
8784 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8785 
8786 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8787 
8788 			if (!dm_new_crtc_state->stream) {
8789 				/*
8790 				 * this could happen because of issues with
8791 				 * userspace notifications delivery.
8792 				 * In this case userspace tries to set mode on
8793 				 * display which is disconnected in fact.
8794 				 * dc_sink is NULL in this case on aconnector.
8795 				 * We expect reset mode will come soon.
8796 				 *
8797 				 * This can also happen when unplug is done
8798 				 * during resume sequence ended
8799 				 *
8800 				 * In this case, we want to pretend we still
8801 				 * have a sink to keep the pipe running so that
8802 				 * hw state is consistent with the sw state
8803 				 */
8804 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8805 						__func__, acrtc->base.base.id);
8806 				continue;
8807 			}
8808 
8809 			if (dm_old_crtc_state->stream)
8810 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8811 
8812 			pm_runtime_get_noresume(dev->dev);
8813 
8814 			acrtc->enabled = true;
8815 			acrtc->hw_mode = new_crtc_state->mode;
8816 			crtc->hwmode = new_crtc_state->mode;
8817 			mode_set_reset_required = true;
8818 		} else if (modereset_required(new_crtc_state)) {
8819 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8820 			/* i.e. reset mode */
8821 			if (dm_old_crtc_state->stream)
8822 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8823 
8824 			mode_set_reset_required = true;
8825 		}
8826 	} /* for_each_crtc_in_state() */
8827 
8828 	if (dc_state) {
8829 		/* if there mode set or reset, disable eDP PSR */
8830 		if (mode_set_reset_required)
8831 			amdgpu_dm_psr_disable_all(dm);
8832 
8833 		dm_enable_per_frame_crtc_master_sync(dc_state);
8834 		mutex_lock(&dm->dc_lock);
8835 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8836 #if defined(CONFIG_DRM_AMD_DC_DCN)
8837                /* Allow idle optimization when vblank count is 0 for display off */
8838                if (dm->active_vblank_irq_count == 0)
8839                    dc_allow_idle_optimizations(dm->dc,true);
8840 #endif
8841 		mutex_unlock(&dm->dc_lock);
8842 	}
8843 
8844 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8845 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8846 
8847 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8848 
8849 		if (dm_new_crtc_state->stream != NULL) {
8850 			const struct dc_stream_status *status =
8851 					dc_stream_get_status(dm_new_crtc_state->stream);
8852 
8853 			if (!status)
8854 				status = dc_stream_get_status_from_state(dc_state,
8855 									 dm_new_crtc_state->stream);
8856 			if (!status)
8857 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8858 			else
8859 				acrtc->otg_inst = status->primary_otg_inst;
8860 		}
8861 	}
8862 #ifdef CONFIG_DRM_AMD_DC_HDCP
8863 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8864 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8865 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8866 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8867 
8868 		new_crtc_state = NULL;
8869 
8870 		if (acrtc)
8871 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8872 
8873 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8874 
8875 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8876 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8877 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8878 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8879 			dm_new_con_state->update_hdcp = true;
8880 			continue;
8881 		}
8882 
8883 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8884 			hdcp_update_display(
8885 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8886 				new_con_state->hdcp_content_type,
8887 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8888 	}
8889 #endif
8890 
8891 	/* Handle connector state changes */
8892 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8893 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8894 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8895 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8896 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8897 		struct dc_stream_update stream_update;
8898 		struct dc_info_packet hdr_packet;
8899 		struct dc_stream_status *status = NULL;
8900 		bool abm_changed, hdr_changed, scaling_changed;
8901 
8902 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8903 		memset(&stream_update, 0, sizeof(stream_update));
8904 
8905 		if (acrtc) {
8906 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8907 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8908 		}
8909 
8910 		/* Skip any modesets/resets */
8911 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8912 			continue;
8913 
8914 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8915 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8916 
8917 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8918 							     dm_old_con_state);
8919 
8920 		abm_changed = dm_new_crtc_state->abm_level !=
8921 			      dm_old_crtc_state->abm_level;
8922 
8923 		hdr_changed =
8924 			is_hdr_metadata_different(old_con_state, new_con_state);
8925 
8926 		if (!scaling_changed && !abm_changed && !hdr_changed)
8927 			continue;
8928 
8929 		stream_update.stream = dm_new_crtc_state->stream;
8930 		if (scaling_changed) {
8931 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8932 					dm_new_con_state, dm_new_crtc_state->stream);
8933 
8934 			stream_update.src = dm_new_crtc_state->stream->src;
8935 			stream_update.dst = dm_new_crtc_state->stream->dst;
8936 		}
8937 
8938 		if (abm_changed) {
8939 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8940 
8941 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8942 		}
8943 
8944 		if (hdr_changed) {
8945 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8946 			stream_update.hdr_static_metadata = &hdr_packet;
8947 		}
8948 
8949 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8950 		WARN_ON(!status);
8951 		WARN_ON(!status->plane_count);
8952 
8953 		/*
8954 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8955 		 * Here we create an empty update on each plane.
8956 		 * To fix this, DC should permit updating only stream properties.
8957 		 */
8958 		for (j = 0; j < status->plane_count; j++)
8959 			dummy_updates[j].surface = status->plane_states[0];
8960 
8961 
8962 		mutex_lock(&dm->dc_lock);
8963 		dc_commit_updates_for_stream(dm->dc,
8964 						     dummy_updates,
8965 						     status->plane_count,
8966 						     dm_new_crtc_state->stream,
8967 						     &stream_update,
8968 						     dc_state);
8969 		mutex_unlock(&dm->dc_lock);
8970 	}
8971 
8972 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8973 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8974 				      new_crtc_state, i) {
8975 		if (old_crtc_state->active && !new_crtc_state->active)
8976 			crtc_disable_count++;
8977 
8978 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8979 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8980 
8981 		/* For freesync config update on crtc state and params for irq */
8982 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8983 
8984 		/* Handle vrr on->off / off->on transitions */
8985 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8986 						dm_new_crtc_state);
8987 	}
8988 
8989 	/**
8990 	 * Enable interrupts for CRTCs that are newly enabled or went through
8991 	 * a modeset. It was intentionally deferred until after the front end
8992 	 * state was modified to wait until the OTG was on and so the IRQ
8993 	 * handlers didn't access stale or invalid state.
8994 	 */
8995 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8996 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8997 #ifdef CONFIG_DEBUG_FS
8998 		bool configure_crc = false;
8999 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9000 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9001 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9002 #endif
9003 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9004 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9005 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9006 #endif
9007 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9008 
9009 		if (new_crtc_state->active &&
9010 		    (!old_crtc_state->active ||
9011 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9012 			dc_stream_retain(dm_new_crtc_state->stream);
9013 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9014 			manage_dm_interrupts(adev, acrtc, true);
9015 
9016 #ifdef CONFIG_DEBUG_FS
9017 			/**
9018 			 * Frontend may have changed so reapply the CRC capture
9019 			 * settings for the stream.
9020 			 */
9021 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9022 
9023 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9024 				configure_crc = true;
9025 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9026 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9027 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9028 					acrtc->dm_irq_params.crc_window.update_win = true;
9029 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9030 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9031 					crc_rd_wrk->crtc = crtc;
9032 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9033 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9034 				}
9035 #endif
9036 			}
9037 
9038 			if (configure_crc)
9039 				if (amdgpu_dm_crtc_configure_crc_source(
9040 					crtc, dm_new_crtc_state, cur_crc_src))
9041 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9042 #endif
9043 		}
9044 	}
9045 
9046 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9047 		if (new_crtc_state->async_flip)
9048 			wait_for_vblank = false;
9049 
9050 	/* update planes when needed per crtc*/
9051 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9052 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9053 
9054 		if (dm_new_crtc_state->stream)
9055 			amdgpu_dm_commit_planes(state, dc_state, dev,
9056 						dm, crtc, wait_for_vblank);
9057 	}
9058 
9059 	/* Update audio instances for each connector. */
9060 	amdgpu_dm_commit_audio(dev, state);
9061 
9062 	/*
9063 	 * send vblank event on all events not handled in flip and
9064 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9065 	 */
9066 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9067 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9068 
9069 		if (new_crtc_state->event)
9070 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9071 
9072 		new_crtc_state->event = NULL;
9073 	}
9074 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9075 
9076 	/* Signal HW programming completion */
9077 	drm_atomic_helper_commit_hw_done(state);
9078 
9079 	if (wait_for_vblank)
9080 		drm_atomic_helper_wait_for_flip_done(dev, state);
9081 
9082 	drm_atomic_helper_cleanup_planes(dev, state);
9083 
9084 	/* return the stolen vga memory back to VRAM */
9085 	if (!adev->mman.keep_stolen_vga_memory)
9086 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9087 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9088 
9089 	/*
9090 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9091 	 * so we can put the GPU into runtime suspend if we're not driving any
9092 	 * displays anymore
9093 	 */
9094 	for (i = 0; i < crtc_disable_count; i++)
9095 		pm_runtime_put_autosuspend(dev->dev);
9096 	pm_runtime_mark_last_busy(dev->dev);
9097 
9098 	if (dc_state_temp)
9099 		dc_release_state(dc_state_temp);
9100 }
9101 
9102 
9103 static int dm_force_atomic_commit(struct drm_connector *connector)
9104 {
9105 	int ret = 0;
9106 	struct drm_device *ddev = connector->dev;
9107 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9108 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9109 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9110 	struct drm_connector_state *conn_state;
9111 	struct drm_crtc_state *crtc_state;
9112 	struct drm_plane_state *plane_state;
9113 
9114 	if (!state)
9115 		return -ENOMEM;
9116 
9117 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9118 
9119 	/* Construct an atomic state to restore previous display setting */
9120 
9121 	/*
9122 	 * Attach connectors to drm_atomic_state
9123 	 */
9124 	conn_state = drm_atomic_get_connector_state(state, connector);
9125 
9126 	ret = PTR_ERR_OR_ZERO(conn_state);
9127 	if (ret)
9128 		goto out;
9129 
9130 	/* Attach crtc to drm_atomic_state*/
9131 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9132 
9133 	ret = PTR_ERR_OR_ZERO(crtc_state);
9134 	if (ret)
9135 		goto out;
9136 
9137 	/* force a restore */
9138 	crtc_state->mode_changed = true;
9139 
9140 	/* Attach plane to drm_atomic_state */
9141 	plane_state = drm_atomic_get_plane_state(state, plane);
9142 
9143 	ret = PTR_ERR_OR_ZERO(plane_state);
9144 	if (ret)
9145 		goto out;
9146 
9147 	/* Call commit internally with the state we just constructed */
9148 	ret = drm_atomic_commit(state);
9149 
9150 out:
9151 	drm_atomic_state_put(state);
9152 	if (ret)
9153 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9154 
9155 	return ret;
9156 }
9157 
9158 /*
9159  * This function handles all cases when set mode does not come upon hotplug.
9160  * This includes when a display is unplugged then plugged back into the
9161  * same port and when running without usermode desktop manager supprot
9162  */
9163 void dm_restore_drm_connector_state(struct drm_device *dev,
9164 				    struct drm_connector *connector)
9165 {
9166 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9167 	struct amdgpu_crtc *disconnected_acrtc;
9168 	struct dm_crtc_state *acrtc_state;
9169 
9170 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9171 		return;
9172 
9173 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9174 	if (!disconnected_acrtc)
9175 		return;
9176 
9177 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9178 	if (!acrtc_state->stream)
9179 		return;
9180 
9181 	/*
9182 	 * If the previous sink is not released and different from the current,
9183 	 * we deduce we are in a state where we can not rely on usermode call
9184 	 * to turn on the display, so we do it here
9185 	 */
9186 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9187 		dm_force_atomic_commit(&aconnector->base);
9188 }
9189 
9190 /*
9191  * Grabs all modesetting locks to serialize against any blocking commits,
9192  * Waits for completion of all non blocking commits.
9193  */
9194 static int do_aquire_global_lock(struct drm_device *dev,
9195 				 struct drm_atomic_state *state)
9196 {
9197 	struct drm_crtc *crtc;
9198 	struct drm_crtc_commit *commit;
9199 	long ret;
9200 
9201 	/*
9202 	 * Adding all modeset locks to aquire_ctx will
9203 	 * ensure that when the framework release it the
9204 	 * extra locks we are locking here will get released to
9205 	 */
9206 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9207 	if (ret)
9208 		return ret;
9209 
9210 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9211 		spin_lock(&crtc->commit_lock);
9212 		commit = list_first_entry_or_null(&crtc->commit_list,
9213 				struct drm_crtc_commit, commit_entry);
9214 		if (commit)
9215 			drm_crtc_commit_get(commit);
9216 		spin_unlock(&crtc->commit_lock);
9217 
9218 		if (!commit)
9219 			continue;
9220 
9221 		/*
9222 		 * Make sure all pending HW programming completed and
9223 		 * page flips done
9224 		 */
9225 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9226 
9227 		if (ret > 0)
9228 			ret = wait_for_completion_interruptible_timeout(
9229 					&commit->flip_done, 10*HZ);
9230 
9231 		if (ret == 0)
9232 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9233 				  "timed out\n", crtc->base.id, crtc->name);
9234 
9235 		drm_crtc_commit_put(commit);
9236 	}
9237 
9238 	return ret < 0 ? ret : 0;
9239 }
9240 
9241 static void get_freesync_config_for_crtc(
9242 	struct dm_crtc_state *new_crtc_state,
9243 	struct dm_connector_state *new_con_state)
9244 {
9245 	struct mod_freesync_config config = {0};
9246 	struct amdgpu_dm_connector *aconnector =
9247 			to_amdgpu_dm_connector(new_con_state->base.connector);
9248 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9249 	int vrefresh = drm_mode_vrefresh(mode);
9250 	bool fs_vid_mode = false;
9251 
9252 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9253 					vrefresh >= aconnector->min_vfreq &&
9254 					vrefresh <= aconnector->max_vfreq;
9255 
9256 	if (new_crtc_state->vrr_supported) {
9257 		new_crtc_state->stream->ignore_msa_timing_param = true;
9258 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9259 
9260 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9261 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9262 		config.vsif_supported = true;
9263 		config.btr = true;
9264 
9265 		if (fs_vid_mode) {
9266 			config.state = VRR_STATE_ACTIVE_FIXED;
9267 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9268 			goto out;
9269 		} else if (new_crtc_state->base.vrr_enabled) {
9270 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9271 		} else {
9272 			config.state = VRR_STATE_INACTIVE;
9273 		}
9274 	}
9275 out:
9276 	new_crtc_state->freesync_config = config;
9277 }
9278 
9279 static void reset_freesync_config_for_crtc(
9280 	struct dm_crtc_state *new_crtc_state)
9281 {
9282 	new_crtc_state->vrr_supported = false;
9283 
9284 	memset(&new_crtc_state->vrr_infopacket, 0,
9285 	       sizeof(new_crtc_state->vrr_infopacket));
9286 }
9287 
9288 static bool
9289 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9290 				 struct drm_crtc_state *new_crtc_state)
9291 {
9292 	struct drm_display_mode old_mode, new_mode;
9293 
9294 	if (!old_crtc_state || !new_crtc_state)
9295 		return false;
9296 
9297 	old_mode = old_crtc_state->mode;
9298 	new_mode = new_crtc_state->mode;
9299 
9300 	if (old_mode.clock       == new_mode.clock &&
9301 	    old_mode.hdisplay    == new_mode.hdisplay &&
9302 	    old_mode.vdisplay    == new_mode.vdisplay &&
9303 	    old_mode.htotal      == new_mode.htotal &&
9304 	    old_mode.vtotal      != new_mode.vtotal &&
9305 	    old_mode.hsync_start == new_mode.hsync_start &&
9306 	    old_mode.vsync_start != new_mode.vsync_start &&
9307 	    old_mode.hsync_end   == new_mode.hsync_end &&
9308 	    old_mode.vsync_end   != new_mode.vsync_end &&
9309 	    old_mode.hskew       == new_mode.hskew &&
9310 	    old_mode.vscan       == new_mode.vscan &&
9311 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9312 	    (new_mode.vsync_end - new_mode.vsync_start))
9313 		return true;
9314 
9315 	return false;
9316 }
9317 
9318 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9319 	uint64_t num, den, res;
9320 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9321 
9322 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9323 
9324 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9325 	den = (unsigned long long)new_crtc_state->mode.htotal *
9326 	      (unsigned long long)new_crtc_state->mode.vtotal;
9327 
9328 	res = div_u64(num, den);
9329 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9330 }
9331 
9332 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9333 				struct drm_atomic_state *state,
9334 				struct drm_crtc *crtc,
9335 				struct drm_crtc_state *old_crtc_state,
9336 				struct drm_crtc_state *new_crtc_state,
9337 				bool enable,
9338 				bool *lock_and_validation_needed)
9339 {
9340 	struct dm_atomic_state *dm_state = NULL;
9341 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9342 	struct dc_stream_state *new_stream;
9343 	int ret = 0;
9344 
9345 	/*
9346 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9347 	 * update changed items
9348 	 */
9349 	struct amdgpu_crtc *acrtc = NULL;
9350 	struct amdgpu_dm_connector *aconnector = NULL;
9351 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9352 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9353 
9354 	new_stream = NULL;
9355 
9356 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9357 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9358 	acrtc = to_amdgpu_crtc(crtc);
9359 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9360 
9361 	/* TODO This hack should go away */
9362 	if (aconnector && enable) {
9363 		/* Make sure fake sink is created in plug-in scenario */
9364 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9365 							    &aconnector->base);
9366 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9367 							    &aconnector->base);
9368 
9369 		if (IS_ERR(drm_new_conn_state)) {
9370 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9371 			goto fail;
9372 		}
9373 
9374 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9375 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9376 
9377 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9378 			goto skip_modeset;
9379 
9380 		new_stream = create_validate_stream_for_sink(aconnector,
9381 							     &new_crtc_state->mode,
9382 							     dm_new_conn_state,
9383 							     dm_old_crtc_state->stream);
9384 
9385 		/*
9386 		 * we can have no stream on ACTION_SET if a display
9387 		 * was disconnected during S3, in this case it is not an
9388 		 * error, the OS will be updated after detection, and
9389 		 * will do the right thing on next atomic commit
9390 		 */
9391 
9392 		if (!new_stream) {
9393 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9394 					__func__, acrtc->base.base.id);
9395 			ret = -ENOMEM;
9396 			goto fail;
9397 		}
9398 
9399 		/*
9400 		 * TODO: Check VSDB bits to decide whether this should
9401 		 * be enabled or not.
9402 		 */
9403 		new_stream->triggered_crtc_reset.enabled =
9404 			dm->force_timing_sync;
9405 
9406 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9407 
9408 		ret = fill_hdr_info_packet(drm_new_conn_state,
9409 					   &new_stream->hdr_static_metadata);
9410 		if (ret)
9411 			goto fail;
9412 
9413 		/*
9414 		 * If we already removed the old stream from the context
9415 		 * (and set the new stream to NULL) then we can't reuse
9416 		 * the old stream even if the stream and scaling are unchanged.
9417 		 * We'll hit the BUG_ON and black screen.
9418 		 *
9419 		 * TODO: Refactor this function to allow this check to work
9420 		 * in all conditions.
9421 		 */
9422 		if (amdgpu_freesync_vid_mode &&
9423 		    dm_new_crtc_state->stream &&
9424 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9425 			goto skip_modeset;
9426 
9427 		if (dm_new_crtc_state->stream &&
9428 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9429 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9430 			new_crtc_state->mode_changed = false;
9431 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9432 					 new_crtc_state->mode_changed);
9433 		}
9434 	}
9435 
9436 	/* mode_changed flag may get updated above, need to check again */
9437 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9438 		goto skip_modeset;
9439 
9440 	DRM_DEBUG_ATOMIC(
9441 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9442 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9443 		"connectors_changed:%d\n",
9444 		acrtc->crtc_id,
9445 		new_crtc_state->enable,
9446 		new_crtc_state->active,
9447 		new_crtc_state->planes_changed,
9448 		new_crtc_state->mode_changed,
9449 		new_crtc_state->active_changed,
9450 		new_crtc_state->connectors_changed);
9451 
9452 	/* Remove stream for any changed/disabled CRTC */
9453 	if (!enable) {
9454 
9455 		if (!dm_old_crtc_state->stream)
9456 			goto skip_modeset;
9457 
9458 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9459 		    is_timing_unchanged_for_freesync(new_crtc_state,
9460 						     old_crtc_state)) {
9461 			new_crtc_state->mode_changed = false;
9462 			DRM_DEBUG_DRIVER(
9463 				"Mode change not required for front porch change, "
9464 				"setting mode_changed to %d",
9465 				new_crtc_state->mode_changed);
9466 
9467 			set_freesync_fixed_config(dm_new_crtc_state);
9468 
9469 			goto skip_modeset;
9470 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9471 			   is_freesync_video_mode(&new_crtc_state->mode,
9472 						  aconnector)) {
9473 			set_freesync_fixed_config(dm_new_crtc_state);
9474 		}
9475 
9476 		ret = dm_atomic_get_state(state, &dm_state);
9477 		if (ret)
9478 			goto fail;
9479 
9480 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9481 				crtc->base.id);
9482 
9483 		/* i.e. reset mode */
9484 		if (dc_remove_stream_from_ctx(
9485 				dm->dc,
9486 				dm_state->context,
9487 				dm_old_crtc_state->stream) != DC_OK) {
9488 			ret = -EINVAL;
9489 			goto fail;
9490 		}
9491 
9492 		dc_stream_release(dm_old_crtc_state->stream);
9493 		dm_new_crtc_state->stream = NULL;
9494 
9495 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9496 
9497 		*lock_and_validation_needed = true;
9498 
9499 	} else {/* Add stream for any updated/enabled CRTC */
9500 		/*
9501 		 * Quick fix to prevent NULL pointer on new_stream when
9502 		 * added MST connectors not found in existing crtc_state in the chained mode
9503 		 * TODO: need to dig out the root cause of that
9504 		 */
9505 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9506 			goto skip_modeset;
9507 
9508 		if (modereset_required(new_crtc_state))
9509 			goto skip_modeset;
9510 
9511 		if (modeset_required(new_crtc_state, new_stream,
9512 				     dm_old_crtc_state->stream)) {
9513 
9514 			WARN_ON(dm_new_crtc_state->stream);
9515 
9516 			ret = dm_atomic_get_state(state, &dm_state);
9517 			if (ret)
9518 				goto fail;
9519 
9520 			dm_new_crtc_state->stream = new_stream;
9521 
9522 			dc_stream_retain(new_stream);
9523 
9524 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9525 					 crtc->base.id);
9526 
9527 			if (dc_add_stream_to_ctx(
9528 					dm->dc,
9529 					dm_state->context,
9530 					dm_new_crtc_state->stream) != DC_OK) {
9531 				ret = -EINVAL;
9532 				goto fail;
9533 			}
9534 
9535 			*lock_and_validation_needed = true;
9536 		}
9537 	}
9538 
9539 skip_modeset:
9540 	/* Release extra reference */
9541 	if (new_stream)
9542 		 dc_stream_release(new_stream);
9543 
9544 	/*
9545 	 * We want to do dc stream updates that do not require a
9546 	 * full modeset below.
9547 	 */
9548 	if (!(enable && aconnector && new_crtc_state->active))
9549 		return 0;
9550 	/*
9551 	 * Given above conditions, the dc state cannot be NULL because:
9552 	 * 1. We're in the process of enabling CRTCs (just been added
9553 	 *    to the dc context, or already is on the context)
9554 	 * 2. Has a valid connector attached, and
9555 	 * 3. Is currently active and enabled.
9556 	 * => The dc stream state currently exists.
9557 	 */
9558 	BUG_ON(dm_new_crtc_state->stream == NULL);
9559 
9560 	/* Scaling or underscan settings */
9561 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9562 		update_stream_scaling_settings(
9563 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9564 
9565 	/* ABM settings */
9566 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9567 
9568 	/*
9569 	 * Color management settings. We also update color properties
9570 	 * when a modeset is needed, to ensure it gets reprogrammed.
9571 	 */
9572 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9573 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9574 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9575 		if (ret)
9576 			goto fail;
9577 	}
9578 
9579 	/* Update Freesync settings. */
9580 	get_freesync_config_for_crtc(dm_new_crtc_state,
9581 				     dm_new_conn_state);
9582 
9583 	return ret;
9584 
9585 fail:
9586 	if (new_stream)
9587 		dc_stream_release(new_stream);
9588 	return ret;
9589 }
9590 
9591 static bool should_reset_plane(struct drm_atomic_state *state,
9592 			       struct drm_plane *plane,
9593 			       struct drm_plane_state *old_plane_state,
9594 			       struct drm_plane_state *new_plane_state)
9595 {
9596 	struct drm_plane *other;
9597 	struct drm_plane_state *old_other_state, *new_other_state;
9598 	struct drm_crtc_state *new_crtc_state;
9599 	int i;
9600 
9601 	/*
9602 	 * TODO: Remove this hack once the checks below are sufficient
9603 	 * enough to determine when we need to reset all the planes on
9604 	 * the stream.
9605 	 */
9606 	if (state->allow_modeset)
9607 		return true;
9608 
9609 	/* Exit early if we know that we're adding or removing the plane. */
9610 	if (old_plane_state->crtc != new_plane_state->crtc)
9611 		return true;
9612 
9613 	/* old crtc == new_crtc == NULL, plane not in context. */
9614 	if (!new_plane_state->crtc)
9615 		return false;
9616 
9617 	new_crtc_state =
9618 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9619 
9620 	if (!new_crtc_state)
9621 		return true;
9622 
9623 	/* CRTC Degamma changes currently require us to recreate planes. */
9624 	if (new_crtc_state->color_mgmt_changed)
9625 		return true;
9626 
9627 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9628 		return true;
9629 
9630 	/*
9631 	 * If there are any new primary or overlay planes being added or
9632 	 * removed then the z-order can potentially change. To ensure
9633 	 * correct z-order and pipe acquisition the current DC architecture
9634 	 * requires us to remove and recreate all existing planes.
9635 	 *
9636 	 * TODO: Come up with a more elegant solution for this.
9637 	 */
9638 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9639 		struct amdgpu_framebuffer *old_afb, *new_afb;
9640 		if (other->type == DRM_PLANE_TYPE_CURSOR)
9641 			continue;
9642 
9643 		if (old_other_state->crtc != new_plane_state->crtc &&
9644 		    new_other_state->crtc != new_plane_state->crtc)
9645 			continue;
9646 
9647 		if (old_other_state->crtc != new_other_state->crtc)
9648 			return true;
9649 
9650 		/* Src/dst size and scaling updates. */
9651 		if (old_other_state->src_w != new_other_state->src_w ||
9652 		    old_other_state->src_h != new_other_state->src_h ||
9653 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9654 		    old_other_state->crtc_h != new_other_state->crtc_h)
9655 			return true;
9656 
9657 		/* Rotation / mirroring updates. */
9658 		if (old_other_state->rotation != new_other_state->rotation)
9659 			return true;
9660 
9661 		/* Blending updates. */
9662 		if (old_other_state->pixel_blend_mode !=
9663 		    new_other_state->pixel_blend_mode)
9664 			return true;
9665 
9666 		/* Alpha updates. */
9667 		if (old_other_state->alpha != new_other_state->alpha)
9668 			return true;
9669 
9670 		/* Colorspace changes. */
9671 		if (old_other_state->color_range != new_other_state->color_range ||
9672 		    old_other_state->color_encoding != new_other_state->color_encoding)
9673 			return true;
9674 
9675 		/* Framebuffer checks fall at the end. */
9676 		if (!old_other_state->fb || !new_other_state->fb)
9677 			continue;
9678 
9679 		/* Pixel format changes can require bandwidth updates. */
9680 		if (old_other_state->fb->format != new_other_state->fb->format)
9681 			return true;
9682 
9683 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9684 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9685 
9686 		/* Tiling and DCC changes also require bandwidth updates. */
9687 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9688 		    old_afb->base.modifier != new_afb->base.modifier)
9689 			return true;
9690 	}
9691 
9692 	return false;
9693 }
9694 
9695 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9696 			      struct drm_plane_state *new_plane_state,
9697 			      struct drm_framebuffer *fb)
9698 {
9699 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9700 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9701 	unsigned int pitch;
9702 	bool linear;
9703 
9704 	if (fb->width > new_acrtc->max_cursor_width ||
9705 	    fb->height > new_acrtc->max_cursor_height) {
9706 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9707 				 new_plane_state->fb->width,
9708 				 new_plane_state->fb->height);
9709 		return -EINVAL;
9710 	}
9711 	if (new_plane_state->src_w != fb->width << 16 ||
9712 	    new_plane_state->src_h != fb->height << 16) {
9713 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9714 		return -EINVAL;
9715 	}
9716 
9717 	/* Pitch in pixels */
9718 	pitch = fb->pitches[0] / fb->format->cpp[0];
9719 
9720 	if (fb->width != pitch) {
9721 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9722 				 fb->width, pitch);
9723 		return -EINVAL;
9724 	}
9725 
9726 	switch (pitch) {
9727 	case 64:
9728 	case 128:
9729 	case 256:
9730 		/* FB pitch is supported by cursor plane */
9731 		break;
9732 	default:
9733 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9734 		return -EINVAL;
9735 	}
9736 
9737 	/* Core DRM takes care of checking FB modifiers, so we only need to
9738 	 * check tiling flags when the FB doesn't have a modifier. */
9739 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9740 		if (adev->family < AMDGPU_FAMILY_AI) {
9741 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9742 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9743 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9744 		} else {
9745 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9746 		}
9747 		if (!linear) {
9748 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9749 			return -EINVAL;
9750 		}
9751 	}
9752 
9753 	return 0;
9754 }
9755 
9756 static int dm_update_plane_state(struct dc *dc,
9757 				 struct drm_atomic_state *state,
9758 				 struct drm_plane *plane,
9759 				 struct drm_plane_state *old_plane_state,
9760 				 struct drm_plane_state *new_plane_state,
9761 				 bool enable,
9762 				 bool *lock_and_validation_needed)
9763 {
9764 
9765 	struct dm_atomic_state *dm_state = NULL;
9766 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9767 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9768 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9769 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9770 	struct amdgpu_crtc *new_acrtc;
9771 	bool needs_reset;
9772 	int ret = 0;
9773 
9774 
9775 	new_plane_crtc = new_plane_state->crtc;
9776 	old_plane_crtc = old_plane_state->crtc;
9777 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9778 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9779 
9780 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9781 		if (!enable || !new_plane_crtc ||
9782 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9783 			return 0;
9784 
9785 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9786 
9787 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9788 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9789 			return -EINVAL;
9790 		}
9791 
9792 		if (new_plane_state->fb) {
9793 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9794 						 new_plane_state->fb);
9795 			if (ret)
9796 				return ret;
9797 		}
9798 
9799 		return 0;
9800 	}
9801 
9802 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9803 					 new_plane_state);
9804 
9805 	/* Remove any changed/removed planes */
9806 	if (!enable) {
9807 		if (!needs_reset)
9808 			return 0;
9809 
9810 		if (!old_plane_crtc)
9811 			return 0;
9812 
9813 		old_crtc_state = drm_atomic_get_old_crtc_state(
9814 				state, old_plane_crtc);
9815 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9816 
9817 		if (!dm_old_crtc_state->stream)
9818 			return 0;
9819 
9820 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9821 				plane->base.id, old_plane_crtc->base.id);
9822 
9823 		ret = dm_atomic_get_state(state, &dm_state);
9824 		if (ret)
9825 			return ret;
9826 
9827 		if (!dc_remove_plane_from_context(
9828 				dc,
9829 				dm_old_crtc_state->stream,
9830 				dm_old_plane_state->dc_state,
9831 				dm_state->context)) {
9832 
9833 			return -EINVAL;
9834 		}
9835 
9836 
9837 		dc_plane_state_release(dm_old_plane_state->dc_state);
9838 		dm_new_plane_state->dc_state = NULL;
9839 
9840 		*lock_and_validation_needed = true;
9841 
9842 	} else { /* Add new planes */
9843 		struct dc_plane_state *dc_new_plane_state;
9844 
9845 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9846 			return 0;
9847 
9848 		if (!new_plane_crtc)
9849 			return 0;
9850 
9851 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9852 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9853 
9854 		if (!dm_new_crtc_state->stream)
9855 			return 0;
9856 
9857 		if (!needs_reset)
9858 			return 0;
9859 
9860 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9861 		if (ret)
9862 			return ret;
9863 
9864 		WARN_ON(dm_new_plane_state->dc_state);
9865 
9866 		dc_new_plane_state = dc_create_plane_state(dc);
9867 		if (!dc_new_plane_state)
9868 			return -ENOMEM;
9869 
9870 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9871 				 plane->base.id, new_plane_crtc->base.id);
9872 
9873 		ret = fill_dc_plane_attributes(
9874 			drm_to_adev(new_plane_crtc->dev),
9875 			dc_new_plane_state,
9876 			new_plane_state,
9877 			new_crtc_state);
9878 		if (ret) {
9879 			dc_plane_state_release(dc_new_plane_state);
9880 			return ret;
9881 		}
9882 
9883 		ret = dm_atomic_get_state(state, &dm_state);
9884 		if (ret) {
9885 			dc_plane_state_release(dc_new_plane_state);
9886 			return ret;
9887 		}
9888 
9889 		/*
9890 		 * Any atomic check errors that occur after this will
9891 		 * not need a release. The plane state will be attached
9892 		 * to the stream, and therefore part of the atomic
9893 		 * state. It'll be released when the atomic state is
9894 		 * cleaned.
9895 		 */
9896 		if (!dc_add_plane_to_context(
9897 				dc,
9898 				dm_new_crtc_state->stream,
9899 				dc_new_plane_state,
9900 				dm_state->context)) {
9901 
9902 			dc_plane_state_release(dc_new_plane_state);
9903 			return -EINVAL;
9904 		}
9905 
9906 		dm_new_plane_state->dc_state = dc_new_plane_state;
9907 
9908 		/* Tell DC to do a full surface update every time there
9909 		 * is a plane change. Inefficient, but works for now.
9910 		 */
9911 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9912 
9913 		*lock_and_validation_needed = true;
9914 	}
9915 
9916 
9917 	return ret;
9918 }
9919 
9920 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9921 				struct drm_crtc *crtc,
9922 				struct drm_crtc_state *new_crtc_state)
9923 {
9924 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9925 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9926 
9927 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9928 	 * cursor per pipe but it's going to inherit the scaling and
9929 	 * positioning from the underlying pipe. Check the cursor plane's
9930 	 * blending properties match the primary plane's. */
9931 
9932 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9933 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9934 	if (!new_cursor_state || !new_primary_state ||
9935 	    !new_cursor_state->fb || !new_primary_state->fb) {
9936 		return 0;
9937 	}
9938 
9939 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9940 			 (new_cursor_state->src_w >> 16);
9941 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9942 			 (new_cursor_state->src_h >> 16);
9943 
9944 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9945 			 (new_primary_state->src_w >> 16);
9946 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9947 			 (new_primary_state->src_h >> 16);
9948 
9949 	if (cursor_scale_w != primary_scale_w ||
9950 	    cursor_scale_h != primary_scale_h) {
9951 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9952 		return -EINVAL;
9953 	}
9954 
9955 	return 0;
9956 }
9957 
9958 #if defined(CONFIG_DRM_AMD_DC_DCN)
9959 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9960 {
9961 	struct drm_connector *connector;
9962 	struct drm_connector_state *conn_state;
9963 	struct amdgpu_dm_connector *aconnector = NULL;
9964 	int i;
9965 	for_each_new_connector_in_state(state, connector, conn_state, i) {
9966 		if (conn_state->crtc != crtc)
9967 			continue;
9968 
9969 		aconnector = to_amdgpu_dm_connector(connector);
9970 		if (!aconnector->port || !aconnector->mst_port)
9971 			aconnector = NULL;
9972 		else
9973 			break;
9974 	}
9975 
9976 	if (!aconnector)
9977 		return 0;
9978 
9979 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9980 }
9981 #endif
9982 
9983 static int validate_overlay(struct drm_atomic_state *state)
9984 {
9985 	int i;
9986 	struct drm_plane *plane;
9987 	struct drm_plane_state *old_plane_state, *new_plane_state;
9988 	struct drm_plane_state *primary_state, *overlay_state = NULL;
9989 
9990 	/* Check if primary plane is contained inside overlay */
9991 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9992 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9993 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9994 				return 0;
9995 
9996 			overlay_state = new_plane_state;
9997 			continue;
9998 		}
9999 	}
10000 
10001 	/* check if we're making changes to the overlay plane */
10002 	if (!overlay_state)
10003 		return 0;
10004 
10005 	/* check if overlay plane is enabled */
10006 	if (!overlay_state->crtc)
10007 		return 0;
10008 
10009 	/* find the primary plane for the CRTC that the overlay is enabled on */
10010 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10011 	if (IS_ERR(primary_state))
10012 		return PTR_ERR(primary_state);
10013 
10014 	/* check if primary plane is enabled */
10015 	if (!primary_state->crtc)
10016 		return 0;
10017 
10018 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10019 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10020 	    primary_state->crtc_y < overlay_state->crtc_y ||
10021 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10022 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10023 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10024 		return -EINVAL;
10025 	}
10026 
10027 	return 0;
10028 }
10029 
10030 /**
10031  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10032  * @dev: The DRM device
10033  * @state: The atomic state to commit
10034  *
10035  * Validate that the given atomic state is programmable by DC into hardware.
10036  * This involves constructing a &struct dc_state reflecting the new hardware
10037  * state we wish to commit, then querying DC to see if it is programmable. It's
10038  * important not to modify the existing DC state. Otherwise, atomic_check
10039  * may unexpectedly commit hardware changes.
10040  *
10041  * When validating the DC state, it's important that the right locks are
10042  * acquired. For full updates case which removes/adds/updates streams on one
10043  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10044  * that any such full update commit will wait for completion of any outstanding
10045  * flip using DRMs synchronization events.
10046  *
10047  * Note that DM adds the affected connectors for all CRTCs in state, when that
10048  * might not seem necessary. This is because DC stream creation requires the
10049  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10050  * be possible but non-trivial - a possible TODO item.
10051  *
10052  * Return: -Error code if validation failed.
10053  */
10054 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10055 				  struct drm_atomic_state *state)
10056 {
10057 	struct amdgpu_device *adev = drm_to_adev(dev);
10058 	struct dm_atomic_state *dm_state = NULL;
10059 	struct dc *dc = adev->dm.dc;
10060 	struct drm_connector *connector;
10061 	struct drm_connector_state *old_con_state, *new_con_state;
10062 	struct drm_crtc *crtc;
10063 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10064 	struct drm_plane *plane;
10065 	struct drm_plane_state *old_plane_state, *new_plane_state;
10066 	enum dc_status status;
10067 	int ret, i;
10068 	bool lock_and_validation_needed = false;
10069 	struct dm_crtc_state *dm_old_crtc_state;
10070 
10071 	trace_amdgpu_dm_atomic_check_begin(state);
10072 
10073 	ret = drm_atomic_helper_check_modeset(dev, state);
10074 	if (ret)
10075 		goto fail;
10076 
10077 	/* Check connector changes */
10078 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10079 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10080 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10081 
10082 		/* Skip connectors that are disabled or part of modeset already. */
10083 		if (!old_con_state->crtc && !new_con_state->crtc)
10084 			continue;
10085 
10086 		if (!new_con_state->crtc)
10087 			continue;
10088 
10089 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10090 		if (IS_ERR(new_crtc_state)) {
10091 			ret = PTR_ERR(new_crtc_state);
10092 			goto fail;
10093 		}
10094 
10095 		if (dm_old_con_state->abm_level !=
10096 		    dm_new_con_state->abm_level)
10097 			new_crtc_state->connectors_changed = true;
10098 	}
10099 
10100 #if defined(CONFIG_DRM_AMD_DC_DCN)
10101 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10102 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10103 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10104 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10105 				if (ret)
10106 					goto fail;
10107 			}
10108 		}
10109 	}
10110 #endif
10111 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10112 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10113 
10114 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10115 		    !new_crtc_state->color_mgmt_changed &&
10116 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10117 			dm_old_crtc_state->dsc_force_changed == false)
10118 			continue;
10119 
10120 		if (!new_crtc_state->enable)
10121 			continue;
10122 
10123 		ret = drm_atomic_add_affected_connectors(state, crtc);
10124 		if (ret)
10125 			return ret;
10126 
10127 		ret = drm_atomic_add_affected_planes(state, crtc);
10128 		if (ret)
10129 			goto fail;
10130 
10131 		if (dm_old_crtc_state->dsc_force_changed)
10132 			new_crtc_state->mode_changed = true;
10133 	}
10134 
10135 	/*
10136 	 * Add all primary and overlay planes on the CRTC to the state
10137 	 * whenever a plane is enabled to maintain correct z-ordering
10138 	 * and to enable fast surface updates.
10139 	 */
10140 	drm_for_each_crtc(crtc, dev) {
10141 		bool modified = false;
10142 
10143 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10144 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10145 				continue;
10146 
10147 			if (new_plane_state->crtc == crtc ||
10148 			    old_plane_state->crtc == crtc) {
10149 				modified = true;
10150 				break;
10151 			}
10152 		}
10153 
10154 		if (!modified)
10155 			continue;
10156 
10157 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10158 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10159 				continue;
10160 
10161 			new_plane_state =
10162 				drm_atomic_get_plane_state(state, plane);
10163 
10164 			if (IS_ERR(new_plane_state)) {
10165 				ret = PTR_ERR(new_plane_state);
10166 				goto fail;
10167 			}
10168 		}
10169 	}
10170 
10171 	/* Remove exiting planes if they are modified */
10172 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10173 		ret = dm_update_plane_state(dc, state, plane,
10174 					    old_plane_state,
10175 					    new_plane_state,
10176 					    false,
10177 					    &lock_and_validation_needed);
10178 		if (ret)
10179 			goto fail;
10180 	}
10181 
10182 	/* Disable all crtcs which require disable */
10183 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10184 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10185 					   old_crtc_state,
10186 					   new_crtc_state,
10187 					   false,
10188 					   &lock_and_validation_needed);
10189 		if (ret)
10190 			goto fail;
10191 	}
10192 
10193 	/* Enable all crtcs which require enable */
10194 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10195 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10196 					   old_crtc_state,
10197 					   new_crtc_state,
10198 					   true,
10199 					   &lock_and_validation_needed);
10200 		if (ret)
10201 			goto fail;
10202 	}
10203 
10204 	ret = validate_overlay(state);
10205 	if (ret)
10206 		goto fail;
10207 
10208 	/* Add new/modified planes */
10209 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10210 		ret = dm_update_plane_state(dc, state, plane,
10211 					    old_plane_state,
10212 					    new_plane_state,
10213 					    true,
10214 					    &lock_and_validation_needed);
10215 		if (ret)
10216 			goto fail;
10217 	}
10218 
10219 	/* Run this here since we want to validate the streams we created */
10220 	ret = drm_atomic_helper_check_planes(dev, state);
10221 	if (ret)
10222 		goto fail;
10223 
10224 	/* Check cursor planes scaling */
10225 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10226 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10227 		if (ret)
10228 			goto fail;
10229 	}
10230 
10231 	if (state->legacy_cursor_update) {
10232 		/*
10233 		 * This is a fast cursor update coming from the plane update
10234 		 * helper, check if it can be done asynchronously for better
10235 		 * performance.
10236 		 */
10237 		state->async_update =
10238 			!drm_atomic_helper_async_check(dev, state);
10239 
10240 		/*
10241 		 * Skip the remaining global validation if this is an async
10242 		 * update. Cursor updates can be done without affecting
10243 		 * state or bandwidth calcs and this avoids the performance
10244 		 * penalty of locking the private state object and
10245 		 * allocating a new dc_state.
10246 		 */
10247 		if (state->async_update)
10248 			return 0;
10249 	}
10250 
10251 	/* Check scaling and underscan changes*/
10252 	/* TODO Removed scaling changes validation due to inability to commit
10253 	 * new stream into context w\o causing full reset. Need to
10254 	 * decide how to handle.
10255 	 */
10256 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10257 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10258 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10259 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10260 
10261 		/* Skip any modesets/resets */
10262 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10263 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10264 			continue;
10265 
10266 		/* Skip any thing not scale or underscan changes */
10267 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10268 			continue;
10269 
10270 		lock_and_validation_needed = true;
10271 	}
10272 
10273 	/**
10274 	 * Streams and planes are reset when there are changes that affect
10275 	 * bandwidth. Anything that affects bandwidth needs to go through
10276 	 * DC global validation to ensure that the configuration can be applied
10277 	 * to hardware.
10278 	 *
10279 	 * We have to currently stall out here in atomic_check for outstanding
10280 	 * commits to finish in this case because our IRQ handlers reference
10281 	 * DRM state directly - we can end up disabling interrupts too early
10282 	 * if we don't.
10283 	 *
10284 	 * TODO: Remove this stall and drop DM state private objects.
10285 	 */
10286 	if (lock_and_validation_needed) {
10287 		ret = dm_atomic_get_state(state, &dm_state);
10288 		if (ret)
10289 			goto fail;
10290 
10291 		ret = do_aquire_global_lock(dev, state);
10292 		if (ret)
10293 			goto fail;
10294 
10295 #if defined(CONFIG_DRM_AMD_DC_DCN)
10296 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10297 			goto fail;
10298 
10299 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10300 		if (ret)
10301 			goto fail;
10302 #endif
10303 
10304 		/*
10305 		 * Perform validation of MST topology in the state:
10306 		 * We need to perform MST atomic check before calling
10307 		 * dc_validate_global_state(), or there is a chance
10308 		 * to get stuck in an infinite loop and hang eventually.
10309 		 */
10310 		ret = drm_dp_mst_atomic_check(state);
10311 		if (ret)
10312 			goto fail;
10313 		status = dc_validate_global_state(dc, dm_state->context, false);
10314 		if (status != DC_OK) {
10315 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10316 				       dc_status_to_str(status), status);
10317 			ret = -EINVAL;
10318 			goto fail;
10319 		}
10320 	} else {
10321 		/*
10322 		 * The commit is a fast update. Fast updates shouldn't change
10323 		 * the DC context, affect global validation, and can have their
10324 		 * commit work done in parallel with other commits not touching
10325 		 * the same resource. If we have a new DC context as part of
10326 		 * the DM atomic state from validation we need to free it and
10327 		 * retain the existing one instead.
10328 		 *
10329 		 * Furthermore, since the DM atomic state only contains the DC
10330 		 * context and can safely be annulled, we can free the state
10331 		 * and clear the associated private object now to free
10332 		 * some memory and avoid a possible use-after-free later.
10333 		 */
10334 
10335 		for (i = 0; i < state->num_private_objs; i++) {
10336 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10337 
10338 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
10339 				int j = state->num_private_objs-1;
10340 
10341 				dm_atomic_destroy_state(obj,
10342 						state->private_objs[i].state);
10343 
10344 				/* If i is not at the end of the array then the
10345 				 * last element needs to be moved to where i was
10346 				 * before the array can safely be truncated.
10347 				 */
10348 				if (i != j)
10349 					state->private_objs[i] =
10350 						state->private_objs[j];
10351 
10352 				state->private_objs[j].ptr = NULL;
10353 				state->private_objs[j].state = NULL;
10354 				state->private_objs[j].old_state = NULL;
10355 				state->private_objs[j].new_state = NULL;
10356 
10357 				state->num_private_objs = j;
10358 				break;
10359 			}
10360 		}
10361 	}
10362 
10363 	/* Store the overall update type for use later in atomic check. */
10364 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10365 		struct dm_crtc_state *dm_new_crtc_state =
10366 			to_dm_crtc_state(new_crtc_state);
10367 
10368 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10369 							 UPDATE_TYPE_FULL :
10370 							 UPDATE_TYPE_FAST;
10371 	}
10372 
10373 	/* Must be success */
10374 	WARN_ON(ret);
10375 
10376 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10377 
10378 	return ret;
10379 
10380 fail:
10381 	if (ret == -EDEADLK)
10382 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10383 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10384 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10385 	else
10386 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10387 
10388 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10389 
10390 	return ret;
10391 }
10392 
10393 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10394 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10395 {
10396 	uint8_t dpcd_data;
10397 	bool capable = false;
10398 
10399 	if (amdgpu_dm_connector->dc_link &&
10400 		dm_helpers_dp_read_dpcd(
10401 				NULL,
10402 				amdgpu_dm_connector->dc_link,
10403 				DP_DOWN_STREAM_PORT_COUNT,
10404 				&dpcd_data,
10405 				sizeof(dpcd_data))) {
10406 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
10407 	}
10408 
10409 	return capable;
10410 }
10411 
10412 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10413 		uint8_t *edid_ext, int len,
10414 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10415 {
10416 	int i;
10417 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10418 	struct dc *dc = adev->dm.dc;
10419 
10420 	/* send extension block to DMCU for parsing */
10421 	for (i = 0; i < len; i += 8) {
10422 		bool res;
10423 		int offset;
10424 
10425 		/* send 8 bytes a time */
10426 		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10427 			return false;
10428 
10429 		if (i+8 == len) {
10430 			/* EDID block sent completed, expect result */
10431 			int version, min_rate, max_rate;
10432 
10433 			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10434 			if (res) {
10435 				/* amd vsdb found */
10436 				vsdb_info->freesync_supported = 1;
10437 				vsdb_info->amd_vsdb_version = version;
10438 				vsdb_info->min_refresh_rate_hz = min_rate;
10439 				vsdb_info->max_refresh_rate_hz = max_rate;
10440 				return true;
10441 			}
10442 			/* not amd vsdb */
10443 			return false;
10444 		}
10445 
10446 		/* check for ack*/
10447 		res = dc_edid_parser_recv_cea_ack(dc, &offset);
10448 		if (!res)
10449 			return false;
10450 	}
10451 
10452 	return false;
10453 }
10454 
10455 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10456 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10457 {
10458 	uint8_t *edid_ext = NULL;
10459 	int i;
10460 	bool valid_vsdb_found = false;
10461 
10462 	/*----- drm_find_cea_extension() -----*/
10463 	/* No EDID or EDID extensions */
10464 	if (edid == NULL || edid->extensions == 0)
10465 		return -ENODEV;
10466 
10467 	/* Find CEA extension */
10468 	for (i = 0; i < edid->extensions; i++) {
10469 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10470 		if (edid_ext[0] == CEA_EXT)
10471 			break;
10472 	}
10473 
10474 	if (i == edid->extensions)
10475 		return -ENODEV;
10476 
10477 	/*----- cea_db_offsets() -----*/
10478 	if (edid_ext[0] != CEA_EXT)
10479 		return -ENODEV;
10480 
10481 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10482 
10483 	return valid_vsdb_found ? i : -ENODEV;
10484 }
10485 
10486 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10487 					struct edid *edid)
10488 {
10489 	int i = 0;
10490 	struct detailed_timing *timing;
10491 	struct detailed_non_pixel *data;
10492 	struct detailed_data_monitor_range *range;
10493 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10494 			to_amdgpu_dm_connector(connector);
10495 	struct dm_connector_state *dm_con_state = NULL;
10496 
10497 	struct drm_device *dev = connector->dev;
10498 	struct amdgpu_device *adev = drm_to_adev(dev);
10499 	bool freesync_capable = false;
10500 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10501 
10502 	if (!connector->state) {
10503 		DRM_ERROR("%s - Connector has no state", __func__);
10504 		goto update;
10505 	}
10506 
10507 	if (!edid) {
10508 		dm_con_state = to_dm_connector_state(connector->state);
10509 
10510 		amdgpu_dm_connector->min_vfreq = 0;
10511 		amdgpu_dm_connector->max_vfreq = 0;
10512 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10513 
10514 		goto update;
10515 	}
10516 
10517 	dm_con_state = to_dm_connector_state(connector->state);
10518 
10519 	if (!amdgpu_dm_connector->dc_sink) {
10520 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10521 		goto update;
10522 	}
10523 	if (!adev->dm.freesync_module)
10524 		goto update;
10525 
10526 
10527 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10528 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10529 		bool edid_check_required = false;
10530 
10531 		if (edid) {
10532 			edid_check_required = is_dp_capable_without_timing_msa(
10533 						adev->dm.dc,
10534 						amdgpu_dm_connector);
10535 		}
10536 
10537 		if (edid_check_required == true && (edid->version > 1 ||
10538 		   (edid->version == 1 && edid->revision > 1))) {
10539 			for (i = 0; i < 4; i++) {
10540 
10541 				timing	= &edid->detailed_timings[i];
10542 				data	= &timing->data.other_data;
10543 				range	= &data->data.range;
10544 				/*
10545 				 * Check if monitor has continuous frequency mode
10546 				 */
10547 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10548 					continue;
10549 				/*
10550 				 * Check for flag range limits only. If flag == 1 then
10551 				 * no additional timing information provided.
10552 				 * Default GTF, GTF Secondary curve and CVT are not
10553 				 * supported
10554 				 */
10555 				if (range->flags != 1)
10556 					continue;
10557 
10558 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10559 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10560 				amdgpu_dm_connector->pixel_clock_mhz =
10561 					range->pixel_clock_mhz * 10;
10562 
10563 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10564 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10565 
10566 				break;
10567 			}
10568 
10569 			if (amdgpu_dm_connector->max_vfreq -
10570 			    amdgpu_dm_connector->min_vfreq > 10) {
10571 
10572 				freesync_capable = true;
10573 			}
10574 		}
10575 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10576 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10577 		if (i >= 0 && vsdb_info.freesync_supported) {
10578 			timing  = &edid->detailed_timings[i];
10579 			data    = &timing->data.other_data;
10580 
10581 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10582 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10583 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10584 				freesync_capable = true;
10585 
10586 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10587 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10588 		}
10589 	}
10590 
10591 update:
10592 	if (dm_con_state)
10593 		dm_con_state->freesync_capable = freesync_capable;
10594 
10595 	if (connector->vrr_capable_property)
10596 		drm_connector_set_vrr_capable_property(connector,
10597 						       freesync_capable);
10598 }
10599 
10600 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10601 {
10602 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10603 
10604 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10605 		return;
10606 	if (link->type == dc_connection_none)
10607 		return;
10608 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10609 					dpcd_data, sizeof(dpcd_data))) {
10610 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10611 
10612 		if (dpcd_data[0] == 0) {
10613 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10614 			link->psr_settings.psr_feature_enabled = false;
10615 		} else {
10616 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
10617 			link->psr_settings.psr_feature_enabled = true;
10618 		}
10619 
10620 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10621 	}
10622 }
10623 
10624 /*
10625  * amdgpu_dm_link_setup_psr() - configure psr link
10626  * @stream: stream state
10627  *
10628  * Return: true if success
10629  */
10630 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10631 {
10632 	struct dc_link *link = NULL;
10633 	struct psr_config psr_config = {0};
10634 	struct psr_context psr_context = {0};
10635 	bool ret = false;
10636 
10637 	if (stream == NULL)
10638 		return false;
10639 
10640 	link = stream->link;
10641 
10642 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10643 
10644 	if (psr_config.psr_version > 0) {
10645 		psr_config.psr_exit_link_training_required = 0x1;
10646 		psr_config.psr_frame_capture_indication_req = 0;
10647 		psr_config.psr_rfb_setup_time = 0x37;
10648 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10649 		psr_config.allow_smu_optimizations = 0x0;
10650 
10651 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10652 
10653 	}
10654 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
10655 
10656 	return ret;
10657 }
10658 
10659 /*
10660  * amdgpu_dm_psr_enable() - enable psr f/w
10661  * @stream: stream state
10662  *
10663  * Return: true if success
10664  */
10665 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10666 {
10667 	struct dc_link *link = stream->link;
10668 	unsigned int vsync_rate_hz = 0;
10669 	struct dc_static_screen_params params = {0};
10670 	/* Calculate number of static frames before generating interrupt to
10671 	 * enter PSR.
10672 	 */
10673 	// Init fail safe of 2 frames static
10674 	unsigned int num_frames_static = 2;
10675 
10676 	DRM_DEBUG_DRIVER("Enabling psr...\n");
10677 
10678 	vsync_rate_hz = div64_u64(div64_u64((
10679 			stream->timing.pix_clk_100hz * 100),
10680 			stream->timing.v_total),
10681 			stream->timing.h_total);
10682 
10683 	/* Round up
10684 	 * Calculate number of frames such that at least 30 ms of time has
10685 	 * passed.
10686 	 */
10687 	if (vsync_rate_hz != 0) {
10688 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10689 		num_frames_static = (30000 / frame_time_microsec) + 1;
10690 	}
10691 
10692 	params.triggers.cursor_update = true;
10693 	params.triggers.overlay_update = true;
10694 	params.triggers.surface_update = true;
10695 	params.num_frames = num_frames_static;
10696 
10697 	dc_stream_set_static_screen_params(link->ctx->dc,
10698 					   &stream, 1,
10699 					   &params);
10700 
10701 	return dc_link_set_psr_allow_active(link, true, false, false);
10702 }
10703 
10704 /*
10705  * amdgpu_dm_psr_disable() - disable psr f/w
10706  * @stream:  stream state
10707  *
10708  * Return: true if success
10709  */
10710 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10711 {
10712 
10713 	DRM_DEBUG_DRIVER("Disabling psr...\n");
10714 
10715 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
10716 }
10717 
10718 /*
10719  * amdgpu_dm_psr_disable() - disable psr f/w
10720  * if psr is enabled on any stream
10721  *
10722  * Return: true if success
10723  */
10724 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10725 {
10726 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10727 	return dc_set_psr_allow_active(dm->dc, false);
10728 }
10729 
10730 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10731 {
10732 	struct amdgpu_device *adev = drm_to_adev(dev);
10733 	struct dc *dc = adev->dm.dc;
10734 	int i;
10735 
10736 	mutex_lock(&adev->dm.dc_lock);
10737 	if (dc->current_state) {
10738 		for (i = 0; i < dc->current_state->stream_count; ++i)
10739 			dc->current_state->streams[i]
10740 				->triggered_crtc_reset.enabled =
10741 				adev->dm.force_timing_sync;
10742 
10743 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10744 		dc_trigger_sync(dc, dc->current_state);
10745 	}
10746 	mutex_unlock(&adev->dm.dc_lock);
10747 }
10748 
10749 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10750 		       uint32_t value, const char *func_name)
10751 {
10752 #ifdef DM_CHECK_ADDR_0
10753 	if (address == 0) {
10754 		DC_ERR("invalid register write. address = 0");
10755 		return;
10756 	}
10757 #endif
10758 	cgs_write_register(ctx->cgs_device, address, value);
10759 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10760 }
10761 
10762 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10763 			  const char *func_name)
10764 {
10765 	uint32_t value;
10766 #ifdef DM_CHECK_ADDR_0
10767 	if (address == 0) {
10768 		DC_ERR("invalid register read; address = 0\n");
10769 		return 0;
10770 	}
10771 #endif
10772 
10773 	if (ctx->dmub_srv &&
10774 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10775 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10776 		ASSERT(false);
10777 		return 0;
10778 	}
10779 
10780 	value = cgs_read_register(ctx->cgs_device, address);
10781 
10782 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10783 
10784 	return value;
10785 }
10786 
10787 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10788 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
10789 {
10790 	struct amdgpu_device *adev = ctx->driver_context;
10791 	int ret = 0;
10792 
10793 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10794 	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10795 	if (ret == 0) {
10796 		*operation_result = AUX_RET_ERROR_TIMEOUT;
10797 		return -1;
10798 	}
10799 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10800 
10801 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10802 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10803 
10804 		// For read case, Copy data to payload
10805 		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10806 		(*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10807 			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10808 			adev->dm.dmub_notify->aux_reply.length);
10809 	}
10810 
10811 	return adev->dm.dmub_notify->aux_reply.length;
10812 }
10813