1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41 
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53 
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61 #include "amdgpu_dm_psr.h"
62 
63 #include "ivsrcid/ivsrcid_vislands30.h"
64 
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
73 
74 #include <drm/drm_atomic.h>
75 #include <drm/drm_atomic_uapi.h>
76 #include <drm/drm_atomic_helper.h>
77 #include <drm/drm_dp_mst_helper.h>
78 #include <drm/drm_fb_helper.h>
79 #include <drm/drm_fourcc.h>
80 #include <drm/drm_edid.h>
81 #include <drm/drm_vblank.h>
82 #include <drm/drm_audio_component.h>
83 
84 #if defined(CONFIG_DRM_AMD_DC_DCN)
85 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
86 
87 #include "dcn/dcn_1_0_offset.h"
88 #include "dcn/dcn_1_0_sh_mask.h"
89 #include "soc15_hw_ip.h"
90 #include "vega10_ip_offset.h"
91 
92 #include "soc15_common.h"
93 #endif
94 
95 #include "modules/inc/mod_freesync.h"
96 #include "modules/power/power_helpers.h"
97 #include "modules/inc/mod_info_packet.h"
98 
99 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
101 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
103 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
105 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
107 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
109 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
111 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
113 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
115 
116 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
117 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
118 
119 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
120 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
121 
122 /* Number of bytes in PSP header for firmware. */
123 #define PSP_HEADER_BYTES 0x100
124 
125 /* Number of bytes in PSP footer for firmware. */
126 #define PSP_FOOTER_BYTES 0x100
127 
128 /**
129  * DOC: overview
130  *
131  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
132  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
133  * requests into DC requests, and DC responses into DRM responses.
134  *
135  * The root control structure is &struct amdgpu_display_manager.
136  */
137 
138 /* basic init/fini API */
139 static int amdgpu_dm_init(struct amdgpu_device *adev);
140 static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector);
142 
143 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
144 {
145 	switch (link->dpcd_caps.dongle_type) {
146 	case DISPLAY_DONGLE_NONE:
147 		return DRM_MODE_SUBCONNECTOR_Native;
148 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
149 		return DRM_MODE_SUBCONNECTOR_VGA;
150 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
151 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
152 		return DRM_MODE_SUBCONNECTOR_DVID;
153 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
154 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
155 		return DRM_MODE_SUBCONNECTOR_HDMIA;
156 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
157 	default:
158 		return DRM_MODE_SUBCONNECTOR_Unknown;
159 	}
160 }
161 
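/*
 * Mirror the DC dongle type of a DisplayPort link into the DRM
 * dp_subconnector property so userspace can see what is downstream.
 */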
162 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
163 {
164 	struct dc_link *link = aconnector->dc_link;
165 	struct drm_connector *connector = &aconnector->base;
166 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
167 
168 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
169 		return;
170 
171 	if (aconnector->dc_sink)
172 		subconnector = get_subconnector_type(link);
173 
174 	drm_object_property_set_value(&connector->base,
175 			connector->dev->mode_config.dp_subconnector_property,
176 			subconnector);
177 }
178 
179 /*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
183  *
184  * Returns 0 on success
185  */
186 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
187 /* removes and deallocates the drm structures, created by the above function */
188 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
189 
190 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
191 				struct drm_plane *plane,
192 				unsigned long possible_crtcs,
193 				const struct dc_plane_cap *plane_cap);
194 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
195 			       struct drm_plane *plane,
196 			       uint32_t link_index);
197 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
198 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
199 				    uint32_t link_index,
200 				    struct amdgpu_encoder *amdgpu_encoder);
201 static int amdgpu_dm_encoder_init(struct drm_device *dev,
202 				  struct amdgpu_encoder *aencoder,
203 				  uint32_t link_index);
204 
205 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
206 
207 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
208 
209 static int amdgpu_dm_atomic_check(struct drm_device *dev,
210 				  struct drm_atomic_state *state);
211 
212 static void handle_cursor_update(struct drm_plane *plane,
213 				 struct drm_plane_state *old_plane_state);
214 
215 static const struct drm_format_info *
216 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 
218 static bool
219 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
220 				 struct drm_crtc_state *new_crtc_state);
221 /*
222  * dm_vblank_get_counter
223  *
224  * @brief
225  * Get counter for number of vertical blanks
226  *
227  * @param
228  * struct amdgpu_device *adev - [in] desired amdgpu device
229  * int disp_idx - [in] which CRTC to get the counter from
230  *
231  * @return
232  * Counter for vertical blanks
233  */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
250 
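/**
 * dm_crtc_get_scanoutpos() - Get the current scanout position for a CRTC
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to query
 * @vbl: vblank start line in the low 16 bits, vblank end line in the high bits
 * @position: vertical position in the low 16 bits, horizontal in the high bits
 *
 * Returns 0 on success, -EINVAL for an invalid CRTC index.
 */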
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}
283 
284 static bool dm_is_idle(void *handle)
285 {
286 	/* XXX todo */
287 	return true;
288 }
289 
290 static int dm_wait_for_idle(void *handle)
291 {
292 	/* XXX todo */
293 	return 0;
294 }
295 
296 static bool dm_check_soft_reset(void *handle)
297 {
298 	return false;
299 }
300 
301 static int dm_soft_reset(void *handle)
302 {
303 	/* XXX todo */
304 	return 0;
305 }
306 
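/*
 * Look up the amdgpu_crtc driven by the given OTG (output timing generator)
 * instance. Falls back to the first CRTC if otg_inst is -1 and returns NULL
 * if no CRTC matches.
 */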
307 static struct amdgpu_crtc *
308 get_crtc_by_otg_inst(struct amdgpu_device *adev,
309 		     int otg_inst)
310 {
311 	struct drm_device *dev = adev_to_drm(adev);
312 	struct drm_crtc *crtc;
313 	struct amdgpu_crtc *amdgpu_crtc;
314 
315 	if (WARN_ON(otg_inst == -1))
316 		return adev->mode_info.crtcs[0];
317 
318 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 		amdgpu_crtc = to_amdgpu_crtc(crtc);
320 
321 		if (amdgpu_crtc->otg_inst == otg_inst)
322 			return amdgpu_crtc;
323 	}
324 
325 	return NULL;
326 }
327 
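/*
 * VRR is considered active when the freesync state is either "active
 * variable" or "active fixed". This variant reads the IRQ-side copy of the
 * config, so it is safe to call from interrupt handlers.
 */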
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329 {
330 	return acrtc->dm_irq_params.freesync_config.state ==
331 		       VRR_STATE_ACTIVE_VARIABLE ||
332 	       acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_FIXED;
334 }
335 
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337 {
338 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 }
341 
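/*
 * DC needs a vmin/vmax timing adjustment whenever fixed-rate VRR is entered
 * or the VRR active state differs between the old and new CRTC state.
 */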
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 					      struct dm_crtc_state *new_state)
344 {
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
346 		return true;
347 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348 		return true;
349 	else
350 		return false;
351 }
352 
353 /**
354  * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used for determining the CRTC instance
356  *
357  * Handles the pageflip interrupt by notifying all interested parties
358  * that the pageflip has been completed.
359  */
360 static void dm_pflip_high_irq(void *interrupt_params)
361 {
362 	struct amdgpu_crtc *amdgpu_crtc;
363 	struct common_irq_params *irq_params = interrupt_params;
364 	struct amdgpu_device *adev = irq_params->adev;
365 	unsigned long flags;
366 	struct drm_pending_vblank_event *e;
367 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
368 	bool vrr_active;
369 
370 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
371 
372 	/* IRQ could occur when in initial stage */
373 	/* TODO work and BO cleanup */
374 	if (amdgpu_crtc == NULL) {
375 		DC_LOG_PFLIP("CRTC is null, returning.\n");
376 		return;
377 	}
378 
379 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
380 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
383 						 amdgpu_crtc->pflip_status,
384 						 AMDGPU_FLIP_SUBMITTED,
385 						 amdgpu_crtc->crtc_id,
386 						 amdgpu_crtc);
387 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
388 		return;
389 	}
390 
391 	/* page flip completed. */
392 	e = amdgpu_crtc->event;
393 	amdgpu_crtc->event = NULL;
394 
395 	WARN_ON(!e);
396 
397 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
398 
399 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
400 	if (!vrr_active ||
401 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
402 				      &v_blank_end, &hpos, &vpos) ||
403 	    (vpos < v_blank_start)) {
404 		/* Update to correct count and vblank timestamp if racing with
405 		 * vblank irq. This also updates to the correct vblank timestamp
406 		 * even in VRR mode, as scanout is past the front-porch atm.
407 		 */
408 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
409 
410 		/* Wake up userspace by sending the pageflip event with proper
411 		 * count and timestamp of vblank of flip completion.
412 		 */
413 		if (e) {
414 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
415 
416 			/* Event sent, so done with vblank for this flip */
417 			drm_crtc_vblank_put(&amdgpu_crtc->base);
418 		}
419 	} else if (e) {
420 		/* VRR active and inside front-porch: vblank count and
421 		 * timestamp for pageflip event will only be up to date after
422 		 * drm_crtc_handle_vblank() has been executed from late vblank
423 		 * irq handler after start of back-porch (vline 0). We queue the
424 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
425 		 * updated timestamp and count, once it runs after us.
426 		 *
427 		 * We need to open-code this instead of using the helper
428 		 * drm_crtc_arm_vblank_event(), as that helper would
429 		 * call drm_crtc_accurate_vblank_count(), which we must
430 		 * not call in VRR mode while we are in front-porch!
431 		 */
432 
433 		/* sequence will be replaced by real count during send-out. */
434 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
435 		e->pipe = amdgpu_crtc->crtc_id;
436 
437 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
438 		e = NULL;
439 	}
440 
441 	/* Keep track of vblank of this flip for flip throttling. We use the
442 	 * cooked hw counter, as that one incremented at start of this vblank
443 	 * of pageflip completion, so last_flip_vblank is the forbidden count
444 	 * for queueing new pageflips if vsync + VRR is enabled.
445 	 */
446 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
447 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
448 
449 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
450 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
451 
452 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
453 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
454 		     vrr_active, (int) !e);
455 }
456 
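/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate on every VUPDATE and, in VRR mode,
 * performs core vblank handling after the end of front-porch, where
 * vblank timestamping gives valid results.
 */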
457 static void dm_vupdate_high_irq(void *interrupt_params)
458 {
459 	struct common_irq_params *irq_params = interrupt_params;
460 	struct amdgpu_device *adev = irq_params->adev;
461 	struct amdgpu_crtc *acrtc;
462 	struct drm_device *drm_dev;
463 	struct drm_vblank_crtc *vblank;
464 	ktime_t frame_duration_ns, previous_timestamp;
465 	unsigned long flags;
466 	int vrr_active;
467 
468 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
469 
470 	if (acrtc) {
471 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
472 		drm_dev = acrtc->base.dev;
473 		vblank = &drm_dev->vblank[acrtc->base.index];
474 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
475 		frame_duration_ns = vblank->time - previous_timestamp;
476 
477 		if (frame_duration_ns > 0) {
478 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
479 						frame_duration_ns,
480 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
481 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
482 		}
483 
484 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
485 			      acrtc->crtc_id,
486 			      vrr_active);
487 
488 		/* Core vblank handling is done here after end of front-porch in
489 		 * vrr mode, as vblank timestamping will give valid results
490 		 * while now done after front-porch. This will also deliver
491 		 * page-flip completion events that have been queued to us
492 		 * if a pageflip happened inside front-porch.
493 		 */
494 		if (vrr_active) {
495 			drm_crtc_handle_vblank(&acrtc->base);
496 
497 			/* BTR processing for pre-DCE12 ASICs */
498 			if (acrtc->dm_irq_params.stream &&
499 			    adev->family < AMDGPU_FAMILY_AI) {
500 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
501 				mod_freesync_handle_v_update(
502 				    adev->dm.freesync_module,
503 				    acrtc->dm_irq_params.stream,
504 				    &acrtc->dm_irq_params.vrr_params);
505 
506 				dc_stream_adjust_vmin_vmax(
507 				    adev->dm.dc,
508 				    acrtc->dm_irq_params.stream,
509 				    &acrtc->dm_irq_params.vrr_params.adjust);
510 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
511 			}
512 		}
513 	}
514 }
515 
516 /**
517  * dm_crtc_high_irq() - Handles CRTC interrupt
518  * @interrupt_params: used for determining the CRTC instance
519  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
521  * event handler.
522  */
523 static void dm_crtc_high_irq(void *interrupt_params)
524 {
525 	struct common_irq_params *irq_params = interrupt_params;
526 	struct amdgpu_device *adev = irq_params->adev;
527 	struct amdgpu_crtc *acrtc;
528 	unsigned long flags;
529 	int vrr_active;
530 
531 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
532 	if (!acrtc)
533 		return;
534 
535 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
536 
537 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
538 		      vrr_active, acrtc->dm_irq_params.active_planes);
539 
540 	/**
541 	 * Core vblank handling at start of front-porch is only possible
542 	 * in non-vrr mode, as only there vblank timestamping will give
543 	 * valid results while done in front-porch. Otherwise defer it
544 	 * to dm_vupdate_high_irq after end of front-porch.
545 	 */
546 	if (!vrr_active)
547 		drm_crtc_handle_vblank(&acrtc->base);
548 
549 	/**
550 	 * Following stuff must happen at start of vblank, for crc
551 	 * computation and below-the-range btr support in vrr mode.
552 	 */
553 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
554 
555 	/* BTR updates need to happen before VUPDATE on Vega and above. */
556 	if (adev->family < AMDGPU_FAMILY_AI)
557 		return;
558 
559 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
560 
561 	if (acrtc->dm_irq_params.stream &&
562 	    acrtc->dm_irq_params.vrr_params.supported &&
563 	    acrtc->dm_irq_params.freesync_config.state ==
564 		    VRR_STATE_ACTIVE_VARIABLE) {
565 		mod_freesync_handle_v_update(adev->dm.freesync_module,
566 					     acrtc->dm_irq_params.stream,
567 					     &acrtc->dm_irq_params.vrr_params);
568 
569 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
570 					   &acrtc->dm_irq_params.vrr_params.adjust);
571 	}
572 
573 	/*
574 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
575 	 * In that case, pageflip completion interrupts won't fire and pageflip
576 	 * completion events won't get delivered. Prevent this by sending
577 	 * pending pageflip events from here if a flip is still pending.
578 	 *
579 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
580 	 * avoid race conditions between flip programming and completion,
581 	 * which could cause too early flip completion events.
582 	 */
583 	if (adev->family >= AMDGPU_FAMILY_RV &&
584 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
585 	    acrtc->dm_irq_params.active_planes == 0) {
586 		if (acrtc->event) {
587 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
588 			acrtc->event = NULL;
589 			drm_crtc_vblank_put(&acrtc->base);
590 		}
591 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
592 	}
593 
594 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
595 }
596 
597 #if defined(CONFIG_DRM_AMD_DC_DCN)
598 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
599 /**
600  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601  * DCN generation ASICs
602  * @interrupt_params: interrupt parameters
603  *
 * Used to set the CRC window and read out the CRC value at the vertical
 * line 0 position.
605  */
606 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
607 {
608 	struct common_irq_params *irq_params = interrupt_params;
609 	struct amdgpu_device *adev = irq_params->adev;
610 	struct amdgpu_crtc *acrtc;
611 
612 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
613 
614 	if (!acrtc)
615 		return;
616 
617 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
618 }
619 #endif
620 
621 /**
622  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
623  * @interrupt_params: used for determining the Outbox instance
624  *
625  * Handles the Outbox Interrupt
626  * event handler.
627  */
628 #define DMUB_TRACE_MAX_READ 64
629 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
630 {
631 	struct dmub_notification notify;
632 	struct common_irq_params *irq_params = interrupt_params;
633 	struct amdgpu_device *adev = irq_params->adev;
634 	struct amdgpu_display_manager *dm = &adev->dm;
635 	struct dmcub_trace_buf_entry entry = { 0 };
636 	uint32_t count = 0;
637 
638 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
639 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
640 			do {
641 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
642 			} while (notify.pending_notification);
643 
644 			if (adev->dm.dmub_notify)
645 				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
646 			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
647 				complete(&adev->dm.dmub_aux_transfer_done);
			/* TODO: HPD implementation */
649 
650 		} else {
651 			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
652 		}
653 	}
654 
655 
	do {
		if (!dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry))
			break;

		trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
						 entry.param0, entry.param1);

		DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);
669 
670 	ASSERT(count <= DMUB_TRACE_MAX_READ);
671 }
672 #endif
673 
674 static int dm_set_clockgating_state(void *handle,
675 		  enum amd_clockgating_state state)
676 {
677 	return 0;
678 }
679 
680 static int dm_set_powergating_state(void *handle,
681 		  enum amd_powergating_state state)
682 {
683 	return 0;
684 }
685 
686 /* Prototypes of private functions */
static int dm_early_init(void *handle);
688 
/* Allocate memory for FBC compressed data */
690 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
691 {
692 	struct drm_device *dev = connector->dev;
693 	struct amdgpu_device *adev = drm_to_adev(dev);
694 	struct dm_compressor_info *compressor = &adev->dm.compressor;
695 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
696 	struct drm_display_mode *mode;
697 	unsigned long max_size = 0;
698 
699 	if (adev->dm.dc->fbc_compressor == NULL)
700 		return;
701 
702 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
703 		return;
704 
	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
729 
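/* Audio component callback used by the HDA driver to fetch a port's ELD. */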
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
733 {
734 	struct drm_device *dev = dev_get_drvdata(kdev);
735 	struct amdgpu_device *adev = drm_to_adev(dev);
736 	struct drm_connector *connector;
737 	struct drm_connector_list_iter conn_iter;
738 	struct amdgpu_dm_connector *aconnector;
739 	int ret = 0;
740 
741 	*enabled = false;
742 
743 	mutex_lock(&adev->dm.audio_lock);
744 
745 	drm_connector_list_iter_begin(dev, &conn_iter);
746 	drm_for_each_connector_iter(connector, &conn_iter) {
747 		aconnector = to_amdgpu_dm_connector(connector);
748 		if (aconnector->audio_inst != port)
749 			continue;
750 
751 		*enabled = true;
752 		ret = drm_eld_size(connector->eld);
753 		memcpy(buf, connector->eld, min(max_bytes, ret));
754 
755 		break;
756 	}
757 	drm_connector_list_iter_end(&conn_iter);
758 
759 	mutex_unlock(&adev->dm.audio_lock);
760 
761 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
762 
763 	return ret;
764 }
765 
766 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
767 	.get_eld = amdgpu_dm_audio_component_get_eld,
768 };
769 
static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
772 {
773 	struct drm_device *dev = dev_get_drvdata(kdev);
774 	struct amdgpu_device *adev = drm_to_adev(dev);
775 	struct drm_audio_component *acomp = data;
776 
777 	acomp->ops = &amdgpu_dm_audio_component_ops;
778 	acomp->dev = kdev;
779 	adev->dm.audio_component = acomp;
780 
781 	return 0;
782 }
783 
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
786 {
787 	struct drm_device *dev = dev_get_drvdata(kdev);
788 	struct amdgpu_device *adev = drm_to_adev(dev);
789 	struct drm_audio_component *acomp = data;
790 
791 	acomp->ops = NULL;
792 	acomp->dev = NULL;
793 	adev->dm.audio_component = NULL;
794 }
795 
796 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
797 	.bind	= amdgpu_dm_audio_component_bind,
798 	.unbind	= amdgpu_dm_audio_component_unbind,
799 };
800 
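/*
 * Register the DC audio endpoints as pins with the generic DRM audio
 * component so the HDA driver can bind to them.
 */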
801 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
802 {
803 	int i, ret;
804 
805 	if (!amdgpu_audio)
806 		return 0;
807 
808 	adev->mode_info.audio.enabled = true;
809 
810 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
811 
812 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
813 		adev->mode_info.audio.pin[i].channels = -1;
814 		adev->mode_info.audio.pin[i].rate = -1;
815 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
816 		adev->mode_info.audio.pin[i].status_bits = 0;
817 		adev->mode_info.audio.pin[i].category_code = 0;
818 		adev->mode_info.audio.pin[i].connected = false;
819 		adev->mode_info.audio.pin[i].id =
820 			adev->dm.dc->res_pool->audios[i]->inst;
821 		adev->mode_info.audio.pin[i].offset = 0;
822 	}
823 
824 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
825 	if (ret < 0)
826 		return ret;
827 
828 	adev->dm.audio_registered = true;
829 
830 	return 0;
831 }
832 
833 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
834 {
835 	if (!amdgpu_audio)
836 		return;
837 
838 	if (!adev->mode_info.audio.enabled)
839 		return;
840 
841 	if (adev->dm.audio_registered) {
842 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
843 		adev->dm.audio_registered = false;
844 	}
845 
846 	/* TODO: Disable audio? */
847 
848 	adev->mode_info.audio.enabled = false;
849 }
850 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
852 {
853 	struct drm_audio_component *acomp = adev->dm.audio_component;
854 
855 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
856 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
857 
858 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
859 						 pin, -1);
860 	}
861 }
862 
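/**
 * dm_dmub_hw_init() - Initialize DMUB hardware
 * @adev: amdgpu device
 *
 * Copies the DMUB firmware and VBIOS into the reserved framebuffer windows,
 * clears the mailbox, trace buffer and firmware state regions, and hands the
 * resulting layout to the DMUB service for hardware initialization.
 *
 * Returns 0 on success (including ASICs without DMUB support), negative
 * errno on failure.
 */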
863 static int dm_dmub_hw_init(struct amdgpu_device *adev)
864 {
865 	const struct dmcub_firmware_header_v1_0 *hdr;
866 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
867 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
868 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
869 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
870 	struct abm *abm = adev->dm.dc->res_pool->abm;
871 	struct dmub_srv_hw_params hw_params;
872 	enum dmub_status status;
873 	const unsigned char *fw_inst_const, *fw_bss_data;
874 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
875 	bool has_hw_support;
876 
877 	if (!dmub_srv)
878 		/* DMUB isn't supported on the ASIC. */
879 		return 0;
880 
881 	if (!fb_info) {
882 		DRM_ERROR("No framebuffer info for DMUB service.\n");
883 		return -EINVAL;
884 	}
885 
886 	if (!dmub_fw) {
887 		/* Firmware required for DMUB support. */
888 		DRM_ERROR("No firmware provided for DMUB.\n");
889 		return -EINVAL;
890 	}
891 
892 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
893 	if (status != DMUB_STATUS_OK) {
894 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
895 		return -EINVAL;
896 	}
897 
898 	if (!has_hw_support) {
899 		DRM_INFO("DMUB unsupported on ASIC\n");
900 		return 0;
901 	}
902 
903 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
904 
905 	fw_inst_const = dmub_fw->data +
906 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
907 			PSP_HEADER_BYTES;
908 
909 	fw_bss_data = dmub_fw->data +
910 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
911 		      le32_to_cpu(hdr->inst_const_bytes);
912 
913 	/* Copy firmware and bios info into FB memory. */
914 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
915 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
916 
917 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
918 
	/*
	 * If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the fw_inst_const part of the
	 * DMUB firmware to CW0; otherwise, the firmware backdoor load is done
	 * here in dm_dmub_hw_init.
	 */
924 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
925 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
926 				fw_inst_const_size);
927 	}
928 
929 	if (fw_bss_data_size)
930 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
931 		       fw_bss_data, fw_bss_data_size);
932 
933 	/* Copy firmware bios info into FB memory. */
934 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
935 	       adev->bios_size);
936 
937 	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
940 
941 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
942 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
943 
944 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
945 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
946 
947 	/* Initialize hardware. */
948 	memset(&hw_params, 0, sizeof(hw_params));
949 	hw_params.fb_base = adev->gmc.fb_start;
950 	hw_params.fb_offset = adev->gmc.aper_base;
951 
952 	/* backdoor load firmware and trigger dmub running */
953 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
954 		hw_params.load_inst_const = true;
955 
956 	if (dmcu)
957 		hw_params.psp_version = dmcu->psp_version;
958 
959 	for (i = 0; i < fb_info->num_fb; ++i)
960 		hw_params.fb[i] = &fb_info->fb[i];
961 
962 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
963 	if (status != DMUB_STATUS_OK) {
964 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
965 		return -EINVAL;
966 	}
967 
968 	/* Wait for firmware load to finish. */
969 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
970 	if (status != DMUB_STATUS_OK)
971 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
972 
973 	/* Init DMCU and ABM if available. */
974 	if (dmcu && abm) {
975 		dmcu->funcs->dmcu_init(dmcu);
976 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
977 	}
978 
979 	if (!adev->dm.dc->ctx->dmub_srv)
980 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
981 	if (!adev->dm.dc->ctx->dmub_srv) {
982 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
983 		return -ENOMEM;
984 	}
985 
986 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
987 		 adev->dm.dmcub_fw_version);
988 
989 	return 0;
990 }
991 
992 #if defined(CONFIG_DRM_AMD_DC_DCN)
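/*
 * Build a DC physical address space configuration from the GMC's system
 * aperture, AGP aperture and GART page table settings so DC programs a
 * matching view of memory.
 */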
993 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
994 {
995 	uint64_t pt_base;
996 	uint32_t logical_addr_low;
997 	uint32_t logical_addr_high;
998 	uint32_t agp_base, agp_bot, agp_top;
999 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1000 
1001 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1002 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1003 
1004 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1005 		/*
1006 		 * Raven2 has a HW issue that it is unable to use the vram which
1007 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1008 		 * workaround that increase system aperture high address (add 1)
1009 		 * to get rid of the VM fault and hardware hang.
1010 		 */
1011 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1012 	else
1013 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1014 
1015 	agp_base = 0;
1016 	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

1020 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1021 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1022 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1023 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1024 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1025 	page_table_base.low_part = lower_32_bits(pt_base);
1026 
1027 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1028 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1029 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1031 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1032 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1033 
1034 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1035 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1036 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1037 
1038 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1039 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1040 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1041 
	pa_config->is_hvm_enabled = 0;
}
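/*
 * Deferred work that tracks how many CRTCs have vblank interrupts enabled
 * and only allows DC idle (MALL stutter) optimizations when none do.
 */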
1047 static void event_mall_stutter(struct work_struct *work)
1048 {
1049 
1050 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1051 	struct amdgpu_display_manager *dm = vblank_work->dm;
1052 
1053 	mutex_lock(&dm->dc_lock);
1054 
	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;
1059 
1060 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1061 
1062 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1063 
1064 	mutex_unlock(&dm->dc_lock);
1065 }
1066 
1067 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1068 {
1069 
1070 	int max_caps = dc->caps.max_links;
1071 	struct vblank_workqueue *vblank_work;
1072 	int i = 0;
1073 
1074 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1075 	if (ZERO_OR_NULL_PTR(vblank_work)) {
1076 		kfree(vblank_work);
1077 		return NULL;
1078 	}
1079 
1080 	for (i = 0; i < max_caps; i++)
1081 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1082 
1083 	return vblank_work;
1084 }
1085 #endif
1086 static int amdgpu_dm_init(struct amdgpu_device *adev)
1087 {
1088 	struct dc_init_data init_data;
1089 #ifdef CONFIG_DRM_AMD_DC_HDCP
1090 	struct dc_callback_init init_params;
1091 #endif
1092 	int r;
1093 
1094 	adev->dm.ddev = adev_to_drm(adev);
1095 	adev->dm.adev = adev;
1096 
1097 	/* Zero all the fields */
1098 	memset(&init_data, 0, sizeof(init_data));
1099 #ifdef CONFIG_DRM_AMD_DC_HDCP
1100 	memset(&init_params, 0, sizeof(init_params));
1101 #endif
1102 
1103 	mutex_init(&adev->dm.dc_lock);
1104 	mutex_init(&adev->dm.audio_lock);
1105 #if defined(CONFIG_DRM_AMD_DC_DCN)
1106 	spin_lock_init(&adev->dm.vblank_lock);
1107 #endif
1108 
	if (amdgpu_dm_irq_init(adev)) {
1110 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1111 		goto error;
1112 	}
1113 
1114 	init_data.asic_id.chip_family = adev->family;
1115 
1116 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1117 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1118 
1119 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1120 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1121 	init_data.asic_id.atombios_base_address =
1122 		adev->mode_info.atom_context->bios;
1123 
1124 	init_data.driver = adev;
1125 
1126 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1127 
1128 	if (!adev->dm.cgs_device) {
1129 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1130 		goto error;
1131 	}
1132 
1133 	init_data.cgs_device = adev->dm.cgs_device;
1134 
1135 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1136 
1137 	switch (adev->asic_type) {
1138 	case CHIP_CARRIZO:
1139 	case CHIP_STONEY:
1140 	case CHIP_RAVEN:
1141 	case CHIP_RENOIR:
1142 		init_data.flags.gpu_vm_support = true;
1143 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1144 			init_data.flags.disable_dmcu = true;
1145 		break;
1146 	case CHIP_VANGOGH:
1147 	case CHIP_YELLOW_CARP:
1148 		init_data.flags.gpu_vm_support = true;
1149 		break;
1150 	default:
1151 		break;
1152 	}
1153 
1154 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1155 		init_data.flags.fbc_support = true;
1156 
1157 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1158 		init_data.flags.multi_mon_pp_mclk_switch = true;
1159 
1160 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1161 		init_data.flags.disable_fractional_pwm = true;
1162 
1163 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1164 		init_data.flags.edp_no_power_sequencing = true;
1165 
1166 	init_data.flags.power_down_display_on_boot = true;
1167 
1168 	INIT_LIST_HEAD(&adev->dm.da_list);
1169 	/* Display Core create. */
1170 	adev->dm.dc = dc_create(&init_data);
1171 
1172 	if (adev->dm.dc) {
1173 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1174 	} else {
1175 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1176 		goto error;
1177 	}
1178 
1179 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1180 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1181 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1182 	}
1183 
1184 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1186 
1187 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1188 		adev->dm.dc->debug.disable_stutter = true;
1189 
1190 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1191 		adev->dm.dc->debug.disable_dsc = true;
1192 
1193 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1194 		adev->dm.dc->debug.disable_clock_gate = true;
1195 
1196 	r = dm_dmub_hw_init(adev);
1197 	if (r) {
1198 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1199 		goto error;
1200 	}
1201 
1202 	dc_hardware_init(adev->dm.dc);
1203 
1204 #if defined(CONFIG_DRM_AMD_DC_DCN)
1205 	if (adev->apu_flags) {
1206 		struct dc_phy_addr_space_config pa_config;
1207 
1208 		mmhub_read_system_context(adev, &pa_config);
1209 
1210 		// Call the DC init_memory func
1211 		dc_setup_system_context(adev->dm.dc, &pa_config);
1212 	}
1213 #endif
1214 
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
1222 
1223 	amdgpu_dm_init_color_mod();
1224 
1225 #if defined(CONFIG_DRM_AMD_DC_DCN)
1226 	if (adev->dm.dc->caps.max_links > 0) {
1227 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1228 
1229 		if (!adev->dm.vblank_workqueue)
1230 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1231 		else
1232 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1233 	}
1234 #endif
1235 
1236 #ifdef CONFIG_DRM_AMD_DC_HDCP
1237 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1238 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1239 
1240 		if (!adev->dm.hdcp_workqueue)
1241 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1242 		else
1243 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1244 
1245 		dc_init_callbacks(adev->dm.dc, &init_params);
1246 	}
1247 #endif
1248 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1249 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1250 #endif
1251 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1252 		init_completion(&adev->dm.dmub_aux_transfer_done);
1253 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1254 		if (!adev->dm.dmub_notify) {
1255 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1256 			goto error;
1257 		}
1258 		amdgpu_dm_outbox_init(adev);
1259 	}
1260 
1261 	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1264 		goto error;
1265 	}
1266 
1267 	/* create fake encoders for MST */
1268 	dm_dp_create_fake_mst_encoders(adev);
1269 
1270 	/* TODO: Add_display_info? */
1271 
1272 	/* TODO use dynamic cursor width */
1273 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1274 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1275 
1276 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1279 		goto error;
1280 	}
1281 
1282 
1283 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1284 
1285 	return 0;
1286 error:
1287 	amdgpu_dm_fini(adev);
1288 
1289 	return -EINVAL;
1290 }
1291 
1292 static int amdgpu_dm_early_fini(void *handle)
1293 {
1294 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1295 
1296 	amdgpu_dm_audio_fini(adev);
1297 
1298 	return 0;
1299 }
1300 
1301 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1302 {
1303 	int i;
1304 
	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1308 
1309 	amdgpu_dm_destroy_drm_device(&adev->dm);
1310 
1311 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1312 	if (adev->dm.crc_rd_wrk) {
1313 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1314 		kfree(adev->dm.crc_rd_wrk);
1315 		adev->dm.crc_rd_wrk = NULL;
1316 	}
1317 #endif
1318 #ifdef CONFIG_DRM_AMD_DC_HDCP
1319 	if (adev->dm.hdcp_workqueue) {
1320 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1321 		adev->dm.hdcp_workqueue = NULL;
1322 	}
1323 
1324 	if (adev->dm.dc)
1325 		dc_deinit_callbacks(adev->dm.dc);
1326 #endif
1327 
1328 #if defined(CONFIG_DRM_AMD_DC_DCN)
1329 	if (adev->dm.vblank_workqueue) {
1330 		adev->dm.vblank_workqueue->dm = NULL;
1331 		kfree(adev->dm.vblank_workqueue);
1332 		adev->dm.vblank_workqueue = NULL;
1333 	}
1334 #endif
1335 
1336 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1337 
1338 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1339 		kfree(adev->dm.dmub_notify);
1340 		adev->dm.dmub_notify = NULL;
1341 	}
1342 
1343 	if (adev->dm.dmub_bo)
1344 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1345 				      &adev->dm.dmub_bo_gpu_addr,
1346 				      &adev->dm.dmub_bo_cpu_addr);
1347 
1348 	/* DC Destroy TODO: Replace destroy DAL */
1349 	if (adev->dm.dc)
1350 		dc_destroy(&adev->dm.dc);
1351 	/*
1352 	 * TODO: pageflip, vlank interrupt
1353 	 *
1354 	 * amdgpu_dm_irq_fini(adev);
1355 	 */
1356 
1357 	if (adev->dm.cgs_device) {
1358 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1359 		adev->dm.cgs_device = NULL;
1360 	}
1361 	if (adev->dm.freesync_module) {
1362 		mod_freesync_destroy(adev->dm.freesync_module);
1363 		adev->dm.freesync_module = NULL;
1364 	}
1365 
	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
1371 
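/*
 * Request and validate the DMCU firmware on ASICs that need it, and register
 * its ERAM and interrupt-vector sections for PSP loading.
 */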
1372 static int load_dmcu_fw(struct amdgpu_device *adev)
1373 {
1374 	const char *fw_name_dmcu = NULL;
1375 	int r;
1376 	const struct dmcu_firmware_header_v1_0 *hdr;
1377 
	switch (adev->asic_type) {
1379 #if defined(CONFIG_DRM_AMD_DC_SI)
1380 	case CHIP_TAHITI:
1381 	case CHIP_PITCAIRN:
1382 	case CHIP_VERDE:
1383 	case CHIP_OLAND:
1384 #endif
1385 	case CHIP_BONAIRE:
1386 	case CHIP_HAWAII:
1387 	case CHIP_KAVERI:
1388 	case CHIP_KABINI:
1389 	case CHIP_MULLINS:
1390 	case CHIP_TONGA:
1391 	case CHIP_FIJI:
1392 	case CHIP_CARRIZO:
1393 	case CHIP_STONEY:
1394 	case CHIP_POLARIS11:
1395 	case CHIP_POLARIS10:
1396 	case CHIP_POLARIS12:
1397 	case CHIP_VEGAM:
1398 	case CHIP_VEGA10:
1399 	case CHIP_VEGA12:
1400 	case CHIP_VEGA20:
1401 	case CHIP_NAVI10:
1402 	case CHIP_NAVI14:
1403 	case CHIP_RENOIR:
1404 	case CHIP_SIENNA_CICHLID:
1405 	case CHIP_NAVY_FLOUNDER:
1406 	case CHIP_DIMGREY_CAVEFISH:
1407 	case CHIP_BEIGE_GOBY:
1408 	case CHIP_VANGOGH:
1409 	case CHIP_YELLOW_CARP:
1410 		return 0;
1411 	case CHIP_NAVI12:
1412 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1413 		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
1422 	default:
1423 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1424 		return -EINVAL;
1425 	}
1426 
1427 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1428 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1429 		return 0;
1430 	}
1431 
1432 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1433 	if (r == -ENOENT) {
1434 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1435 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1436 		adev->dm.fw_dmcu = NULL;
1437 		return 0;
1438 	}
1439 	if (r) {
1440 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1441 			fw_name_dmcu);
1442 		return r;
1443 	}
1444 
1445 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1446 	if (r) {
1447 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1448 			fw_name_dmcu);
1449 		release_firmware(adev->dm.fw_dmcu);
1450 		adev->dm.fw_dmcu = NULL;
1451 		return r;
1452 	}
1453 
1454 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1455 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1456 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1457 	adev->firmware.fw_size +=
1458 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1459 
1460 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1461 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1462 	adev->firmware.fw_size +=
1463 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1464 
1465 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1466 
1467 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1468 
1469 	return 0;
1470 }
1471 
1472 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1473 {
1474 	struct amdgpu_device *adev = ctx;
1475 
1476 	return dm_read_reg(adev->dm.dc->ctx, address);
1477 }
1478 
1479 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1480 				     uint32_t value)
1481 {
1482 	struct amdgpu_device *adev = ctx;
1483 
1484 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1485 }
1486 
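/*
 * Software-side DMUB setup: select the per-ASIC firmware, create the DMUB
 * service, calculate its region layout and back it with a VRAM allocation.
 */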
1487 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1488 {
1489 	struct dmub_srv_create_params create_params;
1490 	struct dmub_srv_region_params region_params;
1491 	struct dmub_srv_region_info region_info;
1492 	struct dmub_srv_fb_params fb_params;
1493 	struct dmub_srv_fb_info *fb_info;
1494 	struct dmub_srv *dmub_srv;
1495 	const struct dmcub_firmware_header_v1_0 *hdr;
1496 	const char *fw_name_dmub;
1497 	enum dmub_asic dmub_asic;
1498 	enum dmub_status status;
1499 	int r;
1500 
1501 	switch (adev->asic_type) {
1502 	case CHIP_RENOIR:
1503 		dmub_asic = DMUB_ASIC_DCN21;
1504 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1505 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1506 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1507 		break;
1508 	case CHIP_SIENNA_CICHLID:
1509 		dmub_asic = DMUB_ASIC_DCN30;
1510 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1511 		break;
1512 	case CHIP_NAVY_FLOUNDER:
1513 		dmub_asic = DMUB_ASIC_DCN30;
1514 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1515 		break;
1516 	case CHIP_VANGOGH:
1517 		dmub_asic = DMUB_ASIC_DCN301;
1518 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1519 		break;
1520 	case CHIP_DIMGREY_CAVEFISH:
1521 		dmub_asic = DMUB_ASIC_DCN302;
1522 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1523 		break;
1524 	case CHIP_BEIGE_GOBY:
1525 		dmub_asic = DMUB_ASIC_DCN303;
1526 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1527 		break;
1528 	case CHIP_YELLOW_CARP:
1529 		dmub_asic = DMUB_ASIC_DCN31;
1530 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;
	default:
1534 		/* ASIC doesn't support DMUB. */
1535 		return 0;
1536 	}
1537 
1538 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1539 	if (r) {
1540 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1541 		return 0;
1542 	}
1543 
1544 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1545 	if (r) {
1546 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1547 		return 0;
1548 	}
1549 
1550 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1551 
1552 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1553 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1554 			AMDGPU_UCODE_ID_DMCUB;
1555 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1556 			adev->dm.dmub_fw;
1557 		adev->firmware.fw_size +=
1558 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1559 
1560 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1561 			 adev->dm.dmcub_fw_version);
1562 	}
1563 
1564 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1565 
1566 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1567 	dmub_srv = adev->dm.dmub_srv;
1568 
1569 	if (!dmub_srv) {
1570 		DRM_ERROR("Failed to allocate DMUB service!\n");
1571 		return -ENOMEM;
1572 	}
1573 
1574 	memset(&create_params, 0, sizeof(create_params));
1575 	create_params.user_ctx = adev;
1576 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1577 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1578 	create_params.asic = dmub_asic;
1579 
1580 	/* Create the DMUB service. */
1581 	status = dmub_srv_create(dmub_srv, &create_params);
1582 	if (status != DMUB_STATUS_OK) {
1583 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1584 		return -EINVAL;
1585 	}
1586 
1587 	/* Calculate the size of all the regions for the DMUB service. */
1588 	memset(&region_params, 0, sizeof(region_params));
1589 
1590 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1591 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1592 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1593 	region_params.vbios_size = adev->bios_size;
1594 	region_params.fw_bss_data = region_params.bss_data_size ?
1595 		adev->dm.dmub_fw->data +
1596 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1597 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1598 	region_params.fw_inst_const =
1599 		adev->dm.dmub_fw->data +
1600 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1601 		PSP_HEADER_BYTES;
1602 
1603 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1604 					   &region_info);
1605 
1606 	if (status != DMUB_STATUS_OK) {
1607 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1608 		return -EINVAL;
1609 	}
1610 
1611 	/*
1612 	 * Allocate a framebuffer based on the total size of all the regions.
1613 	 * TODO: Move this into GART.
1614 	 */
1615 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1616 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1617 				    &adev->dm.dmub_bo_gpu_addr,
1618 				    &adev->dm.dmub_bo_cpu_addr);
1619 	if (r)
1620 		return r;
1621 
1622 	/* Rebase the regions on the framebuffer address. */
1623 	memset(&fb_params, 0, sizeof(fb_params));
1624 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1625 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1626 	fb_params.region_info = &region_info;
1627 
1628 	adev->dm.dmub_fb_info =
1629 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1630 	fb_info = adev->dm.dmub_fb_info;
1631 
1632 	if (!fb_info) {
1633 		DRM_ERROR(
1634 			"Failed to allocate framebuffer info for DMUB service!\n");
1635 		return -ENOMEM;
1636 	}
1637 
1638 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1639 	if (status != DMUB_STATUS_OK) {
1640 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1641 		return -EINVAL;
1642 	}
1643 
1644 	return 0;
1645 }
1646 
1647 static int dm_sw_init(void *handle)
1648 {
1649 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1650 	int r;
1651 
1652 	r = dm_dmub_sw_init(adev);
1653 	if (r)
1654 		return r;
1655 
1656 	return load_dmcu_fw(adev);
1657 }
1658 
1659 static int dm_sw_fini(void *handle)
1660 {
1661 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1662 
1663 	kfree(adev->dm.dmub_fb_info);
1664 	adev->dm.dmub_fb_info = NULL;
1665 
1666 	if (adev->dm.dmub_srv) {
1667 		dmub_srv_destroy(adev->dm.dmub_srv);
1668 		adev->dm.dmub_srv = NULL;
1669 	}
1670 
1671 	release_firmware(adev->dm.dmub_fw);
1672 	adev->dm.dmub_fw = NULL;
1673 
1674 	release_firmware(adev->dm.fw_dmcu);
1675 	adev->dm.fw_dmcu = NULL;
1676 
1677 	return 0;
1678 }
1679 
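/* Start MST topology management on every MST-capable DP connector. */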
1680 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1681 {
1682 	struct amdgpu_dm_connector *aconnector;
1683 	struct drm_connector *connector;
1684 	struct drm_connector_list_iter iter;
1685 	int ret = 0;
1686 
1687 	drm_connector_list_iter_begin(dev, &iter);
1688 	drm_for_each_connector_iter(connector, &iter) {
1689 		aconnector = to_amdgpu_dm_connector(connector);
1690 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1691 		    aconnector->mst_mgr.aux) {
1692 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1693 					 aconnector,
1694 					 aconnector->base.base.id);
1695 
1696 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1697 			if (ret < 0) {
1698 				DRM_ERROR("DM_MST: Failed to start MST\n");
1699 				aconnector->dc_link->type =
1700 					dc_connection_single;
1701 				break;
1702 			}
1703 		}
1704 	}
1705 	drm_connector_list_iter_end(&iter);
1706 
1707 	return ret;
1708 }
1709 
1710 static int dm_late_init(void *handle)
1711 {
1712 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1714 	struct dmcu_iram_parameters params;
1715 	unsigned int linear_lut[16];
1716 	int i;
1717 	struct dmcu *dmcu = NULL;
1718 
1719 	dmcu = adev->dm.dc->res_pool->dmcu;
1720 
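	/* Build a 16-entry linear ramp from 0 to 0xFFFF for the backlight LUT. */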
1721 	for (i = 0; i < 16; i++)
1722 		linear_lut[i] = 0xFFFF * i / 15;
1723 
1724 	params.set = 0;
1725 	params.backlight_ramping_start = 0xCCCC;
1726 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1727 	params.backlight_lut_array_size = 16;
1728 	params.backlight_lut_array = linear_lut;
1729 
	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
1733 	params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on DMCUB,
	 * the dmcu object will be NULL.
	 * ABM 2.4 and up are implemented on DMCUB.
	 */
1738 	if (dmcu) {
1739 		if (!dmcu_load_iram(dmcu, params))
1740 			return -EINVAL;
1741 	} else if (adev->dm.dc->ctx->dmub_srv) {
1742 		struct dc_link *edp_links[MAX_NUM_EDP];
1743 		int edp_num;
1744 
1745 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
1746 		for (i = 0; i < edp_num; i++) {
1747 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1748 				return -EINVAL;
1749 		}
1750 	}
1751 
1752 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1753 }
1754 
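/*
 * Suspend or resume the MST topology manager of every MST root connector.
 * If a topology fails to resume, tear down its MST state and send a
 * hotplug event so userspace can re-probe.
 */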
1755 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1756 {
1757 	struct amdgpu_dm_connector *aconnector;
1758 	struct drm_connector *connector;
1759 	struct drm_connector_list_iter iter;
1760 	struct drm_dp_mst_topology_mgr *mgr;
1761 	int ret;
1762 	bool need_hotplug = false;
1763 
1764 	drm_connector_list_iter_begin(dev, &iter);
1765 	drm_for_each_connector_iter(connector, &iter) {
1766 		aconnector = to_amdgpu_dm_connector(connector);
1767 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1768 		    aconnector->mst_port)
1769 			continue;
1770 
1771 		mgr = &aconnector->mst_mgr;
1772 
1773 		if (suspend) {
1774 			drm_dp_mst_topology_mgr_suspend(mgr);
1775 		} else {
1776 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1777 			if (ret < 0) {
1778 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1779 				need_hotplug = true;
1780 			}
1781 		}
1782 	}
1783 	drm_connector_list_iter_end(&iter);
1784 
1785 	if (need_hotplug)
1786 		drm_kms_helper_hotplug_event(dev);
1787 }
1788 
1789 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1790 {
1791 	struct smu_context *smu = &adev->smu;
1792 	int ret = 0;
1793 
1794 	if (!is_support_sw_smu(adev))
1795 		return 0;
1796 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver DC implementation.
	 * For Navi1x, the clock settings of the DCN watermarks are fixed; the
	 * settings should be passed to the SMU during boot up and on resume
	 * from S3.
	 * Boot up: DC calculates the DCN watermark clock settings within
	 * dc_create, dcn20_resource_construct,
	 * then calls the pplib functions below to pass the settings to the SMU:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the DCN watermarks are also fixed
	 * values. DC has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
	 */
	switch (adev->asic_type) {
1828 	case CHIP_NAVI10:
1829 	case CHIP_NAVI14:
1830 	case CHIP_NAVI12:
1831 		break;
1832 	default:
1833 		return 0;
1834 	}
1835 
1836 	ret = smu_write_watermarks_table(smu);
1837 	if (ret) {
1838 		DRM_ERROR("Failed to update WMTABLE!\n");
1839 		return ret;
1840 	}
1841 
1842 	return 0;
1843 }
1844 
1845 /**
1846  * dm_hw_init() - Initialize DC device
1847  * @handle: The base driver device containing the amdgpu_dm device.
1848  *
1849  * Initialize the &struct amdgpu_display_manager device. This involves calling
1850  * the initializers of each DM component, then populating the struct with them.
1851  *
1852  * Although the function implies hardware initialization, both hardware and
1853  * software are initialized here. Splitting them out to their relevant init
1854  * hooks is a future TODO item.
1855  *
1856  * Some notable things that are initialized here:
1857  *
1858  * - Display Core, both software and hardware
1859  * - DC modules that we need (freesync and color management)
1860  * - DRM software states
1861  * - Interrupt sources and handlers
1862  * - Vblank support
1863  * - Debug FS entries, if enabled
1864  */
1865 static int dm_hw_init(void *handle)
1866 {
1867 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1868 	/* Create DAL display manager */
1869 	amdgpu_dm_init(adev);
1870 	amdgpu_dm_hpd_init(adev);
1871 
1872 	return 0;
1873 }
1874 
1875 /**
1876  * dm_hw_fini() - Teardown DC device
1877  * @handle: The base driver device containing the amdgpu_dm device.
1878  *
1879  * Teardown components within &struct amdgpu_display_manager that require
1880  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1881  * were loaded. Also flush IRQ workqueues and disable them.
1882  */
1883 static int dm_hw_fini(void *handle)
1884 {
1885 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1886 
1887 	amdgpu_dm_hpd_fini(adev);
1888 
1889 	amdgpu_dm_irq_fini(adev);
1890 	amdgpu_dm_fini(adev);
1891 	return 0;
1892 }
1895 static int dm_enable_vblank(struct drm_crtc *crtc);
1896 static void dm_disable_vblank(struct drm_crtc *crtc);
1897 
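/*
 * Enable or disable the pageflip and vblank interrupts for every stream in
 * @state that has at least one active plane. Used to quiesce and restore
 * display interrupts around GPU reset.
 */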
1898 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1899 				 struct dc_state *state, bool enable)
1900 {
1901 	enum dc_irq_source irq_source;
1902 	struct amdgpu_crtc *acrtc;
1903 	int rc = -EBUSY;
1904 	int i = 0;
1905 
1906 	for (i = 0; i < state->stream_count; i++) {
1907 		acrtc = get_crtc_by_otg_inst(
1908 				adev, state->stream_status[i].primary_otg_inst);
1909 
1910 		if (acrtc && state->stream_status[i].plane_count != 0) {
1911 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1912 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1915 			if (rc)
1916 				DRM_WARN("Failed to %s pflip interrupts\n",
1917 					 enable ? "enable" : "disable");
1918 
1919 			if (enable) {
1920 				rc = dm_enable_vblank(&acrtc->base);
1921 				if (rc)
1922 					DRM_WARN("Failed to enable vblank interrupts\n");
1923 			} else {
1924 				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}
1931 
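/*
 * Commit a copy of the current DC state with all planes and streams removed,
 * blanking every pipe. Used to quiesce the display before GPU reset.
 */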
1932 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1933 {
1934 	struct dc_state *context = NULL;
1935 	enum dc_status res = DC_ERROR_UNEXPECTED;
1936 	int i;
1937 	struct dc_stream_state *del_streams[MAX_PIPES];
1938 	int del_streams_count = 0;
1939 
1940 	memset(del_streams, 0, sizeof(del_streams));
1941 
1942 	context = dc_create_state(dc);
1943 	if (context == NULL)
1944 		goto context_alloc_fail;
1945 
1946 	dc_resource_state_copy_construct_current(dc, context);
1947 
	/* First, remove all streams from the context */
1949 	for (i = 0; i < context->stream_count; i++) {
1950 		struct dc_stream_state *stream = context->streams[i];
1951 
1952 		del_streams[del_streams_count++] = stream;
1953 	}
1954 
1955 	/* Remove all planes for removed streams and then remove the streams */
1956 	for (i = 0; i < del_streams_count; i++) {
1957 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1958 			res = DC_FAIL_DETACH_SURFACES;
1959 			goto fail;
1960 		}
1961 
1962 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1963 		if (res != DC_OK)
1964 			goto fail;
1965 	}
1968 	res = dc_validate_global_state(dc, context, false);
1969 
1970 	if (res != DC_OK) {
		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1972 		goto fail;
1973 	}
1974 
1975 	res = dc_commit_state(dc, context);
1976 
1977 fail:
1978 	dc_release_state(context);
1979 
1980 context_alloc_fail:
1981 	return res;
1982 }
1983 
1984 static int dm_suspend(void *handle)
1985 {
1986 	struct amdgpu_device *adev = handle;
1987 	struct amdgpu_display_manager *dm = &adev->dm;
1988 	int ret = 0;
1989 
1990 	if (amdgpu_in_reset(adev)) {
1991 		mutex_lock(&dm->dc_lock);
1992 
1993 #if defined(CONFIG_DRM_AMD_DC_DCN)
1994 		dc_allow_idle_optimizations(adev->dm.dc, false);
1995 #endif
1996 
1997 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1998 
1999 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2000 
2001 		amdgpu_dm_commit_zero_streams(dm->dc);
2002 
2003 		amdgpu_dm_irq_suspend(adev);
2004 
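		/*
		 * Note: dc_lock is intentionally left held here; dm_resume()
		 * releases it once the GPU reset completes.
		 */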
2005 		return ret;
2006 	}
2007 
2008 	WARN_ON(adev->dm.cached_state);
2009 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2010 
2011 	s3_handle_mst(adev_to_drm(adev), true);
2012 
2013 	amdgpu_dm_irq_suspend(adev);
2014 
2015 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2016 
2017 	return 0;
2018 }
2019 
2020 static struct amdgpu_dm_connector *
2021 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2022 					     struct drm_crtc *crtc)
2023 {
2024 	uint32_t i;
2025 	struct drm_connector_state *new_con_state;
2026 	struct drm_connector *connector;
2027 	struct drm_crtc *crtc_from_state;
2028 
2029 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2030 		crtc_from_state = new_con_state->crtc;
2031 
2032 		if (crtc_from_state == crtc)
2033 			return to_amdgpu_dm_connector(connector);
2034 	}
2035 
2036 	return NULL;
2037 }
2038 
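/*
 * Emulate detection for a user-forced connector: mark the link type as
 * dc_connection_none, create a local sink matching the connector signal
 * and try to read the EDID from it.
 */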
2039 static void emulated_link_detect(struct dc_link *link)
2040 {
2041 	struct dc_sink_init_data sink_init_data = { 0 };
2042 	struct display_sink_capability sink_caps = { 0 };
2043 	enum dc_edid_status edid_status;
2044 	struct dc_context *dc_ctx = link->ctx;
2045 	struct dc_sink *sink = NULL;
2046 	struct dc_sink *prev_sink = NULL;
2047 
2048 	link->type = dc_connection_none;
2049 	prev_sink = link->local_sink;
2050 
2051 	if (prev_sink)
2052 		dc_sink_release(prev_sink);
2053 
2054 	switch (link->connector_signal) {
2055 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2056 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2057 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2058 		break;
2059 	}
2060 
2061 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2062 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2063 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2064 		break;
2065 	}
2066 
2067 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2068 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2069 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2070 		break;
2071 	}
2072 
2073 	case SIGNAL_TYPE_LVDS: {
2074 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2075 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2076 		break;
2077 	}
2078 
2079 	case SIGNAL_TYPE_EDP: {
2080 		sink_caps.transaction_type =
2081 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2082 		sink_caps.signal = SIGNAL_TYPE_EDP;
2083 		break;
2084 	}
2085 
2086 	case SIGNAL_TYPE_DISPLAY_PORT: {
2087 		sink_caps.transaction_type =
2088 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2089 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2090 		break;
2091 	}
2092 
2093 	default:
2094 		DC_ERROR("Invalid connector type! signal:%d\n",
2095 			link->connector_signal);
2096 		return;
2097 	}
2098 
2099 	sink_init_data.link = link;
2100 	sink_init_data.sink_signal = sink_caps.signal;
2101 
2102 	sink = dc_sink_create(&sink_init_data);
2103 	if (!sink) {
2104 		DC_ERROR("Failed to create sink!\n");
2105 		return;
2106 	}
2107 
2108 	/* dc_sink_create returns a new reference */
2109 	link->local_sink = sink;
2110 
2111 	edid_status = dm_helpers_read_local_edid(
2112 			link->ctx,
2113 			link,
2114 			sink);
2115 
	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}
2120 
2121 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2122 				     struct amdgpu_display_manager *dm)
2123 {
2124 	struct {
2125 		struct dc_surface_update surface_updates[MAX_SURFACES];
2126 		struct dc_plane_info plane_infos[MAX_SURFACES];
2127 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2128 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2129 		struct dc_stream_update stream_update;
	} *bundle;
2131 	int k, m;
2132 
2133 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2134 
2135 	if (!bundle) {
2136 		dm_error("Failed to allocate update bundle\n");
2137 		goto cleanup;
2138 	}
2139 
2140 	for (k = 0; k < dc_state->stream_count; k++) {
2141 		bundle->stream_update.stream = dc_state->streams[k];
2142 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
2153 	}
2154 
2155 cleanup:
2156 	kfree(bundle);
2157 
2158 	return;
2159 }
2160 
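/* Blank the stream currently driven by @link by committing dpms_off = true. */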
2161 static void dm_set_dpms_off(struct dc_link *link)
2162 {
2163 	struct dc_stream_state *stream_state;
2164 	struct amdgpu_dm_connector *aconnector = link->priv;
2165 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2166 	struct dc_stream_update stream_update;
2167 	bool dpms_off = true;
2168 
2169 	memset(&stream_update, 0, sizeof(stream_update));
2170 	stream_update.dpms_off = &dpms_off;
2171 
2172 	mutex_lock(&adev->dm.dc_lock);
2173 	stream_state = dc_stream_find_from_link(link);
2174 
2175 	if (stream_state == NULL) {
2176 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2177 		mutex_unlock(&adev->dm.dc_lock);
2178 		return;
2179 	}
2180 
2181 	stream_update.stream = stream_state;
2182 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2183 				     stream_state, &stream_update,
2184 				     stream_state->ctx->dc->current_state);
2185 	mutex_unlock(&adev->dm.dc_lock);
2186 }
2187 
2188 static int dm_resume(void *handle)
2189 {
2190 	struct amdgpu_device *adev = handle;
2191 	struct drm_device *ddev = adev_to_drm(adev);
2192 	struct amdgpu_display_manager *dm = &adev->dm;
2193 	struct amdgpu_dm_connector *aconnector;
2194 	struct drm_connector *connector;
2195 	struct drm_connector_list_iter iter;
2196 	struct drm_crtc *crtc;
2197 	struct drm_crtc_state *new_crtc_state;
2198 	struct dm_crtc_state *dm_new_crtc_state;
2199 	struct drm_plane *plane;
2200 	struct drm_plane_state *new_plane_state;
2201 	struct dm_plane_state *dm_new_plane_state;
2202 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2203 	enum dc_connection_type new_connection_type = dc_connection_none;
2204 	struct dc_state *dc_state;
2205 	int i, r, j;
2206 
2207 	if (amdgpu_in_reset(adev)) {
2208 		dc_state = dm->cached_dc_state;
2209 
2210 		r = dm_dmub_hw_init(adev);
2211 		if (r)
2212 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2213 
2214 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2215 		dc_resume(dm->dc);
2216 
2217 		amdgpu_dm_irq_resume_early(adev);
2218 
		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}
2226 #if defined(CONFIG_DRM_AMD_DC_DCN)
2227 		/*
2228 		 * Resource allocation happens for link encoders for newer ASIC in
2229 		 * dc_validate_global_state, so we need to revalidate it.
2230 		 *
2231 		 * This shouldn't fail (it passed once before), so warn if it does.
2232 		 */
2233 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2234 #endif
2235 
2236 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2237 
2238 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2239 
2240 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2241 
2242 		dc_release_state(dm->cached_dc_state);
2243 		dm->cached_dc_state = NULL;
2244 
2245 		amdgpu_dm_irq_resume_late(adev);
2246 
2247 		mutex_unlock(&dm->dc_lock);
2248 
2249 		return 0;
2250 	}
2251 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2252 	dc_release_state(dm_state->context);
2253 	dm_state->context = dc_create_state(dm->dc);
2254 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2255 	dc_resource_state_construct(dm->dc, dm_state->context);
2256 
2257 	/* Before powering on DC we need to re-initialize DMUB. */
2258 	r = dm_dmub_hw_init(adev);
2259 	if (r)
2260 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2261 
2262 	/* power on hardware */
2263 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2264 
2265 	/* program HPD filter */
2266 	dc_resume(dm->dc);
2267 
	/*
	 * Enable HPD Rx IRQ early; this should be done before set mode, since
	 * short pulse interrupts are used for MST.
	 */
2272 	amdgpu_dm_irq_resume_early(adev);
2273 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2275 	s3_handle_mst(ddev, false);
2276 
	/* Do detection */
2278 	drm_connector_list_iter_begin(ddev, &iter);
2279 	drm_for_each_connector_iter(connector, &iter) {
2280 		aconnector = to_amdgpu_dm_connector(connector);
2281 
		/*
		 * This is the case when traversing through already created
		 * MST connectors; they should be skipped.
		 */
2286 		if (aconnector->mst_port)
2287 			continue;
2288 
2289 		mutex_lock(&aconnector->hpd_lock);
2290 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2291 			DRM_ERROR("KMS: Failed to detect connector\n");
2292 
2293 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2294 			emulated_link_detect(aconnector->dc_link);
2295 		else
2296 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2297 
2298 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2299 			aconnector->fake_enable = false;
2300 
2301 		if (aconnector->dc_sink)
2302 			dc_sink_release(aconnector->dc_sink);
2303 		aconnector->dc_sink = NULL;
2304 		amdgpu_dm_update_connector_after_detect(aconnector);
2305 		mutex_unlock(&aconnector->hpd_lock);
2306 	}
2307 	drm_connector_list_iter_end(&iter);
2308 
2309 	/* Force mode set in atomic commit */
2310 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2311 		new_crtc_state->active_changed = true;
2312 
2313 	/*
2314 	 * atomic_check is expected to create the dc states. We need to release
2315 	 * them here, since they were duplicated as part of the suspend
2316 	 * procedure.
2317 	 */
2318 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2319 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2320 		if (dm_new_crtc_state->stream) {
2321 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2322 			dc_stream_release(dm_new_crtc_state->stream);
2323 			dm_new_crtc_state->stream = NULL;
2324 		}
2325 	}
2326 
2327 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2328 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2329 		if (dm_new_plane_state->dc_state) {
2330 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2331 			dc_plane_state_release(dm_new_plane_state->dc_state);
2332 			dm_new_plane_state->dc_state = NULL;
2333 		}
2334 	}
2335 
2336 	drm_atomic_helper_resume(ddev, dm->cached_state);
2337 
2338 	dm->cached_state = NULL;
2339 
2340 	amdgpu_dm_irq_resume_late(adev);
2341 
2342 	amdgpu_dm_smu_write_watermarks_table(adev);
2343 
2344 	return 0;
2345 }
2346 
2347 /**
2348  * DOC: DM Lifecycle
2349  *
2350  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2351  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2352  * the base driver's device list to be initialized and torn down accordingly.
2353  *
2354  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2355  */
2356 
2357 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2358 	.name = "dm",
2359 	.early_init = dm_early_init,
2360 	.late_init = dm_late_init,
2361 	.sw_init = dm_sw_init,
2362 	.sw_fini = dm_sw_fini,
2363 	.early_fini = amdgpu_dm_early_fini,
2364 	.hw_init = dm_hw_init,
2365 	.hw_fini = dm_hw_fini,
2366 	.suspend = dm_suspend,
2367 	.resume = dm_resume,
2368 	.is_idle = dm_is_idle,
2369 	.wait_for_idle = dm_wait_for_idle,
2370 	.check_soft_reset = dm_check_soft_reset,
2371 	.soft_reset = dm_soft_reset,
2372 	.set_clockgating_state = dm_set_clockgating_state,
2373 	.set_powergating_state = dm_set_powergating_state,
2374 };
2375 
const struct amdgpu_ip_block_version dm_ip_block = {
2378 	.type = AMD_IP_BLOCK_TYPE_DCE,
2379 	.major = 1,
2380 	.minor = 0,
2381 	.rev = 0,
2382 	.funcs = &amdgpu_dm_funcs,
2383 };
2386 /**
2387  * DOC: atomic
2388  *
2389  * *WIP*
2390  */
2391 
2392 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2393 	.fb_create = amdgpu_display_user_framebuffer_create,
2394 	.get_format_info = amd_get_format_info,
2395 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2396 	.atomic_check = amdgpu_dm_atomic_check,
2397 	.atomic_commit = drm_atomic_helper_commit,
2398 };
2399 
2400 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2401 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2402 };
2403 
2404 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2405 {
2406 	u32 max_cll, min_cll, max, min, q, r;
2407 	struct amdgpu_dm_backlight_caps *caps;
2408 	struct amdgpu_display_manager *dm;
2409 	struct drm_connector *conn_base;
2410 	struct amdgpu_device *adev;
2411 	struct dc_link *link = NULL;
2412 	static const u8 pre_computed_values[] = {
2413 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2414 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2415 
2416 	if (!aconnector || !aconnector->dc_link)
2417 		return;
2418 
2419 	link = aconnector->dc_link;
2420 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2421 		return;
2422 
2423 	conn_base = &aconnector->base;
2424 	adev = drm_to_adev(conn_base->dev);
2425 	dm = &adev->dm;
2426 	caps = &dm->backlight_caps;
2427 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2428 	caps->aux_support = false;
2429 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2430 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2431 
2432 	if (caps->ext_caps->bits.oled == 1 ||
2433 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2434 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2435 		caps->aux_support = true;
2436 
2437 	if (amdgpu_backlight == 0)
2438 		caps->aux_support = false;
2439 	else if (amdgpu_backlight == 1)
2440 		caps->aux_support = true;
2441 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression directly would require floating point
	 * precision; to avoid this complexity, we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * we know that CV can be written as: CV = 32*q + r. Next, we replace
	 * CV in the Luminance expression and get 50*(2**q)*(2**(r/32)), so we
	 * just need to pre-compute the values of 50*2**(r/32). The
	 * pre-computation was done with the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression are stored in
	 * pre_computed_values.
	 */
2457 	q = max_cll >> 5;
2458 	r = max_cll % 32;
2459 	max = (1 << q) * pre_computed_values[r];
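	/*
	 * Worked example: max_cll = 65 gives q = 2, r = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204 nits,
	 * matching round(50 * 2**(65/32.0)) = 204.
	 */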
2460 
2461 	// min luminance: maxLum * (CV/255)^2 / 100
2462 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2463 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2464 
2465 	caps->aux_max_input_signal = max;
2466 	caps->aux_min_input_signal = min;
2467 }
2468 
2469 void amdgpu_dm_update_connector_after_detect(
2470 		struct amdgpu_dm_connector *aconnector)
2471 {
2472 	struct drm_connector *connector = &aconnector->base;
2473 	struct drm_device *dev = connector->dev;
2474 	struct dc_sink *sink;
2475 
2476 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2478 		return;
2479 
2480 	sink = aconnector->dc_link->local_sink;
2481 	if (sink)
2482 		dc_sink_retain(sink);
2483 
	/*
	 * EDID mgmt connector gets its first update only in the mode_valid
	 * hook; the connector sink is then set to either a fake or a physical
	 * sink, depending on link status.
	 * Skip if already done during boot.
	 */
2489 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2490 			&& aconnector->dc_em_sink) {
2491 
		/*
		 * For S3 resume with headless, use the dc_em_sink to fake the
		 * stream, because on resume connector->sink is set to NULL.
		 */
2496 		mutex_lock(&dev->mode_config.mutex);
2497 
2498 		if (sink) {
2499 			if (aconnector->dc_sink) {
2500 				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below are used to
				 * bump up the refcount for the sink because
				 * the link doesn't point to it anymore after
				 * disconnect, so on the next crtc-to-connector
				 * reshuffle by the UMD we would otherwise get
				 * an unwanted dc_sink release.
				 */
2507 				dc_sink_release(aconnector->dc_sink);
2508 			}
2509 			aconnector->dc_sink = sink;
2510 			dc_sink_retain(aconnector->dc_sink);
2511 			amdgpu_dm_update_freesync_caps(connector,
2512 					aconnector->edid);
2513 		} else {
2514 			amdgpu_dm_update_freesync_caps(connector, NULL);
2515 			if (!aconnector->dc_sink) {
2516 				aconnector->dc_sink = aconnector->dc_em_sink;
2517 				dc_sink_retain(aconnector->dc_sink);
2518 			}
2519 		}
2520 
2521 		mutex_unlock(&dev->mode_config.mutex);
2522 
2523 		if (sink)
2524 			dc_sink_release(sink);
2525 		return;
2526 	}
2527 
	/*
	 * TODO: temporary guard while looking for a proper fix:
	 * if this sink is an MST sink, we should not do anything.
	 */
2532 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2533 		dc_sink_release(sink);
2534 		return;
2535 	}
2536 
2537 	if (aconnector->dc_sink == sink) {
2538 		/*
2539 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2540 		 * Do nothing!!
2541 		 */
2542 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2543 				aconnector->connector_id);
2544 		if (sink)
2545 			dc_sink_release(sink);
2546 		return;
2547 	}
2548 
2549 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2550 		aconnector->connector_id, aconnector->dc_sink, sink);
2551 
2552 	mutex_lock(&dev->mode_config.mutex);
2553 
2554 	/*
2555 	 * 1. Update status of the drm connector
2556 	 * 2. Send an event and let userspace tell us what to do
2557 	 */
2558 	if (sink) {
2559 		/*
2560 		 * TODO: check if we still need the S3 mode update workaround.
2561 		 * If yes, put it here.
2562 		 */
2563 		if (aconnector->dc_sink) {
2564 			amdgpu_dm_update_freesync_caps(connector, NULL);
2565 			dc_sink_release(aconnector->dc_sink);
2566 		}
2567 
2568 		aconnector->dc_sink = sink;
2569 		dc_sink_retain(aconnector->dc_sink);
2570 		if (sink->dc_edid.length == 0) {
2571 			aconnector->edid = NULL;
2572 			if (aconnector->dc_link->aux_mode) {
2573 				drm_dp_cec_unset_edid(
2574 					&aconnector->dm_dp_aux.aux);
2575 			}
2576 		} else {
2577 			aconnector->edid =
2578 				(struct edid *)sink->dc_edid.raw_edid;
2579 
2580 			drm_connector_update_edid_property(connector,
2581 							   aconnector->edid);
2582 			if (aconnector->dc_link->aux_mode)
2583 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2584 						    aconnector->edid);
2585 		}
2586 
2587 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2588 		update_connector_ext_caps(aconnector);
2589 	} else {
2590 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2591 		amdgpu_dm_update_freesync_caps(connector, NULL);
2592 		drm_connector_update_edid_property(connector, NULL);
2593 		aconnector->num_modes = 0;
2594 		dc_sink_release(aconnector->dc_sink);
2595 		aconnector->dc_sink = NULL;
2596 		aconnector->edid = NULL;
2597 #ifdef CONFIG_DRM_AMD_DC_HDCP
2598 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2599 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2600 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2601 #endif
2602 	}
2603 
2604 	mutex_unlock(&dev->mode_config.mutex);
2605 
2606 	update_subconnector_property(aconnector);
2607 
2608 	if (sink)
2609 		dc_sink_release(sink);
2610 }
2611 
2612 static void handle_hpd_irq(void *param)
2613 {
2614 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2615 	struct drm_connector *connector = &aconnector->base;
2616 	struct drm_device *dev = connector->dev;
2617 	enum dc_connection_type new_connection_type = dc_connection_none;
2618 	struct amdgpu_device *adev = drm_to_adev(dev);
2619 #ifdef CONFIG_DRM_AMD_DC_HDCP
2620 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2621 #endif
2622 
2623 	if (adev->dm.disable_hpd_irq)
2624 		return;
2625 
	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or to notify the OS, since (for the MST case) MST does this
	 * in its own context.
	 */
2630 	mutex_lock(&aconnector->hpd_lock);
2631 
2632 #ifdef CONFIG_DRM_AMD_DC_HDCP
2633 	if (adev->dm.hdcp_workqueue) {
2634 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2635 		dm_con_state->update_hdcp = true;
2636 	}
2637 #endif
2638 	if (aconnector->fake_enable)
2639 		aconnector->fake_enable = false;
2640 
2641 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2642 		DRM_ERROR("KMS: Failed to detect connector\n");
2643 
2644 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2645 		emulated_link_detect(aconnector->dc_link);
2648 		drm_modeset_lock_all(dev);
2649 		dm_restore_drm_connector_state(dev, connector);
2650 		drm_modeset_unlock_all(dev);
2651 
2652 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2653 			drm_kms_helper_hotplug_event(dev);
2655 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2656 		if (new_connection_type == dc_connection_none &&
2657 		    aconnector->dc_link->type == dc_connection_none)
2658 			dm_set_dpms_off(aconnector->dc_link);
2659 
2660 		amdgpu_dm_update_connector_after_detect(aconnector);
2661 
2662 		drm_modeset_lock_all(dev);
2663 		dm_restore_drm_connector_state(dev, connector);
2664 		drm_modeset_unlock_all(dev);
2665 
2666 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2667 			drm_kms_helper_hotplug_event(dev);
2668 	}
2669 	mutex_unlock(&aconnector->hpd_lock);
}
2672 
2673 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2674 {
2675 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2676 	uint8_t dret;
2677 	bool new_irq_handled = false;
2678 	int dpcd_addr;
2679 	int dpcd_bytes_to_read;
2680 
2681 	const int max_process_count = 30;
2682 	int process_count = 0;
2683 
2684 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2685 
2686 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2687 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2688 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2689 		dpcd_addr = DP_SINK_COUNT;
2690 	} else {
2691 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2692 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2693 		dpcd_addr = DP_SINK_COUNT_ESI;
2694 	}
2695 
2696 	dret = drm_dp_dpcd_read(
2697 		&aconnector->dm_dp_aux.aux,
2698 		dpcd_addr,
2699 		esi,
2700 		dpcd_bytes_to_read);
2701 
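	/*
	 * Keep reading and servicing the ESI vector until the sink stops
	 * raising new IRQs or max_process_count is reached.
	 */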
2702 	while (dret == dpcd_bytes_to_read &&
2703 		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
2706 
2707 		process_count++;
2708 
2709 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2710 		/* handle HPD short pulse irq */
2711 		if (aconnector->mst_mgr.mst_state)
2712 			drm_dp_mst_hpd_irq(
2713 				&aconnector->mst_mgr,
2714 				esi,
2715 				&new_irq_handled);
2716 
2717 		if (new_irq_handled) {
			/* Ack at DPCD to notify downstream */
2719 			const int ack_dpcd_bytes_to_write =
2720 				dpcd_bytes_to_read - 1;
2721 
2722 			for (retry = 0; retry < 3; retry++) {
2723 				uint8_t wret;
2724 
2725 				wret = drm_dp_dpcd_write(
2726 					&aconnector->dm_dp_aux.aux,
2727 					dpcd_addr + 1,
2728 					&esi[1],
2729 					ack_dpcd_bytes_to_write);
2730 				if (wret == ack_dpcd_bytes_to_write)
2731 					break;
2732 			}
2733 
2734 			/* check if there is new irq to be handled */
2735 			dret = drm_dp_dpcd_read(
2736 				&aconnector->dm_dp_aux.aux,
2737 				dpcd_addr,
2738 				esi,
2739 				dpcd_bytes_to_read);
2740 
2741 			new_irq_handled = false;
2742 		} else {
2743 			break;
2744 		}
2745 	}
2746 
2747 	if (process_count == max_process_count)
2748 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2749 }
2750 
2751 static void handle_hpd_rx_irq(void *param)
2752 {
2753 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2754 	struct drm_connector *connector = &aconnector->base;
2755 	struct drm_device *dev = connector->dev;
2756 	struct dc_link *dc_link = aconnector->dc_link;
2757 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2758 	bool result = false;
2759 	enum dc_connection_type new_connection_type = dc_connection_none;
2760 	struct amdgpu_device *adev = drm_to_adev(dev);
2761 	union hpd_irq_data hpd_irq_data;
	bool lock_flag = false;
2763 
2764 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2765 
2766 	if (adev->dm.disable_hpd_irq)
2767 		return;
	/*
	 * TODO: Temporarily add a mutex to protect the HPD interrupt from a
	 * GPIO conflict; after the i2c helper is implemented, this mutex
	 * should be retired.
	 */
2775 	mutex_lock(&aconnector->hpd_lock);
2776 
2777 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2778 
2779 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2780 		(dc_link->type == dc_connection_mst_branch)) {
2781 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2782 			result = true;
2783 			dm_handle_hpd_rx_irq(aconnector);
2784 			goto out;
2785 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2786 			result = false;
2787 			dm_handle_hpd_rx_irq(aconnector);
2788 			goto out;
2789 		}
2790 	}
2791 
2792 	/*
2793 	 * TODO: We need the lock to avoid touching DC state while it's being
2794 	 * modified during automated compliance testing, or when link loss
2795 	 * happens. While this should be split into subhandlers and proper
2796 	 * interfaces to avoid having to conditionally lock like this in the
2797 	 * outer layer, we need this workaround temporarily to allow MST
2798 	 * lightup in some scenarios to avoid timeout.
2799 	 */
2800 	if (!amdgpu_in_reset(adev) &&
2801 	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2802 	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2803 		mutex_lock(&adev->dm.dc_lock);
		lock_flag = true;
2805 	}
2806 
2807 #ifdef CONFIG_DRM_AMD_DC_HDCP
2808 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2809 #else
2810 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2811 #endif
2812 	if (!amdgpu_in_reset(adev) && lock_flag)
2813 		mutex_unlock(&adev->dm.dc_lock);
2814 
2815 out:
2816 	if (result && !is_mst_root_connector) {
2817 		/* Downstream Port status changed. */
2818 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2819 			DRM_ERROR("KMS: Failed to detect connector\n");
2820 
2821 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2822 			emulated_link_detect(dc_link);
2823 
2824 			if (aconnector->fake_enable)
2825 				aconnector->fake_enable = false;
2826 
2827 			amdgpu_dm_update_connector_after_detect(aconnector);
2830 			drm_modeset_lock_all(dev);
2831 			dm_restore_drm_connector_state(dev, connector);
2832 			drm_modeset_unlock_all(dev);
2833 
2834 			drm_kms_helper_hotplug_event(dev);
2835 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2837 			if (aconnector->fake_enable)
2838 				aconnector->fake_enable = false;
2839 
2840 			amdgpu_dm_update_connector_after_detect(aconnector);
2843 			drm_modeset_lock_all(dev);
2844 			dm_restore_drm_connector_state(dev, connector);
2845 			drm_modeset_unlock_all(dev);
2846 
2847 			drm_kms_helper_hotplug_event(dev);
2848 		}
2849 	}
2850 #ifdef CONFIG_DRM_AMD_DC_HDCP
2851 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2852 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2854 	}
2855 #endif
2856 
2857 	if (dc_link->type != dc_connection_mst_branch)
2858 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2859 
2860 	mutex_unlock(&aconnector->hpd_lock);
2861 }
2862 
2863 static void register_hpd_handlers(struct amdgpu_device *adev)
2864 {
2865 	struct drm_device *dev = adev_to_drm(adev);
2866 	struct drm_connector *connector;
2867 	struct amdgpu_dm_connector *aconnector;
2868 	const struct dc_link *dc_link;
2869 	struct dc_interrupt_params int_params = {0};
2870 
2871 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2872 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2873 
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
2877 		aconnector = to_amdgpu_dm_connector(connector);
2878 		dc_link = aconnector->dc_link;
2879 
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2881 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2882 			int_params.irq_source = dc_link->irq_source_hpd;
2883 
2884 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2885 					handle_hpd_irq,
2886 					(void *) aconnector);
2887 		}
2888 
		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
2894 
2895 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2896 					handle_hpd_rx_irq,
2897 					(void *) aconnector);
2898 		}
2899 	}
2900 }
2901 
2902 #if defined(CONFIG_DRM_AMD_DC_SI)
2903 /* Register IRQ sources and initialize IRQ callbacks */
2904 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2905 {
2906 	struct dc *dc = adev->dm.dc;
2907 	struct common_irq_params *c_irq_params;
2908 	struct dc_interrupt_params int_params = {0};
2909 	int r;
2910 	int i;
2911 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2912 
2913 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2914 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2915 
2916 	/*
2917 	 * Actions of amdgpu_irq_add_id():
2918 	 * 1. Register a set() function with base driver.
2919 	 *    Base driver will call set() function to enable/disable an
2920 	 *    interrupt in DC hardware.
2921 	 * 2. Register amdgpu_dm_irq_handler().
2922 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2923 	 *    coming from DC hardware.
2924 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2926 
2927 	/* Use VBLANK interrupt */
2928 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2930 		if (r) {
2931 			DRM_ERROR("Failed to add crtc irq id!\n");
2932 			return r;
2933 		}
2934 
2935 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2936 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
2938 
2939 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2940 
2941 		c_irq_params->adev = adev;
2942 		c_irq_params->irq_src = int_params.irq_source;
2943 
2944 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2945 				dm_crtc_high_irq, c_irq_params);
2946 	}
2947 
2948 	/* Use GRPH_PFLIP interrupt */
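	/* The D1..D6 GRPH_PFLIP source IDs are two apart, hence the i += 2 stride. */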
2949 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2950 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2951 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2952 		if (r) {
2953 			DRM_ERROR("Failed to add page flip irq id!\n");
2954 			return r;
2955 		}
2956 
2957 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2958 		int_params.irq_source =
2959 			dc_interrupt_to_irq_source(dc, i, 0);
2960 
2961 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2962 
2963 		c_irq_params->adev = adev;
2964 		c_irq_params->irq_src = int_params.irq_source;
2965 
2966 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2967 				dm_pflip_high_irq, c_irq_params);
	}
2970 
2971 	/* HPD */
2972 	r = amdgpu_irq_add_id(adev, client_id,
2973 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2974 	if (r) {
2975 		DRM_ERROR("Failed to add hpd irq id!\n");
2976 		return r;
2977 	}
2978 
2979 	register_hpd_handlers(adev);
2980 
2981 	return 0;
2982 }
2983 #endif
2984 
2985 /* Register IRQ sources and initialize IRQ callbacks */
2986 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2987 {
2988 	struct dc *dc = adev->dm.dc;
2989 	struct common_irq_params *c_irq_params;
2990 	struct dc_interrupt_params int_params = {0};
2991 	int r;
2992 	int i;
2993 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2994 
2995 	if (adev->asic_type >= CHIP_VEGA10)
2996 		client_id = SOC15_IH_CLIENTID_DCE;
2997 
2998 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2999 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3000 
3001 	/*
3002 	 * Actions of amdgpu_irq_add_id():
3003 	 * 1. Register a set() function with base driver.
3004 	 *    Base driver will call set() function to enable/disable an
3005 	 *    interrupt in DC hardware.
3006 	 * 2. Register amdgpu_dm_irq_handler().
3007 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3008 	 *    coming from DC hardware.
3009 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3011 
3012 	/* Use VBLANK interrupt */
3013 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3014 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3015 		if (r) {
3016 			DRM_ERROR("Failed to add crtc irq id!\n");
3017 			return r;
3018 		}
3019 
3020 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3021 		int_params.irq_source =
3022 			dc_interrupt_to_irq_source(dc, i, 0);
3023 
3024 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3025 
3026 		c_irq_params->adev = adev;
3027 		c_irq_params->irq_src = int_params.irq_source;
3028 
3029 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3030 				dm_crtc_high_irq, c_irq_params);
3031 	}
3032 
3033 	/* Use VUPDATE interrupt */
3034 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3035 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3036 		if (r) {
3037 			DRM_ERROR("Failed to add vupdate irq id!\n");
3038 			return r;
3039 		}
3040 
3041 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3042 		int_params.irq_source =
3043 			dc_interrupt_to_irq_source(dc, i, 0);
3044 
3045 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3046 
3047 		c_irq_params->adev = adev;
3048 		c_irq_params->irq_src = int_params.irq_source;
3049 
3050 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3051 				dm_vupdate_high_irq, c_irq_params);
3052 	}
3053 
3054 	/* Use GRPH_PFLIP interrupt */
3055 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3056 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3057 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3058 		if (r) {
3059 			DRM_ERROR("Failed to add page flip irq id!\n");
3060 			return r;
3061 		}
3062 
3063 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3064 		int_params.irq_source =
3065 			dc_interrupt_to_irq_source(dc, i, 0);
3066 
3067 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3068 
3069 		c_irq_params->adev = adev;
3070 		c_irq_params->irq_src = int_params.irq_source;
3071 
3072 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3073 				dm_pflip_high_irq, c_irq_params);
	}
3076 
3077 	/* HPD */
3078 	r = amdgpu_irq_add_id(adev, client_id,
3079 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3080 	if (r) {
3081 		DRM_ERROR("Failed to add hpd irq id!\n");
3082 		return r;
3083 	}
3084 
3085 	register_hpd_handlers(adev);
3086 
3087 	return 0;
3088 }
3089 
3090 #if defined(CONFIG_DRM_AMD_DC_DCN)
3091 /* Register IRQ sources and initialize IRQ callbacks */
3092 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3093 {
3094 	struct dc *dc = adev->dm.dc;
3095 	struct common_irq_params *c_irq_params;
3096 	struct dc_interrupt_params int_params = {0};
3097 	int r;
3098 	int i;
3099 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3100 	static const unsigned int vrtl_int_srcid[] = {
3101 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3102 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3103 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3104 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3105 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3106 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3107 	};
3108 #endif
3109 
3110 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3111 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3112 
3113 	/*
3114 	 * Actions of amdgpu_irq_add_id():
3115 	 * 1. Register a set() function with base driver.
3116 	 *    Base driver will call set() function to enable/disable an
3117 	 *    interrupt in DC hardware.
3118 	 * 2. Register amdgpu_dm_irq_handler().
3119 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3120 	 *    coming from DC hardware.
3121 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3122 	 *    for acknowledging and handling.
3123 	 */
3124 
3125 	/* Use VSTARTUP interrupt */
3126 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3127 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3128 			i++) {
3129 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3130 
3131 		if (r) {
3132 			DRM_ERROR("Failed to add crtc irq id!\n");
3133 			return r;
3134 		}
3135 
3136 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3137 		int_params.irq_source =
3138 			dc_interrupt_to_irq_source(dc, i, 0);
3139 
3140 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3141 
3142 		c_irq_params->adev = adev;
3143 		c_irq_params->irq_src = int_params.irq_source;
3144 
3145 		amdgpu_dm_irq_register_interrupt(
3146 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3147 	}
3148 
	/* Use OTG vertical line interrupt */
3150 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3151 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3152 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3153 				vrtl_int_srcid[i], &adev->vline0_irq);
3154 
3155 		if (r) {
3156 			DRM_ERROR("Failed to add vline0 irq id!\n");
3157 			return r;
3158 		}
3159 
3160 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3161 		int_params.irq_source =
3162 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3163 
3164 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3165 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3166 			break;
3167 		}
3168 
3169 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3170 					- DC_IRQ_SOURCE_DC1_VLINE0];
3171 
3172 		c_irq_params->adev = adev;
3173 		c_irq_params->irq_src = int_params.irq_source;
3174 
3175 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3176 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3177 	}
3178 #endif
3179 
3180 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3181 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3182 	 * to trigger at end of each vblank, regardless of state of the lock,
3183 	 * matching DCE behaviour.
3184 	 */
3185 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3186 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3187 	     i++) {
3188 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3189 
3190 		if (r) {
3191 			DRM_ERROR("Failed to add vupdate irq id!\n");
3192 			return r;
3193 		}
3194 
3195 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3196 		int_params.irq_source =
3197 			dc_interrupt_to_irq_source(dc, i, 0);
3198 
3199 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3200 
3201 		c_irq_params->adev = adev;
3202 		c_irq_params->irq_src = int_params.irq_source;
3203 
3204 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3205 				dm_vupdate_high_irq, c_irq_params);
3206 	}
3207 
3208 	/* Use GRPH_PFLIP interrupt */
3209 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3210 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3211 			i++) {
3212 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3213 		if (r) {
3214 			DRM_ERROR("Failed to add page flip irq id!\n");
3215 			return r;
3216 		}
3217 
3218 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3219 		int_params.irq_source =
3220 			dc_interrupt_to_irq_source(dc, i, 0);
3221 
3222 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3223 
3224 		c_irq_params->adev = adev;
3225 		c_irq_params->irq_src = int_params.irq_source;
3226 
3227 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3228 				dm_pflip_high_irq, c_irq_params);
	}
3231 
3232 	/* HPD */
3233 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3234 			&adev->hpd_irq);
3235 	if (r) {
3236 		DRM_ERROR("Failed to add hpd irq id!\n");
3237 		return r;
3238 	}
3239 
3240 	register_hpd_handlers(adev);
3241 
3242 	return 0;
3243 }
3244 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3245 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3246 {
3247 	struct dc *dc = adev->dm.dc;
3248 	struct common_irq_params *c_irq_params;
3249 	struct dc_interrupt_params int_params = {0};
3250 	int r, i;
3251 
3252 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3253 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3254 
3255 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3256 			&adev->dmub_outbox_irq);
3257 	if (r) {
3258 		DRM_ERROR("Failed to add outbox irq id!\n");
3259 		return r;
3260 	}
3261 
3262 	if (dc->ctx->dmub_srv) {
3263 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3264 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);
3267 
3268 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3269 
3270 		c_irq_params->adev = adev;
3271 		c_irq_params->irq_src = int_params.irq_source;
3272 
3273 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3274 				dm_dmub_outbox1_low_irq, c_irq_params);
3275 	}
3276 
3277 	return 0;
3278 }
3279 #endif
3280 
3281 /*
3282  * Acquires the lock for the atomic state object and returns
3283  * the new atomic state.
3284  *
3285  * This should only be called during atomic check.
3286  */
3287 static int dm_atomic_get_state(struct drm_atomic_state *state,
3288 			       struct dm_atomic_state **dm_state)
3289 {
3290 	struct drm_device *dev = state->dev;
3291 	struct amdgpu_device *adev = drm_to_adev(dev);
3292 	struct amdgpu_display_manager *dm = &adev->dm;
3293 	struct drm_private_state *priv_state;
3294 
3295 	if (*dm_state)
3296 		return 0;
3297 
3298 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3299 	if (IS_ERR(priv_state))
3300 		return PTR_ERR(priv_state);
3301 
3302 	*dm_state = to_dm_atomic_state(priv_state);
3303 
3304 	return 0;
3305 }
3306 
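/*
 * Return the dm_atomic_state tracked in @state, or NULL if the DM private
 * object is not part of this atomic transaction.
 */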
3307 static struct dm_atomic_state *
3308 dm_atomic_get_new_state(struct drm_atomic_state *state)
3309 {
3310 	struct drm_device *dev = state->dev;
3311 	struct amdgpu_device *adev = drm_to_adev(dev);
3312 	struct amdgpu_display_manager *dm = &adev->dm;
3313 	struct drm_private_obj *obj;
3314 	struct drm_private_state *new_obj_state;
3315 	int i;
3316 
3317 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3318 		if (obj->funcs == dm->atomic_obj.funcs)
3319 			return to_dm_atomic_state(new_obj_state);
3320 	}
3321 
3322 	return NULL;
3323 }
3324 
3325 static struct drm_private_state *
3326 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3327 {
3328 	struct dm_atomic_state *old_state, *new_state;
3329 
3330 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3331 	if (!new_state)
3332 		return NULL;
3333 
3334 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3335 
3336 	old_state = to_dm_atomic_state(obj->state);
3337 
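	/* Copy the current DC context so the duplicate can be modified independently. */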
3338 	if (old_state && old_state->context)
3339 		new_state->context = dc_copy_state(old_state->context);
3340 
3341 	if (!new_state->context) {
3342 		kfree(new_state);
3343 		return NULL;
3344 	}
3345 
3346 	return &new_state->base;
3347 }
3348 
3349 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3350 				    struct drm_private_state *state)
3351 {
3352 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3353 
3354 	if (dm_state && dm_state->context)
3355 		dc_release_state(dm_state->context);
3356 
3357 	kfree(dm_state);
3358 }
3359 
3360 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3361 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3362 	.atomic_destroy_state = dm_atomic_destroy_state,
3363 };
3364 
3365 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3366 {
3367 	struct dm_atomic_state *state;
3368 	int r;
3369 
3370 	adev->mode_info.mode_config_initialized = true;
3371 
3372 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3373 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3374 
3375 	adev_to_drm(adev)->mode_config.max_width = 16384;
3376 	adev_to_drm(adev)->mode_config.max_height = 16384;
3377 
3378 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3379 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3380 	/* indicates support for immediate flip */
3381 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3382 
3383 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3384 
3385 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3386 	if (!state)
3387 		return -ENOMEM;
3388 
3389 	state->context = dc_create_state(adev->dm.dc);
3390 	if (!state->context) {
3391 		kfree(state);
3392 		return -ENOMEM;
3393 	}
3394 
3395 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3396 
3397 	drm_atomic_private_obj_init(adev_to_drm(adev),
3398 				    &adev->dm.atomic_obj,
3399 				    &state->base,
3400 				    &dm_atomic_state_funcs);
3401 
3402 	r = amdgpu_display_modeset_create_props(adev);
3403 	if (r) {
3404 		dc_release_state(state->context);
3405 		kfree(state);
3406 		return r;
3407 	}
3408 
3409 	r = amdgpu_dm_audio_init(adev);
3410 	if (r) {
3411 		dc_release_state(state->context);
3412 		kfree(state);
3413 		return r;
3414 	}
3415 
3416 	return 0;
3417 }
3418 
3419 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3420 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3421 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3422 
3423 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3424 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3425 
3426 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3427 {
3428 #if defined(CONFIG_ACPI)
3429 	struct amdgpu_dm_backlight_caps caps;
3430 
3431 	memset(&caps, 0, sizeof(caps));
3432 
3433 	if (dm->backlight_caps.caps_valid)
3434 		return;
3435 
3436 	amdgpu_acpi_get_backlight_caps(&caps);
3437 	if (caps.caps_valid) {
3438 		dm->backlight_caps.caps_valid = true;
3439 		if (caps.aux_support)
3440 			return;
3441 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3442 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3443 	} else {
3444 		dm->backlight_caps.min_input_signal =
3445 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3446 		dm->backlight_caps.max_input_signal =
3447 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3448 	}
3449 #else
3450 	if (dm->backlight_caps.aux_support)
3451 		return;
3452 
3453 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3454 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3455 #endif
3456 }
3457 
3458 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3459 				unsigned *min, unsigned *max)
3460 {
3461 	if (!caps)
3462 		return 0;
3463 
3464 	if (caps->aux_support) {
3465 		// Firmware limits are in nits, DC API wants millinits.
3466 		*max = 1000 * caps->aux_max_input_signal;
3467 		*min = 1000 * caps->aux_min_input_signal;
3468 	} else {
3469 		// Firmware limits are 8-bit, PWM control is 16-bit.
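		// e.g. 0x101 * 0xFF == 0xFFFF, so the 8-bit firmware max maps
		// exactly to the 16-bit PWM max.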
3470 		*max = 0x101 * caps->max_input_signal;
3471 		*min = 0x101 * caps->min_input_signal;
3472 	}
3473 	return 1;
3474 }
3475 
3476 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3477 					uint32_t brightness)
3478 {
3479 	unsigned min, max;
3480 
3481 	if (!get_brightness_range(caps, &min, &max))
3482 		return brightness;
3483 
3484 	// Rescale 0..255 to min..max
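	// The mapping is exact at the endpoints: 0 maps to min and
	// AMDGPU_MAX_BL_LEVEL (255) maps to max.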
3485 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3486 				       AMDGPU_MAX_BL_LEVEL);
3487 }
3488 
3489 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3490 				      uint32_t brightness)
3491 {
3492 	unsigned min, max;
3493 
3494 	if (!get_brightness_range(caps, &min, &max))
3495 		return brightness;
3496 
3497 	if (brightness < min)
3498 		return 0;
3499 	// Rescale min..max to 0..255
3500 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3501 				 max - min);
3502 }
3503 
3504 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3505 					 u32 user_brightness)
3506 {
3507 	struct amdgpu_dm_backlight_caps caps;
3508 	struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3509 	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
	bool rc = false;
3511 	int i;
3512 
3513 	amdgpu_dm_update_backlight_caps(dm);
3514 	caps = dm->backlight_caps;
3515 
3516 	for (i = 0; i < dm->num_of_edps; i++) {
3517 		dm->brightness[i] = user_brightness;
3518 		brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3519 		link[i] = (struct dc_link *)dm->backlight_link[i];
3520 	}
3521 
3522 	/* Change brightness based on AUX property */
3523 	if (caps.aux_support) {
3524 		for (i = 0; i < dm->num_of_edps; i++) {
3525 			rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3526 				AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3527 			if (!rc) {
3528 				DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3529 				break;
3530 			}
3531 		}
3532 	} else {
3533 		for (i = 0; i < dm->num_of_edps; i++) {
3534 			rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3535 			if (!rc) {
3536 				DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
3537 				break;
3538 			}
3539 		}
3540 	}
3541 
3542 	return rc ? 0 : 1;
3543 }
3544 
3545 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3546 {
3547 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3548 
3549 	amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3550 
3551 	return 0;
3552 }
3553 
3554 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3555 {
3556 	struct amdgpu_dm_backlight_caps caps;
3557 
3558 	amdgpu_dm_update_backlight_caps(dm);
3559 	caps = dm->backlight_caps;
3560 
3561 	if (caps.aux_support) {
3562 		struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3563 		u32 avg, peak;
3564 		bool rc;
3565 
3566 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3567 		if (!rc)
3568 			return dm->brightness[0];
3569 		return convert_brightness_to_user(&caps, avg);
3570 	} else {
3571 		int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3572 
3573 		if (ret == DC_ERROR_UNEXPECTED)
3574 			return dm->brightness[0];
3575 		return convert_brightness_to_user(&caps, ret);
3576 	}
3577 }
3578 
3579 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3580 {
3581 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3582 
3583 	return amdgpu_dm_backlight_get_level(dm);
3584 }
3585 
3586 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3587 	.options = BL_CORE_SUSPENDRESUME,
3588 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3589 	.update_status	= amdgpu_dm_backlight_update_status,
3590 };
3591 
3592 static void
3593 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3594 {
3595 	char bl_name[16];
3596 	struct backlight_properties props = { 0 };
3597 	int i;
3598 
3599 	amdgpu_dm_update_backlight_caps(dm);
3600 	for (i = 0; i < dm->num_of_edps; i++)
3601 		dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3602 
3603 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3604 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3605 	props.type = BACKLIGHT_RAW;
3606 
3607 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3608 		 adev_to_drm(dm->adev)->primary->index);
3609 
3610 	dm->backlight_dev = backlight_device_register(bl_name,
3611 						      adev_to_drm(dm->adev)->dev,
3612 						      dm,
3613 						      &amdgpu_dm_backlight_ops,
3614 						      &props);
3615 
3616 	if (IS_ERR(dm->backlight_dev))
3617 		DRM_ERROR("DM: Backlight registration failed!\n");
3618 	else
3619 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3620 }
3621 
3622 #endif
3623 
3624 static int initialize_plane(struct amdgpu_display_manager *dm,
3625 			    struct amdgpu_mode_info *mode_info, int plane_id,
3626 			    enum drm_plane_type plane_type,
3627 			    const struct dc_plane_cap *plane_cap)
3628 {
3629 	struct drm_plane *plane;
3630 	unsigned long possible_crtcs;
3631 	int ret = 0;
3632 
3633 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3634 	if (!plane) {
3635 		DRM_ERROR("KMS: Failed to allocate plane\n");
3636 		return -ENOMEM;
3637 	}
3638 	plane->type = plane_type;
3639 
3640 	/*
3641 	 * HACK: IGT tests expect that the primary plane for a CRTC
3642 	 * can only have one possible CRTC. Only expose support for
3643 	 * any CRTC if they're not going to be used as a primary plane
3644 	 * for a CRTC - like overlay or underlay planes.
3645 	 */
3646 	possible_crtcs = 1 << plane_id;
3647 	if (plane_id >= dm->dc->caps.max_streams)
3648 		possible_crtcs = 0xff;
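	/*
	 * e.g. primary plane 0 is bound to CRTC 0 only (0x1), while planes
	 * beyond max_streams may attach to any of the first eight CRTCs (0xff).
	 */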
3649 
3650 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3651 
3652 	if (ret) {
3653 		DRM_ERROR("KMS: Failed to initialize plane\n");
3654 		kfree(plane);
3655 		return ret;
3656 	}
3657 
3658 	if (mode_info)
3659 		mode_info->planes[plane_id] = plane;
3660 
3661 	return ret;
3662 }
3663 
3665 static void register_backlight_device(struct amdgpu_display_manager *dm,
3666 				      struct dc_link *link)
3667 {
3668 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3669 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3670 
3671 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3672 	    link->type != dc_connection_none) {
3673 		/*
3674 		 * Event if registration failed, we should continue with
3675 		 * DM initialization because not having a backlight control
3676 		 * is better then a black screen.
3677 		 */
3678 		if (!dm->backlight_dev)
3679 			amdgpu_dm_register_backlight_device(dm);
3680 
3681 		if (dm->backlight_dev) {
3682 			dm->backlight_link[dm->num_of_edps] = link;
3683 			dm->num_of_edps++;
3684 		}
3685 	}
3686 #endif
3687 }
3688 
3690 /*
3691  * In this architecture, the association
3692  * connector -> encoder -> crtc
3693  * id not really requried. The crtc and connector will hold the
3694  * display_index as an abstraction to use with DAL component
3695  *
3696  * Returns 0 on success
3697  */
3698 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3699 {
3700 	struct amdgpu_display_manager *dm = &adev->dm;
3701 	int32_t i;
3702 	struct amdgpu_dm_connector *aconnector = NULL;
3703 	struct amdgpu_encoder *aencoder = NULL;
3704 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3705 	uint32_t link_cnt;
3706 	int32_t primary_planes;
3707 	enum dc_connection_type new_connection_type = dc_connection_none;
3708 	const struct dc_plane_cap *plane;
3709 
3710 	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual number of CRTCs in use */
3712 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3713 
3714 	link_cnt = dm->dc->caps.max_links;
3715 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3716 		DRM_ERROR("DM: Failed to initialize mode config\n");
3717 		return -EINVAL;
3718 	}
3719 
3720 	/* There is one primary plane per CRTC */
3721 	primary_planes = dm->dc->caps.max_streams;
3722 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3723 
3724 	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLs.
3726 	 * Order is reversed to match iteration order in atomic check.
3727 	 */
3728 	for (i = (primary_planes - 1); i >= 0; i--) {
3729 		plane = &dm->dc->caps.planes[i];
3730 
3731 		if (initialize_plane(dm, mode_info, i,
3732 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3733 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3734 			goto fail;
3735 		}
3736 	}
3737 
3738 	/*
3739 	 * Initialize overlay planes, index starting after primary planes.
3740 	 * These planes have a higher DRM index than the primary planes since
3741 	 * they should be considered as having a higher z-order.
3742 	 * Order is reversed to match iteration order in atomic check.
3743 	 *
3744 	 * Only support DCN for now, and only expose one so we don't encourage
3745 	 * userspace to use up all the pipes.
3746 	 */
3747 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3748 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3749 
3750 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3751 			continue;
3752 
3753 		if (!plane->blends_with_above || !plane->blends_with_below)
3754 			continue;
3755 
3756 		if (!plane->pixel_format_support.argb8888)
3757 			continue;
3758 
3759 		if (initialize_plane(dm, NULL, primary_planes + i,
3760 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3761 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3762 			goto fail;
3763 		}
3764 
3765 		/* Only create one overlay plane. */
3766 		break;
3767 	}
3768 
3769 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3770 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3771 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3772 			goto fail;
3773 		}
3774 
3775 #if defined(CONFIG_DRM_AMD_DC_DCN)
3776 	/* Use Outbox interrupt */
3777 	switch (adev->asic_type) {
3778 	case CHIP_SIENNA_CICHLID:
3779 	case CHIP_NAVY_FLOUNDER:
3780 	case CHIP_YELLOW_CARP:
3781 	case CHIP_RENOIR:
3782 		if (register_outbox_irq_handlers(dm->adev)) {
3783 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3784 			goto fail;
3785 		}
3786 		break;
3787 	default:
3788 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3789 	}
3790 #endif
3791 
	/* Loop over all connectors on the board */
3793 	for (i = 0; i < link_cnt; i++) {
3794 		struct dc_link *link = NULL;
3795 
3796 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3797 			DRM_ERROR(
3798 				"KMS: Cannot support more than %d display indexes\n",
3799 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3800 			continue;
3801 		}
3802 
3803 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3804 		if (!aconnector)
3805 			goto fail;
3806 
3807 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3808 		if (!aencoder)
3809 			goto fail;
3810 
3811 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3812 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3813 			goto fail;
3814 		}
3815 
3816 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3817 			DRM_ERROR("KMS: Failed to initialize connector\n");
3818 			goto fail;
3819 		}
3820 
3821 		link = dc_get_link_at_index(dm->dc, i);
3822 
3823 		if (!dc_link_detect_sink(link, &new_connection_type))
3824 			DRM_ERROR("KMS: Failed to detect connector\n");
3825 
3826 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3827 			emulated_link_detect(link);
3828 			amdgpu_dm_update_connector_after_detect(aconnector);
3829 
3830 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3831 			amdgpu_dm_update_connector_after_detect(aconnector);
3832 			register_backlight_device(dm, link);
3833 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3834 				amdgpu_dm_set_psr_caps(link);
3835 		}
3838 	}
3839 
3840 	/* Software is initialized. Now we can register interrupt handlers. */
3841 	switch (adev->asic_type) {
3842 #if defined(CONFIG_DRM_AMD_DC_SI)
3843 	case CHIP_TAHITI:
3844 	case CHIP_PITCAIRN:
3845 	case CHIP_VERDE:
3846 	case CHIP_OLAND:
3847 		if (dce60_register_irq_handlers(dm->adev)) {
3848 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3849 			goto fail;
3850 		}
3851 		break;
3852 #endif
3853 	case CHIP_BONAIRE:
3854 	case CHIP_HAWAII:
3855 	case CHIP_KAVERI:
3856 	case CHIP_KABINI:
3857 	case CHIP_MULLINS:
3858 	case CHIP_TONGA:
3859 	case CHIP_FIJI:
3860 	case CHIP_CARRIZO:
3861 	case CHIP_STONEY:
3862 	case CHIP_POLARIS11:
3863 	case CHIP_POLARIS10:
3864 	case CHIP_POLARIS12:
3865 	case CHIP_VEGAM:
3866 	case CHIP_VEGA10:
3867 	case CHIP_VEGA12:
3868 	case CHIP_VEGA20:
3869 		if (dce110_register_irq_handlers(dm->adev)) {
3870 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3871 			goto fail;
3872 		}
3873 		break;
3874 #if defined(CONFIG_DRM_AMD_DC_DCN)
3875 	case CHIP_RAVEN:
3876 	case CHIP_NAVI12:
3877 	case CHIP_NAVI10:
3878 	case CHIP_NAVI14:
3879 	case CHIP_RENOIR:
3880 	case CHIP_SIENNA_CICHLID:
3881 	case CHIP_NAVY_FLOUNDER:
3882 	case CHIP_DIMGREY_CAVEFISH:
3883 	case CHIP_BEIGE_GOBY:
3884 	case CHIP_VANGOGH:
3885 	case CHIP_YELLOW_CARP:
3886 		if (dcn10_register_irq_handlers(dm->adev)) {
3887 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3888 			goto fail;
3889 		}
3890 		break;
3891 #endif
3892 	default:
3893 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3894 		goto fail;
3895 	}
3896 
3897 	return 0;
3898 fail:
3899 	kfree(aencoder);
3900 	kfree(aconnector);
3901 
3902 	return -EINVAL;
3903 }
3904 
3905 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3906 {
3907 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3909 }
3910 
3911 /******************************************************************************
3912  * amdgpu_display_funcs functions
3913  *****************************************************************************/
3914 
3915 /*
3916  * dm_bandwidth_update - program display watermarks
3917  *
3918  * @adev: amdgpu_device pointer
3919  *
3920  * Calculate and program the display watermarks and line buffer allocation.
3921  */
3922 static void dm_bandwidth_update(struct amdgpu_device *adev)
3923 {
3924 	/* TODO: implement later */
3925 }
3926 
3927 static const struct amdgpu_display_funcs dm_display_funcs = {
3928 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3929 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3930 	.backlight_set_level = NULL, /* never called for DC */
3931 	.backlight_get_level = NULL, /* never called for DC */
3932 	.hpd_sense = NULL,/* called unconditionally */
3933 	.hpd_set_polarity = NULL, /* called unconditionally */
3934 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3935 	.page_flip_get_scanoutpos =
3936 		dm_crtc_get_scanoutpos,/* called unconditionally */
3937 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3938 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3939 };
3940 
3941 #if defined(CONFIG_DEBUG_KERNEL_DC)
3942 
3943 static ssize_t s3_debug_store(struct device *device,
3944 			      struct device_attribute *attr,
3945 			      const char *buf,
3946 			      size_t count)
3947 {
3948 	int ret;
3949 	int s3_state;
3950 	struct drm_device *drm_dev = dev_get_drvdata(device);
3951 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3952 
3953 	ret = kstrtoint(buf, 0, &s3_state);
3954 
3955 	if (ret == 0) {
3956 		if (s3_state) {
3957 			dm_resume(adev);
3958 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3959 		} else
3960 			dm_suspend(adev);
3961 	}
3962 
3963 	return ret == 0 ? count : 0;
3964 }
3965 
3966 DEVICE_ATTR_WO(s3_debug);
3967 
3968 #endif
3969 
3970 static int dm_early_init(void *handle)
3971 {
3972 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3973 
3974 	switch (adev->asic_type) {
3975 #if defined(CONFIG_DRM_AMD_DC_SI)
3976 	case CHIP_TAHITI:
3977 	case CHIP_PITCAIRN:
3978 	case CHIP_VERDE:
3979 		adev->mode_info.num_crtc = 6;
3980 		adev->mode_info.num_hpd = 6;
3981 		adev->mode_info.num_dig = 6;
3982 		break;
3983 	case CHIP_OLAND:
3984 		adev->mode_info.num_crtc = 2;
3985 		adev->mode_info.num_hpd = 2;
3986 		adev->mode_info.num_dig = 2;
3987 		break;
3988 #endif
3989 	case CHIP_BONAIRE:
3990 	case CHIP_HAWAII:
3991 		adev->mode_info.num_crtc = 6;
3992 		adev->mode_info.num_hpd = 6;
3993 		adev->mode_info.num_dig = 6;
3994 		break;
3995 	case CHIP_KAVERI:
3996 		adev->mode_info.num_crtc = 4;
3997 		adev->mode_info.num_hpd = 6;
3998 		adev->mode_info.num_dig = 7;
3999 		break;
4000 	case CHIP_KABINI:
4001 	case CHIP_MULLINS:
4002 		adev->mode_info.num_crtc = 2;
4003 		adev->mode_info.num_hpd = 6;
4004 		adev->mode_info.num_dig = 6;
4005 		break;
4006 	case CHIP_FIJI:
4007 	case CHIP_TONGA:
4008 		adev->mode_info.num_crtc = 6;
4009 		adev->mode_info.num_hpd = 6;
4010 		adev->mode_info.num_dig = 7;
4011 		break;
4012 	case CHIP_CARRIZO:
4013 		adev->mode_info.num_crtc = 3;
4014 		adev->mode_info.num_hpd = 6;
4015 		adev->mode_info.num_dig = 9;
4016 		break;
4017 	case CHIP_STONEY:
4018 		adev->mode_info.num_crtc = 2;
4019 		adev->mode_info.num_hpd = 6;
4020 		adev->mode_info.num_dig = 9;
4021 		break;
4022 	case CHIP_POLARIS11:
4023 	case CHIP_POLARIS12:
4024 		adev->mode_info.num_crtc = 5;
4025 		adev->mode_info.num_hpd = 5;
4026 		adev->mode_info.num_dig = 5;
4027 		break;
4028 	case CHIP_POLARIS10:
4029 	case CHIP_VEGAM:
4030 		adev->mode_info.num_crtc = 6;
4031 		adev->mode_info.num_hpd = 6;
4032 		adev->mode_info.num_dig = 6;
4033 		break;
4034 	case CHIP_VEGA10:
4035 	case CHIP_VEGA12:
4036 	case CHIP_VEGA20:
4037 		adev->mode_info.num_crtc = 6;
4038 		adev->mode_info.num_hpd = 6;
4039 		adev->mode_info.num_dig = 6;
4040 		break;
4041 #if defined(CONFIG_DRM_AMD_DC_DCN)
4042 	case CHIP_RAVEN:
4043 	case CHIP_RENOIR:
4044 	case CHIP_VANGOGH:
4045 		adev->mode_info.num_crtc = 4;
4046 		adev->mode_info.num_hpd = 4;
4047 		adev->mode_info.num_dig = 4;
4048 		break;
4049 	case CHIP_NAVI10:
4050 	case CHIP_NAVI12:
4051 	case CHIP_SIENNA_CICHLID:
4052 	case CHIP_NAVY_FLOUNDER:
4053 		adev->mode_info.num_crtc = 6;
4054 		adev->mode_info.num_hpd = 6;
4055 		adev->mode_info.num_dig = 6;
4056 		break;
4057 	case CHIP_YELLOW_CARP:
4058 		adev->mode_info.num_crtc = 4;
4059 		adev->mode_info.num_hpd = 4;
4060 		adev->mode_info.num_dig = 4;
4061 		break;
4062 	case CHIP_NAVI14:
4063 	case CHIP_DIMGREY_CAVEFISH:
4064 		adev->mode_info.num_crtc = 5;
4065 		adev->mode_info.num_hpd = 5;
4066 		adev->mode_info.num_dig = 5;
4067 		break;
4068 	case CHIP_BEIGE_GOBY:
4069 		adev->mode_info.num_crtc = 2;
4070 		adev->mode_info.num_hpd = 2;
4071 		adev->mode_info.num_dig = 2;
4072 		break;
4073 #endif
4074 	default:
4075 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4076 		return -EINVAL;
4077 	}
4078 
4079 	amdgpu_dm_set_irq_funcs(adev);
4080 
4081 	if (adev->mode_info.funcs == NULL)
4082 		adev->mode_info.funcs = &dm_display_funcs;
4083 
4084 	/*
4085 	 * Note: Do NOT change adev->audio_endpt_rreg and
4086 	 * adev->audio_endpt_wreg because they are initialised in
4087 	 * amdgpu_device_init()
4088 	 */
4089 #if defined(CONFIG_DEBUG_KERNEL_DC)
4090 	device_create_file(
4091 		adev_to_drm(adev)->dev,
4092 		&dev_attr_s3_debug);
4093 #endif
4094 
4095 	return 0;
4096 }
4097 
4098 static bool modeset_required(struct drm_crtc_state *crtc_state,
4099 			     struct dc_stream_state *new_stream,
4100 			     struct dc_stream_state *old_stream)
4101 {
4102 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4103 }
4104 
4105 static bool modereset_required(struct drm_crtc_state *crtc_state)
4106 {
4107 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4108 }
4109 
4110 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4111 {
4112 	drm_encoder_cleanup(encoder);
4113 	kfree(encoder);
4114 }
4115 
4116 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4117 	.destroy = amdgpu_dm_encoder_destroy,
4118 };
4119 
4121 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4122 					 struct drm_framebuffer *fb,
4123 					 int *min_downscale, int *max_upscale)
4124 {
4125 	struct amdgpu_device *adev = drm_to_adev(dev);
4126 	struct dc *dc = adev->dm.dc;
4127 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4128 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4129 
4130 	switch (fb->format->format) {
4131 	case DRM_FORMAT_P010:
4132 	case DRM_FORMAT_NV12:
4133 	case DRM_FORMAT_NV21:
4134 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4135 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4136 		break;
4137 
4138 	case DRM_FORMAT_XRGB16161616F:
4139 	case DRM_FORMAT_ARGB16161616F:
4140 	case DRM_FORMAT_XBGR16161616F:
4141 	case DRM_FORMAT_ABGR16161616F:
4142 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4143 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4144 		break;
4145 
4146 	default:
4147 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4148 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4149 		break;
4150 	}
4151 
4152 	/*
4153 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4154 	 * scaling factor of 1.0 == 1000 units.
4155 	 */
4156 	if (*max_upscale == 1)
4157 		*max_upscale = 1000;
4158 
4159 	if (*min_downscale == 1)
4160 		*min_downscale = 1000;
4161 }
4162 
4163 
4164 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4165 				struct dc_scaling_info *scaling_info)
4166 {
4167 	int scale_w, scale_h, min_downscale, max_upscale;
4168 
4169 	memset(scaling_info, 0, sizeof(*scaling_info));
4170 
	/* Source is in 16.16 fixed point; we ignore the fractional part for now... */
4172 	scaling_info->src_rect.x = state->src_x >> 16;
4173 	scaling_info->src_rect.y = state->src_y >> 16;
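	/* e.g. a src_x of 0x18000 (1.5 in 16.16) truncates to 1 */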
4174 
4175 	/*
4176 	 * For reasons we don't (yet) fully understand a non-zero
4177 	 * src_y coordinate into an NV12 buffer can cause a
4178 	 * system hang. To avoid hangs (and maybe be overly cautious)
4179 	 * let's reject both non-zero src_x and src_y.
4180 	 *
4181 	 * We currently know of only one use-case to reproduce a
4182 	 * scenario with non-zero src_x and src_y for NV12, which
4183 	 * is to gesture the YouTube Android app into full screen
4184 	 * on ChromeOS.
4185 	 */
4186 	if (state->fb &&
4187 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4188 	    (scaling_info->src_rect.x != 0 ||
4189 	     scaling_info->src_rect.y != 0))
4190 		return -EINVAL;
4191 
4192 	scaling_info->src_rect.width = state->src_w >> 16;
4193 	if (scaling_info->src_rect.width == 0)
4194 		return -EINVAL;
4195 
4196 	scaling_info->src_rect.height = state->src_h >> 16;
4197 	if (scaling_info->src_rect.height == 0)
4198 		return -EINVAL;
4199 
4200 	scaling_info->dst_rect.x = state->crtc_x;
4201 	scaling_info->dst_rect.y = state->crtc_y;
4202 
4203 	if (state->crtc_w == 0)
4204 		return -EINVAL;
4205 
4206 	scaling_info->dst_rect.width = state->crtc_w;
4207 
4208 	if (state->crtc_h == 0)
4209 		return -EINVAL;
4210 
4211 	scaling_info->dst_rect.height = state->crtc_h;
4212 
4213 	/* DRM doesn't specify clipping on destination output. */
4214 	scaling_info->clip_rect = scaling_info->dst_rect;
4215 
4216 	/* Validate scaling per-format with DC plane caps */
4217 	if (state->plane && state->plane->dev && state->fb) {
4218 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4219 					     &min_downscale, &max_upscale);
4220 	} else {
4221 		min_downscale = 250;
4222 		max_upscale = 16000;
4223 	}
4224 
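	/*
	 * Scale factors are in units of 1/1000th: e.g. scaling a 1920-wide
	 * source to a 960-wide destination gives scale_w = 960 * 1000 / 1920
	 * = 500 (0.5x), which must stay within [min_downscale, max_upscale].
	 */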
4225 	scale_w = scaling_info->dst_rect.width * 1000 /
4226 		  scaling_info->src_rect.width;
4227 
4228 	if (scale_w < min_downscale || scale_w > max_upscale)
4229 		return -EINVAL;
4230 
4231 	scale_h = scaling_info->dst_rect.height * 1000 /
4232 		  scaling_info->src_rect.height;
4233 
4234 	if (scale_h < min_downscale || scale_h > max_upscale)
4235 		return -EINVAL;
4236 
4237 	/*
4238 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4239 	 * assume reasonable defaults based on the format.
4240 	 */
4241 
4242 	return 0;
4243 }
4244 
4245 static void
4246 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4247 				 uint64_t tiling_flags)
4248 {
4249 	/* Fill GFX8 params */
4250 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4251 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4252 
4253 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4254 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4255 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4256 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4257 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4258 
4259 		/* XXX fix me for VI */
4260 		tiling_info->gfx8.num_banks = num_banks;
4261 		tiling_info->gfx8.array_mode =
4262 				DC_ARRAY_2D_TILED_THIN1;
4263 		tiling_info->gfx8.tile_split = tile_split;
4264 		tiling_info->gfx8.bank_width = bankw;
4265 		tiling_info->gfx8.bank_height = bankh;
4266 		tiling_info->gfx8.tile_aspect = mtaspect;
4267 		tiling_info->gfx8.tile_mode =
4268 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4269 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4270 			== DC_ARRAY_1D_TILED_THIN1) {
4271 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4272 	}
4273 
4274 	tiling_info->gfx8.pipe_config =
4275 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4276 }
4277 
4278 static void
4279 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4280 				  union dc_tiling_info *tiling_info)
4281 {
4282 	tiling_info->gfx9.num_pipes =
4283 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4284 	tiling_info->gfx9.num_banks =
4285 		adev->gfx.config.gb_addr_config_fields.num_banks;
4286 	tiling_info->gfx9.pipe_interleave =
4287 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4288 	tiling_info->gfx9.num_shader_engines =
4289 		adev->gfx.config.gb_addr_config_fields.num_se;
4290 	tiling_info->gfx9.max_compressed_frags =
4291 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4292 	tiling_info->gfx9.num_rb_per_se =
4293 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4294 	tiling_info->gfx9.shaderEnable = 1;
4295 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4296 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4297 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4298 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4299 	    adev->asic_type == CHIP_YELLOW_CARP ||
4300 	    adev->asic_type == CHIP_VANGOGH)
4301 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4302 }
4303 
4304 static int
4305 validate_dcc(struct amdgpu_device *adev,
4306 	     const enum surface_pixel_format format,
4307 	     const enum dc_rotation_angle rotation,
4308 	     const union dc_tiling_info *tiling_info,
4309 	     const struct dc_plane_dcc_param *dcc,
4310 	     const struct dc_plane_address *address,
4311 	     const struct plane_size *plane_size)
4312 {
4313 	struct dc *dc = adev->dm.dc;
4314 	struct dc_dcc_surface_param input;
4315 	struct dc_surface_dcc_cap output;
4316 
4317 	memset(&input, 0, sizeof(input));
4318 	memset(&output, 0, sizeof(output));
4319 
4320 	if (!dcc->enable)
4321 		return 0;
4322 
4323 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4324 	    !dc->cap_funcs.get_dcc_compression_cap)
4325 		return -EINVAL;
4326 
4327 	input.format = format;
4328 	input.surface_size.width = plane_size->surface_size.width;
4329 	input.surface_size.height = plane_size->surface_size.height;
4330 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4331 
4332 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4333 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4334 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4335 		input.scan = SCAN_DIRECTION_VERTICAL;
4336 
4337 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4338 		return -EINVAL;
4339 
4340 	if (!output.capable)
4341 		return -EINVAL;
4342 
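	/*
	 * Reject DCC if the HW requires independent 64B blocks but the
	 * framebuffer's DCC parameters don't provide them.
	 */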
4343 	if (dcc->independent_64b_blks == 0 &&
4344 	    output.grph.rgb.independent_64b_blks != 0)
4345 		return -EINVAL;
4346 
4347 	return 0;
4348 }
4349 
4350 static bool
4351 modifier_has_dcc(uint64_t modifier)
4352 {
4353 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4354 }
4355 
4356 static unsigned
4357 modifier_gfx9_swizzle_mode(uint64_t modifier)
4358 {
4359 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4360 		return 0;
4361 
4362 	return AMD_FMT_MOD_GET(TILE, modifier);
4363 }
4364 
4365 static const struct drm_format_info *
4366 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4367 {
4368 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4369 }
4370 
4371 static void
4372 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4373 				    union dc_tiling_info *tiling_info,
4374 				    uint64_t modifier)
4375 {
4376 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4377 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4378 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4379 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4380 
4381 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4382 
4383 	if (!IS_AMD_FMT_MOD(modifier))
4384 		return;
4385 
4386 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4387 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4388 
4389 	if (adev->family >= AMDGPU_FAMILY_NV) {
4390 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4391 	} else {
4392 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4393 
4394 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4395 	}
4396 }
4397 
4398 enum dm_micro_swizzle {
4399 	MICRO_SWIZZLE_Z = 0,
4400 	MICRO_SWIZZLE_S = 1,
4401 	MICRO_SWIZZLE_D = 2,
4402 	MICRO_SWIZZLE_R = 3
4403 };
4404 
4405 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4406 					  uint32_t format,
4407 					  uint64_t modifier)
4408 {
4409 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4410 	const struct drm_format_info *info = drm_format_info(format);
4411 	int i;
4412 
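	/*
	 * The low two bits of the GFX9+ swizzle mode select the micro-tile
	 * variant; see enum dm_micro_swizzle above.
	 */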
4413 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4414 
4415 	if (!info)
4416 		return false;
4417 
4418 	/*
4419 	 * We always have to allow these modifiers:
4420 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4421 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4422 	 */
4423 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4424 	    modifier == DRM_FORMAT_MOD_INVALID) {
4425 		return true;
4426 	}
4427 
4428 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4429 	for (i = 0; i < plane->modifier_count; i++) {
4430 		if (modifier == plane->modifiers[i])
4431 			break;
4432 	}
4433 	if (i == plane->modifier_count)
4434 		return false;
4435 
4436 	/*
4437 	 * For D swizzle the canonical modifier depends on the bpp, so check
4438 	 * it here.
4439 	 */
4440 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4441 	    adev->family >= AMDGPU_FAMILY_NV) {
4442 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4443 			return false;
4444 	}
4445 
4446 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4447 	    info->cpp[0] < 8)
4448 		return false;
4449 
4450 	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments, 16/64 bpp are more complicated. */
4452 		if (info->cpp[0] != 4)
4453 			return false;
		/*
		 * We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes.
		 */
4456 		if (info->num_planes > 1)
4457 			return false;
4458 	}
4459 
4460 	return true;
4461 }
4462 
4463 static void
4464 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4465 {
4466 	if (!*mods)
4467 		return;
4468 
4469 	if (*cap - *size < 1) {
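		/* Out of room: grow geometrically so appends stay amortized O(1). */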
4470 		uint64_t new_cap = *cap * 2;
4471 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4472 
4473 		if (!new_mods) {
4474 			kfree(*mods);
4475 			*mods = NULL;
4476 			return;
4477 		}
4478 
4479 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4480 		kfree(*mods);
4481 		*mods = new_mods;
4482 		*cap = new_cap;
4483 	}
4484 
4485 	(*mods)[*size] = mod;
4486 	*size += 1;
4487 }
4488 
4489 static void
4490 add_gfx9_modifiers(const struct amdgpu_device *adev,
4491 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4492 {
4493 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4494 	int pipe_xor_bits = min(8, pipes +
4495 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4496 	int bank_xor_bits = min(8 - pipe_xor_bits,
4497 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4498 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4499 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
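	/*
	 * The combined pipe + bank XOR budget is capped at 8 bits: e.g. with
	 * 4 pipes and 1 SE, pipe_xor_bits = min(8, 2 + 0) = 2, leaving up to
	 * 6 bits for bank XOR.
	 */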
4500 
4502 	if (adev->family == AMDGPU_FAMILY_RV) {
4503 		/* Raven2 and later */
4504 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4505 
4506 		/*
4507 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4508 		 * doesn't support _D on DCN
4509 		 */
4510 
4511 		if (has_constant_encode) {
4512 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4513 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4514 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4515 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4516 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4517 				    AMD_FMT_MOD_SET(DCC, 1) |
4518 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4519 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4520 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4521 		}
4522 
4523 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4524 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4525 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4526 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4527 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4528 			    AMD_FMT_MOD_SET(DCC, 1) |
4529 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4530 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4531 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4532 
4533 		if (has_constant_encode) {
4534 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4535 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4536 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4537 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4538 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4539 				    AMD_FMT_MOD_SET(DCC, 1) |
4540 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4541 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4542 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4544 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4545 				    AMD_FMT_MOD_SET(RB, rb) |
4546 				    AMD_FMT_MOD_SET(PIPE, pipes));
4547 		}
4548 
4549 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4550 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4551 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4552 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4553 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4554 			    AMD_FMT_MOD_SET(DCC, 1) |
4555 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4556 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4557 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4558 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4559 			    AMD_FMT_MOD_SET(RB, rb) |
4560 			    AMD_FMT_MOD_SET(PIPE, pipes));
4561 	}
4562 
4563 	/*
4564 	 * Only supported for 64bpp on Raven, will be filtered on format in
4565 	 * dm_plane_format_mod_supported.
4566 	 */
4567 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4568 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4569 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4570 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4571 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4572 
4573 	if (adev->family == AMDGPU_FAMILY_RV) {
4574 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4575 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4576 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4577 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4578 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4579 	}
4580 
4581 	/*
4582 	 * Only supported for 64bpp on Raven, will be filtered on format in
4583 	 * dm_plane_format_mod_supported.
4584 	 */
4585 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4586 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4587 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4588 
4589 	if (adev->family == AMDGPU_FAMILY_RV) {
4590 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4591 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4592 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4593 	}
4594 }
4595 
4596 static void
4597 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4598 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4599 {
4600 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4601 
4602 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4603 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4604 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4605 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4606 		    AMD_FMT_MOD_SET(DCC, 1) |
4607 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4608 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4609 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4610 
4611 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4612 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4613 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4614 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4615 		    AMD_FMT_MOD_SET(DCC, 1) |
4616 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4617 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4618 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4619 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4620 
4621 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4622 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4623 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4624 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4625 
4626 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4627 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4628 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4629 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4630 
4632 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4633 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4634 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4635 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4636 
4637 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4638 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4639 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4640 }
4641 
4642 static void
4643 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4644 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4645 {
4646 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4647 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4648 
4649 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4650 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4651 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4652 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4653 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4654 		    AMD_FMT_MOD_SET(DCC, 1) |
4655 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4656 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4657 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4658 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4659 
4660 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4661 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4662 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4663 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4664 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4665 		    AMD_FMT_MOD_SET(DCC, 1) |
4666 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4667 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4668 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4669 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4670 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4671 
4672 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4673 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4674 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4675 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4676 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4677 
4678 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4679 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4680 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4681 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4682 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4683 
4684 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4685 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4686 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4687 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4688 
4689 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4690 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4691 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4692 }
4693 
4694 static int
4695 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4696 {
4697 	uint64_t size = 0, capacity = 128;
4698 	*mods = NULL;
4699 
4700 	/* We have not hooked up any pre-GFX9 modifiers. */
4701 	if (adev->family < AMDGPU_FAMILY_AI)
4702 		return 0;
4703 
4704 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4705 
4706 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
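		/* Cursors only get the linear layout; INVALID terminates the list. */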
4707 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4708 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4709 		return *mods ? 0 : -ENOMEM;
4710 	}
4711 
4712 	switch (adev->family) {
4713 	case AMDGPU_FAMILY_AI:
4714 	case AMDGPU_FAMILY_RV:
4715 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4716 		break;
4717 	case AMDGPU_FAMILY_NV:
4718 	case AMDGPU_FAMILY_VGH:
4719 	case AMDGPU_FAMILY_YC:
4720 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4721 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4722 		else
4723 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4724 		break;
4725 	}
4726 
4727 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4728 
4729 	/* INVALID marks the end of the list. */
4730 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4731 
4732 	if (!*mods)
4733 		return -ENOMEM;
4734 
4735 	return 0;
4736 }
4737 
4738 static int
4739 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4740 					  const struct amdgpu_framebuffer *afb,
4741 					  const enum surface_pixel_format format,
4742 					  const enum dc_rotation_angle rotation,
4743 					  const struct plane_size *plane_size,
4744 					  union dc_tiling_info *tiling_info,
4745 					  struct dc_plane_dcc_param *dcc,
4746 					  struct dc_plane_address *address,
4747 					  const bool force_disable_dcc)
4748 {
4749 	const uint64_t modifier = afb->base.modifier;
4750 	int ret;
4751 
4752 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4753 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4754 
4755 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
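		/* With DCC, plane 1 of the framebuffer carries the metadata surface. */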
4756 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4757 
4758 		dcc->enable = 1;
4759 		dcc->meta_pitch = afb->base.pitches[1];
4760 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4761 
4762 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4763 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4764 	}
4765 
4766 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4767 	if (ret)
4768 		return ret;
4769 
4770 	return 0;
4771 }
4772 
4773 static int
4774 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4775 			     const struct amdgpu_framebuffer *afb,
4776 			     const enum surface_pixel_format format,
4777 			     const enum dc_rotation_angle rotation,
4778 			     const uint64_t tiling_flags,
4779 			     union dc_tiling_info *tiling_info,
4780 			     struct plane_size *plane_size,
4781 			     struct dc_plane_dcc_param *dcc,
4782 			     struct dc_plane_address *address,
4783 			     bool tmz_surface,
4784 			     bool force_disable_dcc)
4785 {
4786 	const struct drm_framebuffer *fb = &afb->base;
4787 	int ret;
4788 
4789 	memset(tiling_info, 0, sizeof(*tiling_info));
4790 	memset(plane_size, 0, sizeof(*plane_size));
4791 	memset(dcc, 0, sizeof(*dcc));
4792 	memset(address, 0, sizeof(*address));
4793 
4794 	address->tmz_surface = tmz_surface;
4795 
4796 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4797 		uint64_t addr = afb->address + fb->offsets[0];
4798 
4799 		plane_size->surface_size.x = 0;
4800 		plane_size->surface_size.y = 0;
4801 		plane_size->surface_size.width = fb->width;
4802 		plane_size->surface_size.height = fb->height;
4803 		plane_size->surface_pitch =
4804 			fb->pitches[0] / fb->format->cpp[0];
4805 
4806 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4807 		address->grph.addr.low_part = lower_32_bits(addr);
4808 		address->grph.addr.high_part = upper_32_bits(addr);
4809 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4810 		uint64_t luma_addr = afb->address + fb->offsets[0];
4811 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4812 
4813 		plane_size->surface_size.x = 0;
4814 		plane_size->surface_size.y = 0;
4815 		plane_size->surface_size.width = fb->width;
4816 		plane_size->surface_size.height = fb->height;
4817 		plane_size->surface_pitch =
4818 			fb->pitches[0] / fb->format->cpp[0];
4819 
4820 		plane_size->chroma_size.x = 0;
4821 		plane_size->chroma_size.y = 0;
4822 		/* TODO: set these based on surface format */
4823 		plane_size->chroma_size.width = fb->width / 2;
4824 		plane_size->chroma_size.height = fb->height / 2;
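		/* Assumes 4:2:0 subsampling, where chroma is half size in each dimension. */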
4825 
4826 		plane_size->chroma_pitch =
4827 			fb->pitches[1] / fb->format->cpp[1];
4828 
4829 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4830 		address->video_progressive.luma_addr.low_part =
4831 			lower_32_bits(luma_addr);
4832 		address->video_progressive.luma_addr.high_part =
4833 			upper_32_bits(luma_addr);
4834 		address->video_progressive.chroma_addr.low_part =
4835 			lower_32_bits(chroma_addr);
4836 		address->video_progressive.chroma_addr.high_part =
4837 			upper_32_bits(chroma_addr);
4838 	}
4839 
4840 	if (adev->family >= AMDGPU_FAMILY_AI) {
4841 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4842 								rotation, plane_size,
4843 								tiling_info, dcc,
4844 								address,
4845 								force_disable_dcc);
4846 		if (ret)
4847 			return ret;
4848 	} else {
4849 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4850 	}
4851 
4852 	return 0;
4853 }
4854 
4855 static void
4856 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4857 			       bool *per_pixel_alpha, bool *global_alpha,
4858 			       int *global_alpha_value)
4859 {
4860 	*per_pixel_alpha = false;
4861 	*global_alpha = false;
4862 	*global_alpha_value = 0xff;
4863 
4864 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4865 		return;
4866 
4867 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4868 		static const uint32_t alpha_formats[] = {
4869 			DRM_FORMAT_ARGB8888,
4870 			DRM_FORMAT_RGBA8888,
4871 			DRM_FORMAT_ABGR8888,
4872 		};
4873 		uint32_t format = plane_state->fb->format->format;
4874 		unsigned int i;
4875 
4876 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4877 			if (format == alpha_formats[i]) {
4878 				*per_pixel_alpha = true;
4879 				break;
4880 			}
4881 		}
4882 	}
4883 
4884 	if (plane_state->alpha < 0xffff) {
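		/*
		 * DRM plane alpha is 16-bit; scale it down to the 8-bit value
		 * DC expects, e.g. 0x8000 >> 8 == 0x80 (~50%).
		 */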
4885 		*global_alpha = true;
4886 		*global_alpha_value = plane_state->alpha >> 8;
4887 	}
4888 }
4889 
4890 static int
4891 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4892 			    const enum surface_pixel_format format,
4893 			    enum dc_color_space *color_space)
4894 {
4895 	bool full_range;
4896 
4897 	*color_space = COLOR_SPACE_SRGB;
4898 
4899 	/* DRM color properties only affect non-RGB formats. */
4900 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4901 		return 0;
4902 
4903 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4904 
4905 	switch (plane_state->color_encoding) {
4906 	case DRM_COLOR_YCBCR_BT601:
4907 		if (full_range)
4908 			*color_space = COLOR_SPACE_YCBCR601;
4909 		else
4910 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4911 		break;
4912 
4913 	case DRM_COLOR_YCBCR_BT709:
4914 		if (full_range)
4915 			*color_space = COLOR_SPACE_YCBCR709;
4916 		else
4917 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4918 		break;
4919 
4920 	case DRM_COLOR_YCBCR_BT2020:
4921 		if (full_range)
4922 			*color_space = COLOR_SPACE_2020_YCBCR;
4923 		else
4924 			return -EINVAL;
4925 		break;
4926 
4927 	default:
4928 		return -EINVAL;
4929 	}
4930 
4931 	return 0;
4932 }
4933 
4934 static int
4935 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4936 			    const struct drm_plane_state *plane_state,
4937 			    const uint64_t tiling_flags,
4938 			    struct dc_plane_info *plane_info,
4939 			    struct dc_plane_address *address,
4940 			    bool tmz_surface,
4941 			    bool force_disable_dcc)
4942 {
4943 	const struct drm_framebuffer *fb = plane_state->fb;
4944 	const struct amdgpu_framebuffer *afb =
4945 		to_amdgpu_framebuffer(plane_state->fb);
4946 	int ret;
4947 
4948 	memset(plane_info, 0, sizeof(*plane_info));
4949 
4950 	switch (fb->format->format) {
4951 	case DRM_FORMAT_C8:
4952 		plane_info->format =
4953 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4954 		break;
4955 	case DRM_FORMAT_RGB565:
4956 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4957 		break;
4958 	case DRM_FORMAT_XRGB8888:
4959 	case DRM_FORMAT_ARGB8888:
4960 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4961 		break;
4962 	case DRM_FORMAT_XRGB2101010:
4963 	case DRM_FORMAT_ARGB2101010:
4964 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4965 		break;
4966 	case DRM_FORMAT_XBGR2101010:
4967 	case DRM_FORMAT_ABGR2101010:
4968 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4969 		break;
4970 	case DRM_FORMAT_XBGR8888:
4971 	case DRM_FORMAT_ABGR8888:
4972 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4973 		break;
4974 	case DRM_FORMAT_NV21:
4975 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4976 		break;
4977 	case DRM_FORMAT_NV12:
4978 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4979 		break;
4980 	case DRM_FORMAT_P010:
4981 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4982 		break;
4983 	case DRM_FORMAT_XRGB16161616F:
4984 	case DRM_FORMAT_ARGB16161616F:
4985 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4986 		break;
4987 	case DRM_FORMAT_XBGR16161616F:
4988 	case DRM_FORMAT_ABGR16161616F:
4989 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4990 		break;
4991 	case DRM_FORMAT_XRGB16161616:
4992 	case DRM_FORMAT_ARGB16161616:
4993 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4994 		break;
4995 	case DRM_FORMAT_XBGR16161616:
4996 	case DRM_FORMAT_ABGR16161616:
4997 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4998 		break;
4999 	default:
5000 		DRM_ERROR(
5001 			"Unsupported screen format %p4cc\n",
5002 			&fb->format->format);
5003 		return -EINVAL;
5004 	}
5005 
5006 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5007 	case DRM_MODE_ROTATE_0:
5008 		plane_info->rotation = ROTATION_ANGLE_0;
5009 		break;
5010 	case DRM_MODE_ROTATE_90:
5011 		plane_info->rotation = ROTATION_ANGLE_90;
5012 		break;
5013 	case DRM_MODE_ROTATE_180:
5014 		plane_info->rotation = ROTATION_ANGLE_180;
5015 		break;
5016 	case DRM_MODE_ROTATE_270:
5017 		plane_info->rotation = ROTATION_ANGLE_270;
5018 		break;
5019 	default:
5020 		plane_info->rotation = ROTATION_ANGLE_0;
5021 		break;
5022 	}
5023 
5024 	plane_info->visible = true;
5025 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5026 
5027 	plane_info->layer_index = 0;
5028 
5029 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5030 					  &plane_info->color_space);
5031 	if (ret)
5032 		return ret;
5033 
5034 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5035 					   plane_info->rotation, tiling_flags,
5036 					   &plane_info->tiling_info,
5037 					   &plane_info->plane_size,
5038 					   &plane_info->dcc, address, tmz_surface,
5039 					   force_disable_dcc);
5040 	if (ret)
5041 		return ret;
5042 
5043 	fill_blending_from_plane_state(
5044 		plane_state, &plane_info->per_pixel_alpha,
5045 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5046 
5047 	return 0;
5048 }
5049 
5050 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5051 				    struct dc_plane_state *dc_plane_state,
5052 				    struct drm_plane_state *plane_state,
5053 				    struct drm_crtc_state *crtc_state)
5054 {
5055 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5056 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5057 	struct dc_scaling_info scaling_info;
5058 	struct dc_plane_info plane_info;
5059 	int ret;
5060 	bool force_disable_dcc = false;
5061 
5062 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5063 	if (ret)
5064 		return ret;
5065 
5066 	dc_plane_state->src_rect = scaling_info.src_rect;
5067 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5068 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5069 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5070 
5071 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5072 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5073 					  afb->tiling_flags,
5074 					  &plane_info,
5075 					  &dc_plane_state->address,
5076 					  afb->tmz_surface,
5077 					  force_disable_dcc);
5078 	if (ret)
5079 		return ret;
5080 
	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
5084 	dc_plane_state->plane_size = plane_info.plane_size;
5085 	dc_plane_state->rotation = plane_info.rotation;
5086 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5087 	dc_plane_state->stereo_format = plane_info.stereo_format;
5088 	dc_plane_state->tiling_info = plane_info.tiling_info;
5089 	dc_plane_state->visible = plane_info.visible;
5090 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5091 	dc_plane_state->global_alpha = plane_info.global_alpha;
5092 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5093 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always returns 0 */
5095 	dc_plane_state->flip_int_enabled = true;
5096 
5097 	/*
5098 	 * Always set input transfer function, since plane state is refreshed
5099 	 * every time.
5100 	 */
5101 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5102 	if (ret)
5103 		return ret;
5104 
5105 	return 0;
5106 }
5107 
5108 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5109 					   const struct dm_connector_state *dm_state,
5110 					   struct dc_stream_state *stream)
5111 {
5112 	enum amdgpu_rmx_type rmx_type;
5113 
	struct rect src = { 0 }; /* viewport in composition space */
5115 	struct rect dst = { 0 }; /* stream addressable area */
5116 
	/* no mode, nothing to be done */
5118 	if (!mode)
5119 		return;
5120 
5121 	/* Full screen scaling by default */
5122 	src.width = mode->hdisplay;
5123 	src.height = mode->vdisplay;
5124 	dst.width = stream->timing.h_addressable;
5125 	dst.height = stream->timing.v_addressable;
5126 
5127 	if (dm_state) {
5128 		rmx_type = dm_state->scaling;
5129 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
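			/*
			 * Compare the source and destination aspect ratios by
			 * cross-multiplication, so no division is needed:
			 * src.w/src.h < dst.w/dst.h  <=>  src.w*dst.h < src.h*dst.w.
			 */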
5130 			if (src.width * dst.height <
5131 					src.height * dst.width) {
5132 				/* height needs less upscaling/more downscaling */
5133 				dst.width = src.width *
5134 						dst.height / src.height;
5135 			} else {
5136 				/* width needs less upscaling/more downscaling */
5137 				dst.height = src.height *
5138 						dst.width / src.width;
5139 			}
5140 		} else if (rmx_type == RMX_CENTER) {
5141 			dst = src;
5142 		}
5143 
5144 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5145 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5146 
5147 		if (dm_state->underscan_enable) {
5148 			dst.x += dm_state->underscan_hborder / 2;
5149 			dst.y += dm_state->underscan_vborder / 2;
5150 			dst.width -= dm_state->underscan_hborder;
5151 			dst.height -= dm_state->underscan_vborder;
5152 		}
5153 	}
5154 
5155 	stream->src = src;
5156 	stream->dst = dst;
5157 
5158 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
		      dst.x, dst.y, dst.width, dst.height);
}
5162 
5163 static enum dc_color_depth
5164 convert_color_depth_from_display_info(const struct drm_connector *connector,
5165 				      bool is_y420, int requested_bpc)
5166 {
5167 	uint8_t bpc;
5168 
5169 	if (is_y420) {
5170 		bpc = 8;
5171 
5172 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5173 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5174 			bpc = 16;
5175 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5176 			bpc = 12;
5177 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5178 			bpc = 10;
5179 	} else {
5180 		bpc = (uint8_t)connector->display_info.bpc;
5181 		/* Assume 8 bpc by default if no bpc is specified. */
5182 		bpc = bpc ? bpc : 8;
5183 	}
5184 
5185 	if (requested_bpc > 0) {
5186 		/*
5187 		 * Cap display bpc based on the user requested value.
5188 		 *
		 * The value of state->max_bpc may not be correctly updated,
		 * depending on when the connector gets added to the state or
		 * if this was called outside of atomic check, so it can't be
		 * used directly.
5193 		 */
5194 		bpc = min_t(u8, bpc, requested_bpc);
5195 
5196 		/* Round down to the nearest even number. */
5197 		bpc = bpc - (bpc & 1);
5198 	}
5199 
5200 	switch (bpc) {
5201 	case 0:
5202 		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing.
5206 		 */
5207 		return COLOR_DEPTH_888;
5208 	case 6:
5209 		return COLOR_DEPTH_666;
5210 	case 8:
5211 		return COLOR_DEPTH_888;
5212 	case 10:
5213 		return COLOR_DEPTH_101010;
5214 	case 12:
5215 		return COLOR_DEPTH_121212;
5216 	case 14:
5217 		return COLOR_DEPTH_141414;
5218 	case 16:
5219 		return COLOR_DEPTH_161616;
5220 	default:
5221 		return COLOR_DEPTH_UNDEFINED;
5222 	}
5223 }
5224 
5225 static enum dc_aspect_ratio
5226 get_aspect_ratio(const struct drm_display_mode *mode_in)
5227 {
5228 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5229 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5230 }
5231 
5232 static enum dc_color_space
5233 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5234 {
5235 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5236 
5237 	switch (dc_crtc_timing->pixel_encoding)	{
5238 	case PIXEL_ENCODING_YCBCR422:
5239 	case PIXEL_ENCODING_YCBCR444:
5240 	case PIXEL_ENCODING_YCBCR420:
5241 	{
5242 		/*
		 * According to the HDMI spec, 27.03 MHz is the separation
		 * point between HDTV and SDTV, so use YCbCr709 above it and
		 * YCbCr601 below it.
5246 		 */
5247 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5248 			if (dc_crtc_timing->flags.Y_ONLY)
5249 				color_space =
5250 					COLOR_SPACE_YCBCR709_LIMITED;
5251 			else
5252 				color_space = COLOR_SPACE_YCBCR709;
5253 		} else {
5254 			if (dc_crtc_timing->flags.Y_ONLY)
5255 				color_space =
5256 					COLOR_SPACE_YCBCR601_LIMITED;
5257 			else
5258 				color_space = COLOR_SPACE_YCBCR601;
5259 		}
5260 
5261 	}
5262 	break;
5263 	case PIXEL_ENCODING_RGB:
5264 		color_space = COLOR_SPACE_SRGB;
5265 		break;
5266 
5267 	default:
5268 		WARN_ON(1);
5269 		break;
5270 	}
5271 
5272 	return color_space;
5273 }
5274 
5275 static bool adjust_colour_depth_from_display_info(
5276 	struct dc_crtc_timing *timing_out,
5277 	const struct drm_display_info *info)
5278 {
5279 	enum dc_color_depth depth = timing_out->display_color_depth;
5280 	int normalized_clk;
5281 	do {
5282 		normalized_clk = timing_out->pix_clk_100hz / 10;
5283 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5284 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5285 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec for the colour depth. */
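		/*
		 * e.g. at 10 bpc a pixel carries 30 bits vs 24 bits at 8 bpc,
		 * so the TMDS clock scales by 30/24.
		 */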
5287 		switch (depth) {
5288 		case COLOR_DEPTH_888:
5289 			break;
5290 		case COLOR_DEPTH_101010:
5291 			normalized_clk = (normalized_clk * 30) / 24;
5292 			break;
5293 		case COLOR_DEPTH_121212:
5294 			normalized_clk = (normalized_clk * 36) / 24;
5295 			break;
5296 		case COLOR_DEPTH_161616:
5297 			normalized_clk = (normalized_clk * 48) / 24;
5298 			break;
5299 		default:
5300 			/* The above depths are the only ones valid for HDMI. */
5301 			return false;
5302 		}
5303 		if (normalized_clk <= info->max_tmds_clock) {
5304 			timing_out->display_color_depth = depth;
5305 			return true;
5306 		}
5307 	} while (--depth > COLOR_DEPTH_666);
5308 	return false;
5309 }
5310 
5311 static void fill_stream_properties_from_drm_display_mode(
5312 	struct dc_stream_state *stream,
5313 	const struct drm_display_mode *mode_in,
5314 	const struct drm_connector *connector,
5315 	const struct drm_connector_state *connector_state,
5316 	const struct dc_stream_state *old_stream,
5317 	int requested_bpc)
5318 {
5319 	struct dc_crtc_timing *timing_out = &stream->timing;
5320 	const struct drm_display_info *info = &connector->display_info;
5321 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5322 	struct hdmi_vendor_infoframe hv_frame;
5323 	struct hdmi_avi_infoframe avi_frame;
5324 
5325 	memset(&hv_frame, 0, sizeof(hv_frame));
5326 	memset(&avi_frame, 0, sizeof(avi_frame));
5327 
5328 	timing_out->h_border_left = 0;
5329 	timing_out->h_border_right = 0;
5330 	timing_out->v_border_top = 0;
5331 	timing_out->v_border_bottom = 0;
5332 	/* TODO: un-hardcode */
5333 	if (drm_mode_is_420_only(info, mode_in)
5334 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5335 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5336 	else if (drm_mode_is_420_also(info, mode_in)
5337 			&& aconnector->force_yuv420_output)
5338 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5339 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5340 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5341 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5342 	else
5343 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5344 
5345 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5346 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5347 		connector,
5348 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5349 		requested_bpc);
5350 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5351 	timing_out->hdmi_vic = 0;
5352 
	if (old_stream) {
5354 		timing_out->vic = old_stream->timing.vic;
5355 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5356 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5357 	} else {
5358 		timing_out->vic = drm_match_cea_mode(mode_in);
5359 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5360 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5361 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5362 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5363 	}
5364 
5365 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5366 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5367 		timing_out->vic = avi_frame.video_code;
5368 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5369 		timing_out->hdmi_vic = hv_frame.vic;
5370 	}
5371 
5372 	if (is_freesync_video_mode(mode_in, aconnector)) {
5373 		timing_out->h_addressable = mode_in->hdisplay;
5374 		timing_out->h_total = mode_in->htotal;
5375 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5376 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5377 		timing_out->v_total = mode_in->vtotal;
5378 		timing_out->v_addressable = mode_in->vdisplay;
5379 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5380 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5381 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5382 	} else {
5383 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5384 		timing_out->h_total = mode_in->crtc_htotal;
5385 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5386 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5387 		timing_out->v_total = mode_in->crtc_vtotal;
5388 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5389 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5390 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5391 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5392 	}
5393 
5394 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5395 
5396 	stream->output_color_space = get_output_color_space(timing_out);
5397 
5398 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5399 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5400 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5401 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5402 		    drm_mode_is_420_also(info, mode_in) &&
5403 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5404 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5405 			adjust_colour_depth_from_display_info(timing_out, info);
5406 		}
5407 	}
5408 }
5409 
5410 static void fill_audio_info(struct audio_info *audio_info,
5411 			    const struct drm_connector *drm_connector,
5412 			    const struct dc_sink *dc_sink)
5413 {
5414 	int i = 0;
5415 	int cea_revision = 0;
5416 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5417 
5418 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5419 	audio_info->product_id = edid_caps->product_id;
5420 
5421 	cea_revision = drm_connector->display_info.cea_rev;
5422 
5423 	strscpy(audio_info->display_name,
5424 		edid_caps->display_name,
5425 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5426 
5427 	if (cea_revision >= 3) {
5428 		audio_info->mode_count = edid_caps->audio_mode_count;
5429 
5430 		for (i = 0; i < audio_info->mode_count; ++i) {
5431 			audio_info->modes[i].format_code =
5432 					(enum audio_format_code)
5433 					(edid_caps->audio_modes[i].format_code);
5434 			audio_info->modes[i].channel_count =
5435 					edid_caps->audio_modes[i].channel_count;
5436 			audio_info->modes[i].sample_rates.all =
5437 					edid_caps->audio_modes[i].sample_rate;
5438 			audio_info->modes[i].sample_size =
5439 					edid_caps->audio_modes[i].sample_size;
5440 		}
5441 	}
5442 
5443 	audio_info->flags.all = edid_caps->speaker_flags;
5444 
5445 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5446 	if (drm_connector->latency_present[0]) {
5447 		audio_info->video_latency = drm_connector->video_latency[0];
5448 		audio_info->audio_latency = drm_connector->audio_latency[0];
5449 	}
5450 
	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}
5454 
5455 static void
5456 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5457 				      struct drm_display_mode *dst_mode)
5458 {
5459 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5460 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5461 	dst_mode->crtc_clock = src_mode->crtc_clock;
5462 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5463 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5464 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5465 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5466 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5467 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5468 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5469 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5470 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5471 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5472 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5473 }
5474 
5475 static void
5476 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5477 					const struct drm_display_mode *native_mode,
5478 					bool scale_enabled)
5479 {
5480 	if (scale_enabled) {
5481 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5482 	} else if (native_mode->clock == drm_mode->clock &&
5483 			native_mode->htotal == drm_mode->htotal &&
5484 			native_mode->vtotal == drm_mode->vtotal) {
5485 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5486 	} else {
		/* no scaling and no amdgpu-inserted mode, no need to patch */
5488 	}
5489 }
5490 
5491 static struct dc_sink *
5492 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5493 {
5494 	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
5497 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5498 
5499 	sink = dc_sink_create(&sink_init_data);
5500 	if (!sink) {
5501 		DRM_ERROR("Failed to create sink!\n");
5502 		return NULL;
5503 	}
5504 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5505 
5506 	return sink;
5507 }
5508 
5509 static void set_multisync_trigger_params(
5510 		struct dc_stream_state *stream)
5511 {
5512 	struct dc_stream_state *master = NULL;
5513 
5514 	if (stream->triggered_crtc_reset.enabled) {
5515 		master = stream->triggered_crtc_reset.event_source;
5516 		stream->triggered_crtc_reset.event =
5517 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5518 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5519 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5520 	}
5521 }
5522 
5523 static void set_master_stream(struct dc_stream_state *stream_set[],
5524 			      int stream_count)
5525 {
5526 	int j, highest_rfr = 0, master_stream = 0;
5527 
	for (j = 0; j < stream_count; j++) {
5529 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5530 			int refresh_rate = 0;
5531 
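			/*
			 * Refresh rate in Hz: pix_clk_100hz is in units of
			 * 100 Hz, so multiply by 100 and divide by the total
			 * pixels per frame (h_total * v_total).
			 */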
			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5534 			if (refresh_rate > highest_rfr) {
5535 				highest_rfr = refresh_rate;
5536 				master_stream = j;
5537 			}
5538 		}
5539 	}
	for (j = 0; j < stream_count; j++) {
5541 		if (stream_set[j])
5542 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5543 	}
5544 }
5545 
5546 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5547 {
5548 	int i = 0;
5549 	struct dc_stream_state *stream;
5550 
5551 	if (context->stream_count < 2)
5552 		return;
	for (i = 0; i < context->stream_count; i++) {
5554 		if (!context->streams[i])
5555 			continue;
5556 		/*
5557 		 * TODO: add a function to read AMD VSDB bits and set
5558 		 * crtc_sync_master.multi_sync_enabled flag
5559 		 * For now it's set to false
5560 		 */
5561 	}
5562 
5563 	set_master_stream(context->streams, context->stream_count);
5564 
	for (i = 0; i < context->stream_count; i++) {
5566 		stream = context->streams[i];
5567 
5568 		if (!stream)
5569 			continue;
5570 
5571 		set_multisync_trigger_params(stream);
5572 	}
5573 }
5574 
5575 #if defined(CONFIG_DRM_AMD_DC_DCN)
5576 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5577 							struct dc_sink *sink, struct dc_stream_state *stream,
5578 							struct dsc_dec_dpcd_caps *dsc_caps)
5579 {
5580 	stream->timing.flags.DSC = 0;
5581 
5582 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5583 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5584 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5585 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5586 				      dsc_caps);
5587 	}
5588 }
5589 
5590 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5591 										struct dc_sink *sink, struct dc_stream_state *stream,
5592 										struct dsc_dec_dpcd_caps *dsc_caps)
5593 {
5594 	struct drm_connector *drm_connector = &aconnector->base;
5595 	uint32_t link_bandwidth_kbps;
5596 
5597 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5598 							dc_link_get_link_cap(aconnector->dc_link));
5599 	/* Set DSC policy according to dsc_clock_en */
5600 	dc_dsc_policy_set_enable_dsc_when_not_needed(
5601 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5602 
5603 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5604 
5605 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5606 						dsc_caps,
5607 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5608 						0,
5609 						link_bandwidth_kbps,
5610 						&stream->timing,
5611 						&stream->timing.dsc_cfg)) {
5612 			stream->timing.flags.DSC = 1;
5613 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5614 		}
5615 	}
5616 
5617 	/* Overwrite the stream flag if DSC is enabled through debugfs */
5618 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5619 		stream->timing.flags.DSC = 1;
5620 
5621 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5622 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5623 
5624 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5625 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5626 
5627 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5628 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5629 }
5630 #endif
5631 
5632 /**
5633  * DOC: FreeSync Video
5634  *
5635  * When a userspace application wants to play a video, the content follows a
5636  * standard format definition that usually specifies the FPS for that format.
5637  * The below list illustrates some video format and the expected FPS,
5638  * respectively:
5639  *
5640  * - TV/NTSC (23.976 FPS)
5641  * - Cinema (24 FPS)
5642  * - TV/PAL (25 FPS)
5643  * - TV/NTSC (29.97 FPS)
5644  * - TV/NTSC (30 FPS)
5645  * - Cinema HFR (48 FPS)
5646  * - TV/PAL (50 FPS)
5647  * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96 FPS)
5649  *
 * The list of standard video formats is not huge and can be added to the
 * connector modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch happens seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid blanking during the
 * transition. For example, a video player can change the modesetting from
 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen, without
 * causing any display blanking. The same concept applies to any other mode
 * setting change.
5661  */
5662 static struct drm_display_mode *
5663 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5664 			  bool use_probed_modes)
5665 {
5666 	struct drm_display_mode *m, *m_pref = NULL;
5667 	u16 current_refresh, highest_refresh;
5668 	struct list_head *list_head = use_probed_modes ?
5669 						    &aconnector->base.probed_modes :
5670 						    &aconnector->base.modes;
5671 
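	/* Reuse the cached base mode if it has already been computed. */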
5672 	if (aconnector->freesync_vid_base.clock != 0)
5673 		return &aconnector->freesync_vid_base;
5674 
5675 	/* Find the preferred mode */
	list_for_each_entry(m, list_head, head) {
5677 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5678 			m_pref = m;
5679 			break;
5680 		}
5681 	}
5682 
5683 	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fall back to the first entry */
5685 		m_pref = list_first_entry_or_null(
5686 			&aconnector->base.modes, struct drm_display_mode, head);
5687 		if (!m_pref) {
5688 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5689 			return NULL;
5690 		}
5691 	}
5692 
5693 	highest_refresh = drm_mode_vrefresh(m_pref);
5694 
5695 	/*
5696 	 * Find the mode with highest refresh rate with same resolution.
5697 	 * For some monitors, preferred mode is not the mode with highest
5698 	 * supported refresh rate.
5699 	 */
	list_for_each_entry(m, list_head, head) {
5701 		current_refresh  = drm_mode_vrefresh(m);
5702 
5703 		if (m->hdisplay == m_pref->hdisplay &&
5704 		    m->vdisplay == m_pref->vdisplay &&
5705 		    highest_refresh < current_refresh) {
5706 			highest_refresh = current_refresh;
5707 			m_pref = m;
5708 		}
5709 	}
5710 
5711 	aconnector->freesync_vid_base = *m_pref;
5712 	return m_pref;
5713 }
5714 
5715 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5716 				   struct amdgpu_dm_connector *aconnector)
5717 {
5718 	struct drm_display_mode *high_mode;
5719 	int timing_diff;
5720 
5721 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5722 	if (!high_mode || !mode)
5723 		return false;
5724 
5725 	timing_diff = high_mode->vtotal - mode->vtotal;
5726 
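	/*
	 * A FreeSync video mode must differ from the base mode only in the
	 * vertical front porch: all other timing fields must match, and the
	 * vsync position must shift by exactly the vtotal delta.
	 */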
5727 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5728 	    high_mode->hdisplay != mode->hdisplay ||
5729 	    high_mode->vdisplay != mode->vdisplay ||
5730 	    high_mode->hsync_start != mode->hsync_start ||
5731 	    high_mode->hsync_end != mode->hsync_end ||
5732 	    high_mode->htotal != mode->htotal ||
5733 	    high_mode->hskew != mode->hskew ||
5734 	    high_mode->vscan != mode->vscan ||
5735 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5736 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5737 		return false;
5738 	else
5739 		return true;
5740 }
5741 
5742 static struct dc_stream_state *
5743 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5744 		       const struct drm_display_mode *drm_mode,
5745 		       const struct dm_connector_state *dm_state,
5746 		       const struct dc_stream_state *old_stream,
5747 		       int requested_bpc)
5748 {
5749 	struct drm_display_mode *preferred_mode = NULL;
5750 	struct drm_connector *drm_connector;
5751 	const struct drm_connector_state *con_state =
5752 		dm_state ? &dm_state->base : NULL;
5753 	struct dc_stream_state *stream = NULL;
5754 	struct drm_display_mode mode = *drm_mode;
5755 	struct drm_display_mode saved_mode;
5756 	struct drm_display_mode *freesync_mode = NULL;
5757 	bool native_mode_found = false;
5758 	bool recalculate_timing = false;
5759 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5760 	int mode_refresh;
5761 	int preferred_refresh = 0;
5762 #if defined(CONFIG_DRM_AMD_DC_DCN)
5763 	struct dsc_dec_dpcd_caps dsc_caps;
5764 #endif
5765 	struct dc_sink *sink = NULL;
5766 
5767 	memset(&saved_mode, 0, sizeof(saved_mode));
5768 
5769 	if (aconnector == NULL) {
5770 		DRM_ERROR("aconnector is NULL!\n");
5771 		return stream;
5772 	}
5773 
5774 	drm_connector = &aconnector->base;
5775 
5776 	if (!aconnector->dc_sink) {
5777 		sink = create_fake_sink(aconnector);
5778 		if (!sink)
5779 			return stream;
5780 	} else {
5781 		sink = aconnector->dc_sink;
5782 		dc_sink_retain(sink);
5783 	}
5784 
5785 	stream = dc_create_stream_for_sink(sink);
5786 
5787 	if (stream == NULL) {
5788 		DRM_ERROR("Failed to create stream for sink!\n");
5789 		goto finish;
5790 	}
5791 
5792 	stream->dm_stream_context = aconnector;
5793 
5794 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5795 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5796 
5797 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5798 		/* Search for preferred mode */
5799 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5800 			native_mode_found = true;
5801 			break;
5802 		}
5803 	}
5804 	if (!native_mode_found)
5805 		preferred_mode = list_first_entry_or_null(
5806 				&aconnector->base.modes,
5807 				struct drm_display_mode,
5808 				head);
5809 
5810 	mode_refresh = drm_mode_vrefresh(&mode);
5811 
5812 	if (preferred_mode == NULL) {
5813 		/*
		 * This may not be an error: the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the mode list may not be filled in time.
5818 		 */
5819 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5820 	} else {
5821 		recalculate_timing = amdgpu_freesync_vid_mode &&
5822 				 is_freesync_video_mode(&mode, aconnector);
5823 		if (recalculate_timing) {
5824 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5825 			saved_mode = mode;
5826 			mode = *freesync_mode;
5827 		} else {
5828 			decide_crtc_timing_for_drm_display_mode(
5829 				&mode, preferred_mode, scale);
5830 
5831 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
5832 		}
5833 	}
5834 
5835 	if (recalculate_timing)
5836 		drm_mode_set_crtcinfo(&saved_mode, 0);
5837 	else if (!dm_state)
5838 		drm_mode_set_crtcinfo(&mode, 0);
5839 
5840        /*
5841 	* If scaling is enabled and refresh rate didn't change
5842 	* we copy the vic and polarities of the old timings
5843 	*/
5844 	if (!scale || mode_refresh != preferred_refresh)
5845 		fill_stream_properties_from_drm_display_mode(
5846 			stream, &mode, &aconnector->base, con_state, NULL,
5847 			requested_bpc);
5848 	else
5849 		fill_stream_properties_from_drm_display_mode(
5850 			stream, &mode, &aconnector->base, con_state, old_stream,
5851 			requested_bpc);
5852 
5853 #if defined(CONFIG_DRM_AMD_DC_DCN)
5854 	/* SST DSC determination policy */
5855 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5856 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5857 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5858 #endif
5859 
5860 	update_stream_scaling_settings(&mode, dm_state, stream);
5861 
5862 	fill_audio_info(
5863 		&stream->audio_info,
5864 		drm_connector,
5865 		sink);
5866 
5867 	update_stream_signal(stream, sink);
5868 
5869 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5870 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5871 
5872 	if (stream->link->psr_settings.psr_feature_enabled) {
5873 		//
5874 		// should decide stream support vsc sdp colorimetry capability
5875 		// before building vsc info packet
5876 		//
5877 		stream->use_vsc_sdp_for_colorimetry = false;
5878 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5879 			stream->use_vsc_sdp_for_colorimetry =
5880 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5881 		} else {
5882 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5883 				stream->use_vsc_sdp_for_colorimetry = true;
5884 		}
5885 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
	}
5889 finish:
5890 	dc_sink_release(sink);
5891 
5892 	return stream;
5893 }
5894 
5895 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5896 {
5897 	drm_crtc_cleanup(crtc);
5898 	kfree(crtc);
5899 }
5900 
5901 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5902 				  struct drm_crtc_state *state)
5903 {
5904 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5905 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
5907 	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

5914 	kfree(state);
5915 }
5916 
5917 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5918 {
5919 	struct dm_crtc_state *state;
5920 
5921 	if (crtc->state)
5922 		dm_crtc_destroy_state(crtc, crtc->state);
5923 
5924 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5925 	if (WARN_ON(!state))
5926 		return;
5927 
5928 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5929 }
5930 
5931 static struct drm_crtc_state *
5932 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5933 {
5934 	struct dm_crtc_state *state, *cur;
5935 
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
5940 
5941 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5942 	if (!state)
5943 		return NULL;
5944 
5945 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5946 
5947 	if (cur->stream) {
5948 		state->stream = cur->stream;
5949 		dc_stream_retain(state->stream);
5950 	}
5951 
5952 	state->active_planes = cur->active_planes;
5953 	state->vrr_infopacket = cur->vrr_infopacket;
5954 	state->abm_level = cur->abm_level;
5955 	state->vrr_supported = cur->vrr_supported;
5956 	state->freesync_config = cur->freesync_config;
5957 	state->cm_has_degamma = cur->cm_has_degamma;
5958 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
5960 
5961 	return &state->base;
5962 }
5963 
5964 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5965 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5966 {
5967 	crtc_debugfs_init(crtc);
5968 
5969 	return 0;
5970 }
5971 #endif
5972 
5973 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5974 {
5975 	enum dc_irq_source irq_source;
5976 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5977 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5978 	int rc;
5979 
5980 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5981 
5982 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5983 
5984 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5985 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
5986 	return rc;
5987 }
5988 
5989 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5990 {
5991 	enum dc_irq_source irq_source;
5992 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5993 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5994 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5995 #if defined(CONFIG_DRM_AMD_DC_DCN)
5996 	struct amdgpu_display_manager *dm = &adev->dm;
5997 	unsigned long flags;
5998 #endif
5999 	int rc = 0;
6000 
6001 	if (enable) {
6002 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6003 		if (amdgpu_dm_vrr_active(acrtc_state))
6004 			rc = dm_set_vupdate_irq(crtc, true);
6005 	} else {
6006 		/* vblank irq off -> vupdate irq off */
6007 		rc = dm_set_vupdate_irq(crtc, false);
6008 	}
6009 
6010 	if (rc)
6011 		return rc;
6012 
6013 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6014 
6015 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6016 		return -EBUSY;
6017 
6018 	if (amdgpu_in_reset(adev))
6019 		return 0;
6020 
6021 #if defined(CONFIG_DRM_AMD_DC_DCN)
6022 	spin_lock_irqsave(&dm->vblank_lock, flags);
6023 	dm->vblank_workqueue->dm = dm;
6024 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
6025 	dm->vblank_workqueue->enable = enable;
6026 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
6027 	schedule_work(&dm->vblank_workqueue->mall_work);
6028 #endif
6029 
6030 	return 0;
6031 }
6032 
6033 static int dm_enable_vblank(struct drm_crtc *crtc)
6034 {
6035 	return dm_set_vblank(crtc, true);
6036 }
6037 
6038 static void dm_disable_vblank(struct drm_crtc *crtc)
6039 {
6040 	dm_set_vblank(crtc, false);
6041 }
6042 
/* Only the options currently available to the driver are implemented */
6044 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6045 	.reset = dm_crtc_reset_state,
6046 	.destroy = amdgpu_dm_crtc_destroy,
6047 	.set_config = drm_atomic_helper_set_config,
6048 	.page_flip = drm_atomic_helper_page_flip,
6049 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6050 	.atomic_destroy_state = dm_crtc_destroy_state,
6051 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6052 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6053 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6054 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6055 	.enable_vblank = dm_enable_vblank,
6056 	.disable_vblank = dm_disable_vblank,
6057 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6058 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6059 	.late_register = amdgpu_dm_crtc_late_register,
6060 #endif
6061 };
6062 
6063 static enum drm_connector_status
6064 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6065 {
6066 	bool connected;
6067 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6068 
6069 	/*
6070 	 * Notes:
6071 	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl, which
	 * makes it a bad place for *any* MST-related activity.
6074 	 */
6075 
6076 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6077 	    !aconnector->fake_enable)
6078 		connected = (aconnector->dc_sink != NULL);
6079 	else
6080 		connected = (aconnector->base.force == DRM_FORCE_ON);
6081 
6082 	update_subconnector_property(aconnector);
6083 
6084 	return (connected ? connector_status_connected :
6085 			connector_status_disconnected);
6086 }
6087 
6088 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6089 					    struct drm_connector_state *connector_state,
6090 					    struct drm_property *property,
6091 					    uint64_t val)
6092 {
6093 	struct drm_device *dev = connector->dev;
6094 	struct amdgpu_device *adev = drm_to_adev(dev);
6095 	struct dm_connector_state *dm_old_state =
6096 		to_dm_connector_state(connector->state);
6097 	struct dm_connector_state *dm_new_state =
6098 		to_dm_connector_state(connector_state);
6099 
6100 	int ret = -EINVAL;
6101 
6102 	if (property == dev->mode_config.scaling_mode_property) {
6103 		enum amdgpu_rmx_type rmx_type;
6104 
6105 		switch (val) {
6106 		case DRM_MODE_SCALE_CENTER:
6107 			rmx_type = RMX_CENTER;
6108 			break;
6109 		case DRM_MODE_SCALE_ASPECT:
6110 			rmx_type = RMX_ASPECT;
6111 			break;
6112 		case DRM_MODE_SCALE_FULLSCREEN:
6113 			rmx_type = RMX_FULL;
6114 			break;
6115 		case DRM_MODE_SCALE_NONE:
6116 		default:
6117 			rmx_type = RMX_OFF;
6118 			break;
6119 		}
6120 
6121 		if (dm_old_state->scaling == rmx_type)
6122 			return 0;
6123 
6124 		dm_new_state->scaling = rmx_type;
6125 		ret = 0;
6126 	} else if (property == adev->mode_info.underscan_hborder_property) {
6127 		dm_new_state->underscan_hborder = val;
6128 		ret = 0;
6129 	} else if (property == adev->mode_info.underscan_vborder_property) {
6130 		dm_new_state->underscan_vborder = val;
6131 		ret = 0;
6132 	} else if (property == adev->mode_info.underscan_property) {
6133 		dm_new_state->underscan_enable = val;
6134 		ret = 0;
6135 	} else if (property == adev->mode_info.abm_level_property) {
6136 		dm_new_state->abm_level = val;
6137 		ret = 0;
6138 	}
6139 
6140 	return ret;
6141 }
6142 
6143 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6144 					    const struct drm_connector_state *state,
6145 					    struct drm_property *property,
6146 					    uint64_t *val)
6147 {
6148 	struct drm_device *dev = connector->dev;
6149 	struct amdgpu_device *adev = drm_to_adev(dev);
6150 	struct dm_connector_state *dm_state =
6151 		to_dm_connector_state(state);
6152 	int ret = -EINVAL;
6153 
6154 	if (property == dev->mode_config.scaling_mode_property) {
6155 		switch (dm_state->scaling) {
6156 		case RMX_CENTER:
6157 			*val = DRM_MODE_SCALE_CENTER;
6158 			break;
6159 		case RMX_ASPECT:
6160 			*val = DRM_MODE_SCALE_ASPECT;
6161 			break;
6162 		case RMX_FULL:
6163 			*val = DRM_MODE_SCALE_FULLSCREEN;
6164 			break;
6165 		case RMX_OFF:
6166 		default:
6167 			*val = DRM_MODE_SCALE_NONE;
6168 			break;
6169 		}
6170 		ret = 0;
6171 	} else if (property == adev->mode_info.underscan_hborder_property) {
6172 		*val = dm_state->underscan_hborder;
6173 		ret = 0;
6174 	} else if (property == adev->mode_info.underscan_vborder_property) {
6175 		*val = dm_state->underscan_vborder;
6176 		ret = 0;
6177 	} else if (property == adev->mode_info.underscan_property) {
6178 		*val = dm_state->underscan_enable;
6179 		ret = 0;
6180 	} else if (property == adev->mode_info.abm_level_property) {
6181 		*val = dm_state->abm_level;
6182 		ret = 0;
6183 	}
6184 
6185 	return ret;
6186 }
6187 
6188 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6189 {
6190 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6191 
6192 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6193 }
6194 
6195 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6196 {
6197 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6198 	const struct dc_link *link = aconnector->dc_link;
6199 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6200 	struct amdgpu_display_manager *dm = &adev->dm;
6201 
6202 	/*
	 * Call only if mst_mgr was initialized before, since it's not done
6204 	 * for all connector types.
6205 	 */
6206 	if (aconnector->mst_mgr.dev)
6207 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6208 
6209 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6210 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6211 
6212 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6213 	    link->type != dc_connection_none &&
6214 	    dm->backlight_dev) {
6215 		backlight_device_unregister(dm->backlight_dev);
6216 		dm->backlight_dev = NULL;
6217 	}
6218 #endif
6219 
6220 	if (aconnector->dc_em_sink)
6221 		dc_sink_release(aconnector->dc_em_sink);
6222 	aconnector->dc_em_sink = NULL;
6223 	if (aconnector->dc_sink)
6224 		dc_sink_release(aconnector->dc_sink);
6225 	aconnector->dc_sink = NULL;
6226 
6227 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6228 	drm_connector_unregister(connector);
6229 	drm_connector_cleanup(connector);
6230 	if (aconnector->i2c) {
6231 		i2c_del_adapter(&aconnector->i2c->base);
6232 		kfree(aconnector->i2c);
6233 	}
6234 	kfree(aconnector->dm_dp_aux.aux.name);
6235 
6236 	kfree(connector);
6237 }
6238 
6239 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6240 {
6241 	struct dm_connector_state *state =
6242 		to_dm_connector_state(connector->state);
6243 
6244 	if (connector->state)
6245 		__drm_atomic_helper_connector_destroy_state(connector->state);
6246 
6247 	kfree(state);
6248 
6249 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6250 
6251 	if (state) {
6252 		state->scaling = RMX_OFF;
6253 		state->underscan_enable = false;
6254 		state->underscan_hborder = 0;
6255 		state->underscan_vborder = 0;
6256 		state->base.max_requested_bpc = 8;
6257 		state->vcpi_slots = 0;
6258 		state->pbn = 0;
6259 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6260 			state->abm_level = amdgpu_dm_abm_level;
6261 
6262 		__drm_atomic_helper_connector_reset(connector, &state->base);
6263 	}
6264 }
6265 
6266 struct drm_connector_state *
6267 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6268 {
6269 	struct dm_connector_state *state =
6270 		to_dm_connector_state(connector->state);
6271 
6272 	struct dm_connector_state *new_state =
6273 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6274 
6275 	if (!new_state)
6276 		return NULL;
6277 
6278 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6279 
6280 	new_state->freesync_capable = state->freesync_capable;
6281 	new_state->abm_level = state->abm_level;
6282 	new_state->scaling = state->scaling;
6283 	new_state->underscan_enable = state->underscan_enable;
6284 	new_state->underscan_hborder = state->underscan_hborder;
6285 	new_state->underscan_vborder = state->underscan_vborder;
6286 	new_state->vcpi_slots = state->vcpi_slots;
6287 	new_state->pbn = state->pbn;
6288 	return &new_state->base;
6289 }
6290 
6291 static int
6292 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6293 {
6294 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6295 		to_amdgpu_dm_connector(connector);
6296 	int r;
6297 
6298 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6299 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6300 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6301 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6302 		if (r)
6303 			return r;
6304 	}
6305 
6306 #if defined(CONFIG_DEBUG_FS)
6307 	connector_debugfs_init(amdgpu_dm_connector);
6308 #endif
6309 
6310 	return 0;
6311 }
6312 
6313 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6314 	.reset = amdgpu_dm_connector_funcs_reset,
6315 	.detect = amdgpu_dm_connector_detect,
6316 	.fill_modes = drm_helper_probe_single_connector_modes,
6317 	.destroy = amdgpu_dm_connector_destroy,
6318 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6319 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6320 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6321 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6322 	.late_register = amdgpu_dm_connector_late_register,
6323 	.early_unregister = amdgpu_dm_connector_unregister
6324 };
6325 
6326 static int get_modes(struct drm_connector *connector)
6327 {
6328 	return amdgpu_dm_connector_get_modes(connector);
6329 }
6330 
6331 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6332 {
6333 	struct dc_sink_init_data init_params = {
6334 			.link = aconnector->dc_link,
6335 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6336 	};
6337 	struct edid *edid;
6338 
6339 	if (!aconnector->base.edid_blob_ptr) {
6340 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6341 				aconnector->base.name);
6342 
6343 		aconnector->base.force = DRM_FORCE_OFF;
6344 		aconnector->base.override_edid = false;
6345 		return;
6346 	}
6347 
6348 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6349 
6350 	aconnector->edid = edid;
6351 
6352 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6353 		aconnector->dc_link,
6354 		(uint8_t *)edid,
6355 		(edid->extensions + 1) * EDID_LENGTH,
6356 		&init_params);
6357 
6358 	if (aconnector->base.force == DRM_FORCE_ON) {
6359 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6360 		aconnector->dc_link->local_sink :
6361 		aconnector->dc_em_sink;
6362 		dc_sink_retain(aconnector->dc_sink);
6363 	}
6364 }
6365 
6366 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6367 {
6368 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6369 
6370 	/*
	 * In case of a headless boot with force on for a DP-managed connector,
	 * these settings have to be != 0 to get an initial modeset.
6373 	 */
6374 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6375 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6376 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6377 	}
6378 
6379 
6380 	aconnector->base.override_edid = true;
6381 	create_eml_sink(aconnector);
6382 }
6383 
6384 static struct dc_stream_state *
6385 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6386 				const struct drm_display_mode *drm_mode,
6387 				const struct dm_connector_state *dm_state,
6388 				const struct dc_stream_state *old_stream)
6389 {
6390 	struct drm_connector *connector = &aconnector->base;
6391 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6392 	struct dc_stream_state *stream;
6393 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6394 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6395 	enum dc_status dc_result = DC_OK;
6396 
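	/*
	 * Try the connector's requested bpc first, then step down by 2
	 * (e.g. 10 -> 8 -> 6) until DC validates the stream or we hit the
	 * 6 bpc floor.
	 */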
6397 	do {
6398 		stream = create_stream_for_sink(aconnector, drm_mode,
6399 						dm_state, old_stream,
6400 						requested_bpc);
6401 		if (stream == NULL) {
6402 			DRM_ERROR("Failed to create stream for sink!\n");
6403 			break;
6404 		}
6405 
6406 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6407 
6408 		if (dc_result != DC_OK) {
6409 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6410 				      drm_mode->hdisplay,
6411 				      drm_mode->vdisplay,
6412 				      drm_mode->clock,
6413 				      dc_result,
6414 				      dc_status_to_str(dc_result));
6415 
6416 			dc_stream_release(stream);
6417 			stream = NULL;
6418 			requested_bpc -= 2; /* lower bpc to retry validation */
6419 		}
6420 
6421 	} while (stream == NULL && requested_bpc >= 6);
6422 
6423 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6424 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6425 
6426 		aconnector->force_yuv420_output = true;
6427 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6428 						dm_state, old_stream);
6429 		aconnector->force_yuv420_output = false;
6430 	}
6431 
6432 	return stream;
6433 }
6434 
6435 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6436 				   struct drm_display_mode *mode)
6437 {
6438 	int result = MODE_ERROR;
6439 	struct dc_sink *dc_sink;
6440 	/* TODO: Unhardcode stream count */
6441 	struct dc_stream_state *stream;
6442 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6443 
6444 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6445 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6446 		return result;
6447 
6448 	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID management.
6451 	 */
6452 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6453 		!aconnector->dc_em_sink)
6454 		handle_edid_mgmt(aconnector);
6455 
6456 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6457 
6458 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6459 				aconnector->base.force != DRM_FORCE_ON) {
6460 		DRM_ERROR("dc_sink is NULL!\n");
6461 		goto fail;
6462 	}
6463 
6464 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6465 	if (stream) {
6466 		dc_stream_release(stream);
6467 		result = MODE_OK;
6468 	}
6469 
6470 fail:
	/* TODO: error handling */
6472 	return result;
6473 }
6474 
6475 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6476 				struct dc_info_packet *out)
6477 {
6478 	struct hdmi_drm_infoframe frame;
6479 	unsigned char buf[30]; /* 26 + 4 */
6480 	ssize_t len;
6481 	int ret, i;
6482 
6483 	memset(out, 0, sizeof(*out));
6484 
6485 	if (!state->hdr_output_metadata)
6486 		return 0;
6487 
6488 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6489 	if (ret)
6490 		return ret;
6491 
6492 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6493 	if (len < 0)
6494 		return (int)len;
6495 
6496 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6497 	if (len != 30)
6498 		return -EINVAL;
6499 
6500 	/* Prepare the infopacket for DC. */
6501 	switch (state->connector->connector_type) {
6502 	case DRM_MODE_CONNECTOR_HDMIA:
6503 		out->hb0 = 0x87; /* type */
6504 		out->hb1 = 0x01; /* version */
6505 		out->hb2 = 0x1A; /* length */
6506 		out->sb[0] = buf[3]; /* checksum */
6507 		i = 1;
6508 		break;
6509 
6510 	case DRM_MODE_CONNECTOR_DisplayPort:
6511 	case DRM_MODE_CONNECTOR_eDP:
6512 		out->hb0 = 0x00; /* sdp id, zero */
6513 		out->hb1 = 0x87; /* type */
6514 		out->hb2 = 0x1D; /* payload len - 1 */
6515 		out->hb3 = (0x13 << 2); /* sdp version */
6516 		out->sb[0] = 0x01; /* version */
6517 		out->sb[1] = 0x1A; /* length */
6518 		i = 2;
6519 		break;
6520 
6521 	default:
6522 		return -EINVAL;
6523 	}
6524 
6525 	memcpy(&out->sb[i], &buf[4], 26);
6526 	out->valid = true;
6527 
6528 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6529 		       sizeof(out->sb), false);
6530 
6531 	return 0;
6532 }
6533 
6534 static int
6535 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6536 				 struct drm_atomic_state *state)
6537 {
6538 	struct drm_connector_state *new_con_state =
6539 		drm_atomic_get_new_connector_state(state, conn);
6540 	struct drm_connector_state *old_con_state =
6541 		drm_atomic_get_old_connector_state(state, conn);
6542 	struct drm_crtc *crtc = new_con_state->crtc;
6543 	struct drm_crtc_state *new_crtc_state;
6544 	int ret;
6545 
6546 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6547 
6548 	if (!crtc)
6549 		return 0;
6550 
6551 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6552 		struct dc_info_packet hdr_infopacket;
6553 
6554 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6555 		if (ret)
6556 			return ret;
6557 
6558 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6559 		if (IS_ERR(new_crtc_state))
6560 			return PTR_ERR(new_crtc_state);
6561 
6562 		/*
6563 		 * DC considers the stream backends changed if the
6564 		 * static metadata changes. Forcing the modeset also
6565 		 * gives a simple way for userspace to switch from
6566 		 * 8bpc to 10bpc when setting the metadata to enter
6567 		 * or exit HDR.
6568 		 *
6569 		 * Changing the static metadata after it's been
6570 		 * set is permissible, however. So only force a
6571 		 * modeset if we're entering or exiting HDR.
6572 		 */
6573 		new_crtc_state->mode_changed =
6574 			!old_con_state->hdr_output_metadata ||
6575 			!new_con_state->hdr_output_metadata;
6576 	}
6577 
6578 	return 0;
6579 }
6580 
6581 static const struct drm_connector_helper_funcs
6582 amdgpu_dm_connector_helper_funcs = {
6583 	/*
	 * If hotplugging a second, bigger display in FB console mode, bigger
	 * resolution modes will be filtered by drm_mode_validate_size(), and
	 * those modes are missing after the user starts lightdm. So we need
	 * to renew the mode list in the get_modes callback, not just return
	 * the mode count.
6588 	 */
6589 	.get_modes = get_modes,
6590 	.mode_valid = amdgpu_dm_connector_mode_valid,
6591 	.atomic_check = amdgpu_dm_connector_atomic_check,
6592 };
6593 
6594 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6595 {
6596 }
6597 
6598 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6599 {
6600 	struct drm_atomic_state *state = new_crtc_state->state;
6601 	struct drm_plane *plane;
6602 	int num_active = 0;
6603 
6604 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6605 		struct drm_plane_state *new_plane_state;
6606 
6607 		/* Cursor planes are "fake". */
6608 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6609 			continue;
6610 
6611 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6612 
6613 		if (!new_plane_state) {
6614 			/*
			 * The plane is enabled on the CRTC and hasn't changed
6616 			 * state. This means that it previously passed
6617 			 * validation and is therefore enabled.
6618 			 */
6619 			num_active += 1;
6620 			continue;
6621 		}
6622 
6623 		/* We need a framebuffer to be considered enabled. */
6624 		num_active += (new_plane_state->fb != NULL);
6625 	}
6626 
6627 	return num_active;
6628 }
6629 
6630 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6631 					 struct drm_crtc_state *new_crtc_state)
6632 {
6633 	struct dm_crtc_state *dm_new_crtc_state =
6634 		to_dm_crtc_state(new_crtc_state);
6635 
6636 	dm_new_crtc_state->active_planes = 0;
6637 
6638 	if (!dm_new_crtc_state->stream)
6639 		return;
6640 
6641 	dm_new_crtc_state->active_planes =
6642 		count_crtc_active_planes(new_crtc_state);
6643 }
6644 
6645 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6646 				       struct drm_atomic_state *state)
6647 {
6648 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6649 									  crtc);
6650 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6651 	struct dc *dc = adev->dm.dc;
6652 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6653 	int ret = -EINVAL;
6654 
6655 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6656 
6657 	dm_update_crtc_active_planes(crtc, crtc_state);
6658 
6659 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6660 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
6661 		return ret;
6662 	}
6663 
6664 	/*
6665 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6666 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6667 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6668 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6669 	 */
6670 	if (crtc_state->enable &&
6671 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6672 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6673 		return -EINVAL;
6674 	}
6675 
6676 	/* In some use cases, like reset, no stream is attached */
6677 	if (!dm_crtc_state->stream)
6678 		return 0;
6679 
6680 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6681 		return 0;
6682 
6683 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6684 	return ret;
6685 }
6686 
6687 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6688 				      const struct drm_display_mode *mode,
6689 				      struct drm_display_mode *adjusted_mode)
6690 {
6691 	return true;
6692 }
6693 
6694 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6695 	.disable = dm_crtc_helper_disable,
6696 	.atomic_check = dm_crtc_helper_atomic_check,
6697 	.mode_fixup = dm_crtc_helper_mode_fixup,
6698 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6699 };
6700 
6701 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6702 {
6703 
6704 }
6705 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
6724 	return 0;
6725 }
6726 
6727 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6728 					  struct drm_crtc_state *crtc_state,
6729 					  struct drm_connector_state *conn_state)
6730 {
6731 	struct drm_atomic_state *state = crtc_state->state;
6732 	struct drm_connector *connector = conn_state->connector;
6733 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6734 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6735 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6736 	struct drm_dp_mst_topology_mgr *mst_mgr;
6737 	struct drm_dp_mst_port *mst_port;
6738 	enum dc_color_depth color_depth;
6739 	int clock, bpp = 0;
6740 	bool is_y420 = false;
6741 
6742 	if (!aconnector->port || !aconnector->dc_sink)
6743 		return 0;
6744 
6745 	mst_port = aconnector->port;
6746 	mst_mgr = &aconnector->mst_port->mst_mgr;
6747 
6748 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6749 		return 0;
6750 
6751 	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
6755 		color_depth = convert_color_depth_from_display_info(connector,
6756 								    is_y420,
6757 								    max_bpc);
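		/*
		 * Example: COLOR_DEPTH_888 yields 8 bpc, so an RGB stream
		 * carries bpp = 8 * 3 = 24; the PBN (payload bandwidth
		 * number) below is then derived from that bpp and the pixel
		 * clock in kHz.
		 */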
6758 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6759 		clock = adjusted_mode->clock;
6760 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6761 	}
6762 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6763 									   mst_mgr,
6764 									   mst_port,
6765 									   dm_new_connector_state->pbn,
6766 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6767 	if (dm_new_connector_state->vcpi_slots < 0) {
6768 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6769 		return dm_new_connector_state->vcpi_slots;
6770 	}
6771 	return 0;
6772 }
6773 
6774 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6775 	.disable = dm_encoder_helper_disable,
6776 	.atomic_check = dm_encoder_helper_atomic_check
6777 };
6778 
6779 #if defined(CONFIG_DRM_AMD_DC_DCN)
6780 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6781 					    struct dc_state *dc_state)
6782 {
6783 	struct dc_stream_state *stream = NULL;
6784 	struct drm_connector *connector;
6785 	struct drm_connector_state *new_con_state;
6786 	struct amdgpu_dm_connector *aconnector;
6787 	struct dm_connector_state *dm_conn_state;
6788 	int i, j, clock, bpp;
6789 	int vcpi, pbn_div, pbn = 0;
6790 
6791 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6792 
6793 		aconnector = to_amdgpu_dm_connector(connector);
6794 
6795 		if (!aconnector->port)
6796 			continue;
6797 
6798 		if (!new_con_state || !new_con_state->crtc)
6799 			continue;
6800 
6801 		dm_conn_state = to_dm_connector_state(new_con_state);
6802 
6803 		for (j = 0; j < dc_state->stream_count; j++) {
6804 			stream = dc_state->streams[j];
6805 			if (!stream)
6806 				continue;
6807 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6809 				break;
6810 
6811 			stream = NULL;
6812 		}
6813 
6814 		if (!stream)
6815 			continue;
6816 
6817 		if (stream->timing.flags.DSC != 1) {
6818 			drm_dp_mst_atomic_enable_dsc(state,
6819 						     aconnector->port,
6820 						     dm_conn_state->pbn,
6821 						     0,
6822 						     false);
6823 			continue;
6824 		}
6825 
6826 		pbn_div = dm_mst_get_pbn_divider(stream->link);
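		/*
		 * For DSC streams, dsc_cfg.bits_per_pixel is in units of
		 * 1/16 bpp (e.g. a 12 bpp target is stored as 192), which
		 * is why drm_dp_calc_pbn_mode() is called with dsc = true
		 * below.
		 */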
6827 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6828 		clock = stream->timing.pix_clk_100hz / 10;
6829 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6830 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6831 						    aconnector->port,
6832 						    pbn, pbn_div,
6833 						    true);
6834 		if (vcpi < 0)
6835 			return vcpi;
6836 
6837 		dm_conn_state->pbn = pbn;
6838 		dm_conn_state->vcpi_slots = vcpi;
6839 	}
6840 	return 0;
6841 }
6842 #endif
6843 
6844 static void dm_drm_plane_reset(struct drm_plane *plane)
6845 {
6846 	struct dm_plane_state *amdgpu_state = NULL;
6847 
6848 	if (plane->state)
6849 		plane->funcs->atomic_destroy_state(plane, plane->state);
6850 
6851 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6852 	WARN_ON(amdgpu_state == NULL);
6853 
6854 	if (amdgpu_state)
6855 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6856 }
6857 
6858 static struct drm_plane_state *
6859 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6860 {
6861 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6862 
6863 	old_dm_plane_state = to_dm_plane_state(plane->state);
6864 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6865 	if (!dm_plane_state)
6866 		return NULL;
6867 
6868 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6869 
6870 	if (old_dm_plane_state->dc_state) {
6871 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6872 		dc_plane_state_retain(dm_plane_state->dc_state);
6873 	}
6874 
6875 	return &dm_plane_state->base;
6876 }
6877 
6878 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6879 				struct drm_plane_state *state)
6880 {
6881 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6882 
6883 	if (dm_plane_state->dc_state)
6884 		dc_plane_state_release(dm_plane_state->dc_state);
6885 
6886 	drm_atomic_helper_plane_destroy_state(plane, state);
6887 }
6888 
6889 static const struct drm_plane_funcs dm_plane_funcs = {
6890 	.update_plane	= drm_atomic_helper_update_plane,
6891 	.disable_plane	= drm_atomic_helper_disable_plane,
6892 	.destroy	= drm_primary_helper_destroy,
6893 	.reset = dm_drm_plane_reset,
6894 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6895 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6896 	.format_mod_supported = dm_plane_format_mod_supported,
6897 };
6898 
6899 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6900 				      struct drm_plane_state *new_state)
6901 {
6902 	struct amdgpu_framebuffer *afb;
6903 	struct drm_gem_object *obj;
6904 	struct amdgpu_device *adev;
6905 	struct amdgpu_bo *rbo;
6906 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6907 	struct list_head list;
6908 	struct ttm_validate_buffer tv;
6909 	struct ww_acquire_ctx ticket;
6910 	uint32_t domain;
6911 	int r;
6912 
6913 	if (!new_state->fb) {
6914 		DRM_DEBUG_KMS("No FB bound\n");
6915 		return 0;
6916 	}
6917 
6918 	afb = to_amdgpu_framebuffer(new_state->fb);
6919 	obj = new_state->fb->obj[0];
6920 	rbo = gem_to_amdgpu_bo(obj);
6921 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6922 	INIT_LIST_HEAD(&list);
6923 
6924 	tv.bo = &rbo->tbo;
6925 	tv.num_shared = 1;
6926 	list_add(&tv.head, &list);
6927 
6928 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6929 	if (r) {
6930 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6931 		return r;
6932 	}
6933 
6934 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6935 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6936 	else
6937 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6938 
6939 	r = amdgpu_bo_pin(rbo, domain);
6940 	if (unlikely(r != 0)) {
6941 		if (r != -ERESTARTSYS)
6942 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6943 		ttm_eu_backoff_reservation(&ticket, &list);
6944 		return r;
6945 	}
6946 
6947 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6948 	if (unlikely(r != 0)) {
6949 		amdgpu_bo_unpin(rbo);
6950 		ttm_eu_backoff_reservation(&ticket, &list);
6951 		DRM_ERROR("%p bind failed\n", rbo);
6952 		return r;
6953 	}
6954 
6955 	ttm_eu_backoff_reservation(&ticket, &list);
6956 
6957 	afb->address = amdgpu_bo_gpu_offset(rbo);
6958 
6959 	amdgpu_bo_ref(rbo);
6960 
6961 	/**
6962 	 * We don't do surface updates on planes that have been newly created,
6963 	 * but we also don't have the afb->address during atomic check.
6964 	 *
6965 	 * Fill in buffer attributes depending on the address here, but only on
6966 	 * newly created planes since they're not being used by DC yet and this
6967 	 * won't modify global state.
6968 	 */
6969 	dm_plane_state_old = to_dm_plane_state(plane->state);
6970 	dm_plane_state_new = to_dm_plane_state(new_state);
6971 
6972 	if (dm_plane_state_new->dc_state &&
6973 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6974 		struct dc_plane_state *plane_state =
6975 			dm_plane_state_new->dc_state;
6976 		bool force_disable_dcc = !plane_state->dcc.enable;
6977 
6978 		fill_plane_buffer_attributes(
6979 			adev, afb, plane_state->format, plane_state->rotation,
6980 			afb->tiling_flags,
6981 			&plane_state->tiling_info, &plane_state->plane_size,
6982 			&plane_state->dcc, &plane_state->address,
6983 			afb->tmz_surface, force_disable_dcc);
6984 	}
6985 
6986 	return 0;
6987 }
6988 
6989 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6990 				       struct drm_plane_state *old_state)
6991 {
6992 	struct amdgpu_bo *rbo;
6993 	int r;
6994 
6995 	if (!old_state->fb)
6996 		return;
6997 
6998 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6999 	r = amdgpu_bo_reserve(rbo, false);
7000 	if (unlikely(r)) {
7001 		DRM_ERROR("failed to reserve rbo before unpin\n");
7002 		return;
7003 	}
7004 
7005 	amdgpu_bo_unpin(rbo);
7006 	amdgpu_bo_unreserve(rbo);
7007 	amdgpu_bo_unref(&rbo);
7008 }
7009 
7010 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7011 				       struct drm_crtc_state *new_crtc_state)
7012 {
7013 	struct drm_framebuffer *fb = state->fb;
7014 	int min_downscale, max_upscale;
7015 	int min_scale = 0;
7016 	int max_scale = INT_MAX;
7017 
7018 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7019 	if (fb && state->crtc) {
7020 		/* Validate viewport to cover the case when only the position changes */
7021 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7022 			int viewport_width = state->crtc_w;
7023 			int viewport_height = state->crtc_h;
7024 
7025 			if (state->crtc_x < 0)
7026 				viewport_width += state->crtc_x;
7027 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7028 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7029 
7030 			if (state->crtc_y < 0)
7031 				viewport_height += state->crtc_y;
7032 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7033 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
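
			/*
			 * Example: a plane at crtc_x = -100 with crtc_w = 500
			 * leaves a 400-pixel-wide visible viewport; a fully
			 * off-screen plane goes negative and is rejected
			 * below.
			 */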
7034 
7035 			if (viewport_width < 0 || viewport_height < 0) {
7036 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7037 				return -EINVAL;
			} else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* x2 for width because of pipe-split */
				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
7040 				return -EINVAL;
7041 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7042 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7043 				return -EINVAL;
7044 			}
7045 
7046 		}
7047 
7048 		/* Get min/max allowed scaling factors from plane caps. */
7049 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7050 					     &min_downscale, &max_upscale);
7051 		/*
7052 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7053 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7054 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7055 		 */
7056 		min_scale = (1000 << 16) / max_upscale;
7057 		max_scale = (1000 << 16) / min_downscale;
7058 	}
7059 
7060 	return drm_atomic_helper_check_plane_state(
7061 		state, new_crtc_state, min_scale, max_scale, true, true);
7062 }
7063 
7064 static int dm_plane_atomic_check(struct drm_plane *plane,
7065 				 struct drm_atomic_state *state)
7066 {
7067 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7068 										 plane);
7069 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7070 	struct dc *dc = adev->dm.dc;
7071 	struct dm_plane_state *dm_plane_state;
7072 	struct dc_scaling_info scaling_info;
7073 	struct drm_crtc_state *new_crtc_state;
7074 	int ret;
7075 
7076 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7077 
7078 	dm_plane_state = to_dm_plane_state(new_plane_state);
7079 
7080 	if (!dm_plane_state->dc_state)
7081 		return 0;
7082 
7083 	new_crtc_state =
7084 		drm_atomic_get_new_crtc_state(state,
7085 					      new_plane_state->crtc);
7086 	if (!new_crtc_state)
7087 		return -EINVAL;
7088 
7089 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7090 	if (ret)
7091 		return ret;
7092 
7093 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7094 	if (ret)
7095 		return ret;
7096 
7097 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7098 		return 0;
7099 
7100 	return -EINVAL;
7101 }
7102 
7103 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7104 				       struct drm_atomic_state *state)
7105 {
7106 	/* Only support async updates on cursor planes. */
7107 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7108 		return -EINVAL;
7109 
7110 	return 0;
7111 }
7112 
7113 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7114 					 struct drm_atomic_state *state)
7115 {
7116 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7117 									   plane);
7118 	struct drm_plane_state *old_state =
7119 		drm_atomic_get_old_plane_state(state, plane);
7120 
7121 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7122 
7123 	swap(plane->state->fb, new_state->fb);
7124 
7125 	plane->state->src_x = new_state->src_x;
7126 	plane->state->src_y = new_state->src_y;
7127 	plane->state->src_w = new_state->src_w;
7128 	plane->state->src_h = new_state->src_h;
7129 	plane->state->crtc_x = new_state->crtc_x;
7130 	plane->state->crtc_y = new_state->crtc_y;
7131 	plane->state->crtc_w = new_state->crtc_w;
7132 	plane->state->crtc_h = new_state->crtc_h;
7133 
7134 	handle_cursor_update(plane, old_state);
7135 }
7136 
7137 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7138 	.prepare_fb = dm_plane_helper_prepare_fb,
7139 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7140 	.atomic_check = dm_plane_atomic_check,
7141 	.atomic_async_check = dm_plane_atomic_async_check,
7142 	.atomic_async_update = dm_plane_atomic_async_update
7143 };
7144 
7145 /*
7146  * TODO: these are currently initialized to rgb formats only.
7147  * For future use cases we should either initialize them dynamically based on
7148  * plane capabilities, or initialize this array to all formats, so internal drm
7149  * check will succeed, and let DC implement proper check
7150  */
7151 static const uint32_t rgb_formats[] = {
7152 	DRM_FORMAT_XRGB8888,
7153 	DRM_FORMAT_ARGB8888,
7154 	DRM_FORMAT_RGBA8888,
7155 	DRM_FORMAT_XRGB2101010,
7156 	DRM_FORMAT_XBGR2101010,
7157 	DRM_FORMAT_ARGB2101010,
7158 	DRM_FORMAT_ABGR2101010,
7159 	DRM_FORMAT_XRGB16161616,
7160 	DRM_FORMAT_XBGR16161616,
7161 	DRM_FORMAT_ARGB16161616,
7162 	DRM_FORMAT_ABGR16161616,
7163 	DRM_FORMAT_XBGR8888,
7164 	DRM_FORMAT_ABGR8888,
7165 	DRM_FORMAT_RGB565,
7166 };
7167 
7168 static const uint32_t overlay_formats[] = {
7169 	DRM_FORMAT_XRGB8888,
7170 	DRM_FORMAT_ARGB8888,
7171 	DRM_FORMAT_RGBA8888,
7172 	DRM_FORMAT_XBGR8888,
7173 	DRM_FORMAT_ABGR8888,
7174 	DRM_FORMAT_RGB565
7175 };
7176 
7177 static const u32 cursor_formats[] = {
7178 	DRM_FORMAT_ARGB8888
7179 };
7180 
7181 static int get_plane_formats(const struct drm_plane *plane,
7182 			     const struct dc_plane_cap *plane_cap,
7183 			     uint32_t *formats, int max_formats)
7184 {
7185 	int i, num_formats = 0;
7186 
7187 	/*
7188 	 * TODO: Query support for each group of formats directly from
7189 	 * DC plane caps. This will require adding more formats to the
7190 	 * caps list.
7191 	 */
7192 
7193 	switch (plane->type) {
7194 	case DRM_PLANE_TYPE_PRIMARY:
7195 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7196 			if (num_formats >= max_formats)
7197 				break;
7198 
7199 			formats[num_formats++] = rgb_formats[i];
7200 		}
7201 
7202 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7203 			formats[num_formats++] = DRM_FORMAT_NV12;
7204 		if (plane_cap && plane_cap->pixel_format_support.p010)
7205 			formats[num_formats++] = DRM_FORMAT_P010;
7206 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7207 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7208 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7209 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7210 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7211 		}
7212 		break;
7213 
7214 	case DRM_PLANE_TYPE_OVERLAY:
7215 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7216 			if (num_formats >= max_formats)
7217 				break;
7218 
7219 			formats[num_formats++] = overlay_formats[i];
7220 		}
7221 		break;
7222 
7223 	case DRM_PLANE_TYPE_CURSOR:
7224 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7225 			if (num_formats >= max_formats)
7226 				break;
7227 
7228 			formats[num_formats++] = cursor_formats[i];
7229 		}
7230 		break;
7231 	}
7232 
7233 	return num_formats;
7234 }
7235 
7236 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7237 				struct drm_plane *plane,
7238 				unsigned long possible_crtcs,
7239 				const struct dc_plane_cap *plane_cap)
7240 {
7241 	uint32_t formats[32];
7242 	int num_formats;
7243 	int res = -EPERM;
7244 	unsigned int supported_rotations;
7245 	uint64_t *modifiers = NULL;
7246 
7247 	num_formats = get_plane_formats(plane, plane_cap, formats,
7248 					ARRAY_SIZE(formats));
7249 
7250 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7251 	if (res)
7252 		return res;
7253 
7254 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7255 				       &dm_plane_funcs, formats, num_formats,
7256 				       modifiers, plane->type, NULL);
7257 	kfree(modifiers);
7258 	if (res)
7259 		return res;
7260 
7261 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7262 	    plane_cap && plane_cap->per_pixel_alpha) {
7263 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7264 					  BIT(DRM_MODE_BLEND_PREMULTI);
7265 
7266 		drm_plane_create_alpha_property(plane);
7267 		drm_plane_create_blend_mode_property(plane, blend_caps);
7268 	}
7269 
7270 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7271 	    plane_cap &&
7272 	    (plane_cap->pixel_format_support.nv12 ||
7273 	     plane_cap->pixel_format_support.p010)) {
7274 		/* This only affects YUV formats. */
7275 		drm_plane_create_color_properties(
7276 			plane,
7277 			BIT(DRM_COLOR_YCBCR_BT601) |
7278 			BIT(DRM_COLOR_YCBCR_BT709) |
7279 			BIT(DRM_COLOR_YCBCR_BT2020),
7280 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7281 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7282 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7283 	}
7284 
7285 	supported_rotations =
7286 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7287 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7288 
7289 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7290 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7291 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7292 						   supported_rotations);
7293 
7294 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7295 
7296 	/* Create (reset) the plane state */
7297 	if (plane->funcs->reset)
7298 		plane->funcs->reset(plane);
7299 
7300 	return 0;
7301 }
7302 
7303 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7304 			       struct drm_plane *plane,
7305 			       uint32_t crtc_index)
7306 {
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;
	int res = -ENOMEM;
7311 
7312 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7313 	if (!cursor_plane)
7314 		goto fail;
7315 
	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
7318 
7319 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7320 	if (!acrtc)
7321 		goto fail;
7322 
7323 	res = drm_crtc_init_with_planes(
7324 			dm->ddev,
7325 			&acrtc->base,
7326 			plane,
7327 			cursor_plane,
7328 			&amdgpu_dm_crtc_funcs, NULL);
7329 
7330 	if (res)
7331 		goto fail;
7332 
7333 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7334 
	/* Create (reset) the CRTC state */
7336 	if (acrtc->base.funcs->reset)
7337 		acrtc->base.funcs->reset(&acrtc->base);
7338 
7339 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7340 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7341 
7342 	acrtc->crtc_id = crtc_index;
7343 	acrtc->base.enabled = false;
7344 	acrtc->otg_inst = -1;
7345 
7346 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7347 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7348 				   true, MAX_COLOR_LUT_ENTRIES);
7349 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7350 
7351 	return 0;
7352 
7353 fail:
7354 	kfree(acrtc);
7355 	kfree(cursor_plane);
7356 	return res;
7357 }
7358 
7359 
7360 static int to_drm_connector_type(enum signal_type st)
7361 {
7362 	switch (st) {
7363 	case SIGNAL_TYPE_HDMI_TYPE_A:
7364 		return DRM_MODE_CONNECTOR_HDMIA;
7365 	case SIGNAL_TYPE_EDP:
7366 		return DRM_MODE_CONNECTOR_eDP;
7367 	case SIGNAL_TYPE_LVDS:
7368 		return DRM_MODE_CONNECTOR_LVDS;
7369 	case SIGNAL_TYPE_RGB:
7370 		return DRM_MODE_CONNECTOR_VGA;
7371 	case SIGNAL_TYPE_DISPLAY_PORT:
7372 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7373 		return DRM_MODE_CONNECTOR_DisplayPort;
7374 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7375 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7376 		return DRM_MODE_CONNECTOR_DVID;
7377 	case SIGNAL_TYPE_VIRTUAL:
7378 		return DRM_MODE_CONNECTOR_VIRTUAL;
7379 
7380 	default:
7381 		return DRM_MODE_CONNECTOR_Unknown;
7382 	}
7383 }
7384 
7385 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7386 {
7387 	struct drm_encoder *encoder;
7388 
7389 	/* There is only one encoder per connector */
7390 	drm_connector_for_each_possible_encoder(connector, encoder)
7391 		return encoder;
7392 
7393 	return NULL;
7394 }
7395 
7396 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7397 {
7398 	struct drm_encoder *encoder;
7399 	struct amdgpu_encoder *amdgpu_encoder;
7400 
7401 	encoder = amdgpu_dm_connector_to_encoder(connector);
7402 
7403 	if (encoder == NULL)
7404 		return;
7405 
7406 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7407 
7408 	amdgpu_encoder->native_mode.clock = 0;
7409 
7410 	if (!list_empty(&connector->probed_modes)) {
7411 		struct drm_display_mode *preferred_mode = NULL;
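
		/*
		 * probed_modes has already been sorted by the caller, so
		 * only the first entry needs to be checked; the loop body
		 * below intentionally breaks on the first iteration.
		 */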
7412 
7413 		list_for_each_entry(preferred_mode,
7414 				    &connector->probed_modes,
7415 				    head) {
7416 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7417 				amdgpu_encoder->native_mode = *preferred_mode;
7418 
7419 			break;
7420 		}
7421 
7422 	}
7423 }
7424 
7425 static struct drm_display_mode *
7426 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7427 			     char *name,
7428 			     int hdisplay, int vdisplay)
7429 {
7430 	struct drm_device *dev = encoder->dev;
7431 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7432 	struct drm_display_mode *mode = NULL;
7433 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7434 
7435 	mode = drm_mode_duplicate(dev, native_mode);
7436 
7437 	if (mode == NULL)
7438 		return NULL;
7439 
7440 	mode->hdisplay = hdisplay;
7441 	mode->vdisplay = vdisplay;
7442 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7443 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7444 
	return mode;
}
7448 
7449 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7450 						 struct drm_connector *connector)
7451 {
7452 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7453 	struct drm_display_mode *mode = NULL;
7454 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7455 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7456 				to_amdgpu_dm_connector(connector);
7457 	int i;
7458 	int n;
7459 	struct mode_size {
7460 		char name[DRM_DISPLAY_MODE_LEN];
7461 		int w;
7462 		int h;
7463 	} common_modes[] = {
7464 		{  "640x480",  640,  480},
7465 		{  "800x600",  800,  600},
7466 		{ "1024x768", 1024,  768},
7467 		{ "1280x720", 1280,  720},
7468 		{ "1280x800", 1280,  800},
7469 		{"1280x1024", 1280, 1024},
7470 		{ "1440x900", 1440,  900},
7471 		{"1680x1050", 1680, 1050},
7472 		{"1600x1200", 1600, 1200},
7473 		{"1920x1080", 1920, 1080},
7474 		{"1920x1200", 1920, 1200}
7475 	};
7476 
7477 	n = ARRAY_SIZE(common_modes);
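
	/*
	 * Add only modes strictly smaller than the native mode; e.g. a
	 * 1920x1080 native panel gains 1280x720 and friends, but not
	 * 1920x1200 or a duplicate 1920x1080.
	 */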
7478 
7479 	for (i = 0; i < n; i++) {
7480 		struct drm_display_mode *curmode = NULL;
7481 		bool mode_existed = false;
7482 
7483 		if (common_modes[i].w > native_mode->hdisplay ||
7484 		    common_modes[i].h > native_mode->vdisplay ||
7485 		   (common_modes[i].w == native_mode->hdisplay &&
7486 		    common_modes[i].h == native_mode->vdisplay))
7487 			continue;
7488 
7489 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7490 			if (common_modes[i].w == curmode->hdisplay &&
7491 			    common_modes[i].h == curmode->vdisplay) {
7492 				mode_existed = true;
7493 				break;
7494 			}
7495 		}
7496 
7497 		if (mode_existed)
7498 			continue;
7499 
		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;
		drm_mode_probed_add(connector, mode);
7504 		amdgpu_dm_connector->num_modes++;
7505 	}
7506 }
7507 
7508 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7509 					      struct edid *edid)
7510 {
7511 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7512 			to_amdgpu_dm_connector(connector);
7513 
7514 	if (edid) {
7515 		/* empty probed_modes */
7516 		INIT_LIST_HEAD(&connector->probed_modes);
7517 		amdgpu_dm_connector->num_modes =
7518 				drm_add_edid_modes(connector, edid);
7519 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode. Modes later in the probed mode
		 * list could be of higher, preferred resolution, e.g. a
		 * 3840x2160 preferred timing in the base EDID and a
		 * 4096x2160 preferred resolution in a later DID extension
		 * block.
		 */
7528 		drm_mode_sort(&connector->probed_modes);
7529 		amdgpu_dm_get_native_mode(connector);
7530 
7531 		/* Freesync capabilities are reset by calling
7532 		 * drm_add_edid_modes() and need to be
7533 		 * restored here.
7534 		 */
7535 		amdgpu_dm_update_freesync_caps(connector, edid);
7536 	} else {
7537 		amdgpu_dm_connector->num_modes = 0;
7538 	}
7539 }
7540 
7541 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7542 			      struct drm_display_mode *mode)
7543 {
7544 	struct drm_display_mode *m;
7545 
	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7547 		if (drm_mode_equal(m, mode))
7548 			return true;
7549 	}
7550 
7551 	return false;
7552 }
7553 
7554 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7555 {
7556 	const struct drm_display_mode *m;
7557 	struct drm_display_mode *new_mode;
7558 	uint i;
7559 	uint32_t new_modes_count = 0;
7560 
7561 	/* Standard FPS values
7562 	 *
7563 	 * 23.976   - TV/NTSC
7564 	 * 24 	    - Cinema
7565 	 * 25 	    - TV/PAL
7566 	 * 29.97    - TV/NTSC
7567 	 * 30 	    - TV/NTSC
7568 	 * 48 	    - Cinema HFR
7569 	 * 50 	    - TV/PAL
7570 	 * 60 	    - Commonly used
7571 	 * 48,72,96 - Multiples of 24
7572 	 */
7573 	static const uint32_t common_rates[] = {
7574 		23976, 24000, 25000, 29970, 30000,
7575 		48000, 50000, 60000, 72000, 96000
7576 	};
7577 
7578 	/*
7579 	 * Find mode with highest refresh rate with the same resolution
7580 	 * as the preferred mode. Some monitors report a preferred mode
7581 	 * with lower resolution than the highest refresh rate supported.
7582 	 */
7583 
7584 	m = get_highest_refresh_rate_mode(aconnector, true);
7585 	if (!m)
7586 		return 0;
7587 
7588 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7589 		uint64_t target_vtotal, target_vtotal_diff;
7590 		uint64_t num, den;
7591 
7592 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7593 			continue;
7594 
7595 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7596 		    common_rates[i] > aconnector->max_vfreq * 1000)
7597 			continue;
7598 
7599 		num = (unsigned long long)m->clock * 1000 * 1000;
7600 		den = common_rates[i] * (unsigned long long)m->htotal;
7601 		target_vtotal = div_u64(num, den);
7602 		target_vtotal_diff = target_vtotal - m->vtotal;
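
		/*
		 * Example: retargeting a 1920x1080@60 mode (clock = 148500,
		 * htotal = 2200, vtotal = 1125) to 50 Hz gives
		 * target_vtotal = 148500 * 10^6 / (50000 * 2200) = 1350,
		 * i.e. 225 extra lines of vertical front porch.
		 */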
7603 
7604 		/* Check for illegal modes */
7605 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7606 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7607 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7608 			continue;
7609 
7610 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7611 		if (!new_mode)
7612 			goto out;
7613 
7614 		new_mode->vtotal += (u16)target_vtotal_diff;
7615 		new_mode->vsync_start += (u16)target_vtotal_diff;
7616 		new_mode->vsync_end += (u16)target_vtotal_diff;
7617 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7618 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7619 
7620 		if (!is_duplicate_mode(aconnector, new_mode)) {
7621 			drm_mode_probed_add(&aconnector->base, new_mode);
7622 			new_modes_count += 1;
7623 		} else
7624 			drm_mode_destroy(aconnector->base.dev, new_mode);
7625 	}
7626  out:
7627 	return new_modes_count;
7628 }
7629 
7630 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7631 						   struct edid *edid)
7632 {
7633 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7634 		to_amdgpu_dm_connector(connector);
7635 
7636 	if (!(amdgpu_freesync_vid_mode && edid))
7637 		return;
7638 
7639 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7640 		amdgpu_dm_connector->num_modes +=
7641 			add_fs_modes(amdgpu_dm_connector);
7642 }
7643 
7644 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7645 {
7646 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7647 			to_amdgpu_dm_connector(connector);
7648 	struct drm_encoder *encoder;
7649 	struct edid *edid = amdgpu_dm_connector->edid;
7650 
7651 	encoder = amdgpu_dm_connector_to_encoder(connector);
7652 
7653 	if (!drm_edid_is_valid(edid)) {
7654 		amdgpu_dm_connector->num_modes =
7655 				drm_add_modes_noedid(connector, 640, 480);
7656 	} else {
7657 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7658 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7659 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7660 	}
7661 	amdgpu_dm_fbc_init(connector);
7662 
7663 	return amdgpu_dm_connector->num_modes;
7664 }
7665 
7666 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7667 				     struct amdgpu_dm_connector *aconnector,
7668 				     int connector_type,
7669 				     struct dc_link *link,
7670 				     int link_index)
7671 {
7672 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7673 
7674 	/*
7675 	 * Some of the properties below require access to state, like bpc.
7676 	 * Allocate some default initial connector state with our reset helper.
7677 	 */
7678 	if (aconnector->base.funcs->reset)
7679 		aconnector->base.funcs->reset(&aconnector->base);
7680 
7681 	aconnector->connector_id = link_index;
7682 	aconnector->dc_link = link;
7683 	aconnector->base.interlace_allowed = false;
7684 	aconnector->base.doublescan_allowed = false;
7685 	aconnector->base.stereo_allowed = false;
7686 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7687 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7688 	aconnector->audio_inst = -1;
7689 	mutex_init(&aconnector->hpd_lock);
7690 
7691 	/*
7692 	 * configure support HPD hot plug connector_>polled default value is 0
7693 	 * which means HPD hot plug not supported
7694 	 */
7695 	switch (connector_type) {
7696 	case DRM_MODE_CONNECTOR_HDMIA:
7697 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
7700 		break;
7701 	case DRM_MODE_CONNECTOR_DisplayPort:
7702 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
7705 		break;
7706 	case DRM_MODE_CONNECTOR_DVID:
7707 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7708 		break;
7709 	default:
7710 		break;
7711 	}
7712 
7713 	drm_object_attach_property(&aconnector->base.base,
7714 				dm->ddev->mode_config.scaling_mode_property,
7715 				DRM_MODE_SCALE_NONE);
7716 
7717 	drm_object_attach_property(&aconnector->base.base,
7718 				adev->mode_info.underscan_property,
7719 				UNDERSCAN_OFF);
7720 	drm_object_attach_property(&aconnector->base.base,
7721 				adev->mode_info.underscan_hborder_property,
7722 				0);
7723 	drm_object_attach_property(&aconnector->base.base,
7724 				adev->mode_info.underscan_vborder_property,
7725 				0);
7726 
7727 	if (!aconnector->mst_port)
7728 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7729 
	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
7731 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7732 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7733 
7734 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7735 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7736 		drm_object_attach_property(&aconnector->base.base,
7737 				adev->mode_info.abm_level_property, 0);
7738 	}
7739 
7740 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7741 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7742 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7743 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7744 
7745 		if (!aconnector->mst_port)
7746 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7747 
7748 #ifdef CONFIG_DRM_AMD_DC_HDCP
7749 		if (adev->dm.hdcp_workqueue)
7750 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7751 #endif
7752 	}
7753 }
7754 
7755 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7756 			      struct i2c_msg *msgs, int num)
7757 {
7758 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7759 	struct ddc_service *ddc_service = i2c->ddc_service;
7760 	struct i2c_command cmd;
7761 	int i;
7762 	int result = -EIO;
7763 
7764 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7765 
7766 	if (!cmd.payloads)
7767 		return result;
7768 
7769 	cmd.number_of_payloads = num;
7770 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7771 	cmd.speed = 100;
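
	/* Translate each struct i2c_msg 1:1 into a DC i2c_payload; the
	 * transfer direction comes from the I2C_M_RD flag.
	 */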
7772 
7773 	for (i = 0; i < num; i++) {
7774 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7775 		cmd.payloads[i].address = msgs[i].addr;
7776 		cmd.payloads[i].length = msgs[i].len;
7777 		cmd.payloads[i].data = msgs[i].buf;
7778 	}
7779 
7780 	if (dc_submit_i2c(
7781 			ddc_service->ctx->dc,
7782 			ddc_service->ddc_pin->hw_info.ddc_channel,
7783 			&cmd))
7784 		result = num;
7785 
7786 	kfree(cmd.payloads);
7787 	return result;
7788 }
7789 
7790 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7791 {
7792 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7793 }
7794 
7795 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7796 	.master_xfer = amdgpu_dm_i2c_xfer,
7797 	.functionality = amdgpu_dm_i2c_func,
7798 };
7799 
7800 static struct amdgpu_i2c_adapter *
7801 create_i2c(struct ddc_service *ddc_service,
7802 	   int link_index,
7803 	   int *res)
7804 {
7805 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7806 	struct amdgpu_i2c_adapter *i2c;
7807 
7808 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7809 	if (!i2c)
7810 		return NULL;
7811 	i2c->base.owner = THIS_MODULE;
7812 	i2c->base.class = I2C_CLASS_DDC;
7813 	i2c->base.dev.parent = &adev->pdev->dev;
7814 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7815 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7816 	i2c_set_adapdata(&i2c->base, i2c);
7817 	i2c->ddc_service = ddc_service;
7818 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7819 
7820 	return i2c;
7821 }
7822 
7823 
7824 /*
7825  * Note: this function assumes that dc_link_detect() was called for the
7826  * dc_link which will be represented by this aconnector.
7827  */
7828 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7829 				    struct amdgpu_dm_connector *aconnector,
7830 				    uint32_t link_index,
7831 				    struct amdgpu_encoder *aencoder)
7832 {
7833 	int res = 0;
7834 	int connector_type;
7835 	struct dc *dc = dm->dc;
7836 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7837 	struct amdgpu_i2c_adapter *i2c;
7838 
7839 	link->priv = aconnector;
7840 
7841 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7842 
7843 	i2c = create_i2c(link->ddc, link->link_index, &res);
7844 	if (!i2c) {
7845 		DRM_ERROR("Failed to create i2c adapter data\n");
7846 		return -ENOMEM;
7847 	}
7848 
7849 	aconnector->i2c = i2c;
7850 	res = i2c_add_adapter(&i2c->base);
7851 
7852 	if (res) {
7853 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7854 		goto out_free;
7855 	}
7856 
7857 	connector_type = to_drm_connector_type(link->connector_signal);
7858 
7859 	res = drm_connector_init_with_ddc(
7860 			dm->ddev,
7861 			&aconnector->base,
7862 			&amdgpu_dm_connector_funcs,
7863 			connector_type,
7864 			&i2c->base);
7865 
7866 	if (res) {
7867 		DRM_ERROR("connector_init failed\n");
7868 		aconnector->connector_id = -1;
7869 		goto out_free;
7870 	}
7871 
7872 	drm_connector_helper_add(
7873 			&aconnector->base,
7874 			&amdgpu_dm_connector_helper_funcs);
7875 
7876 	amdgpu_dm_connector_init_helper(
7877 		dm,
7878 		aconnector,
7879 		connector_type,
7880 		link,
7881 		link_index);
7882 
7883 	drm_connector_attach_encoder(
7884 		&aconnector->base, &aencoder->base);
7885 
7886 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7887 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7888 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7889 
7890 out_free:
7891 	if (res) {
7892 		kfree(i2c);
7893 		aconnector->i2c = NULL;
7894 	}
7895 	return res;
7896 }
7897 
7898 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7899 {
7900 	switch (adev->mode_info.num_crtc) {
7901 	case 1:
7902 		return 0x1;
7903 	case 2:
7904 		return 0x3;
7905 	case 3:
7906 		return 0x7;
7907 	case 4:
7908 		return 0xf;
7909 	case 5:
7910 		return 0x1f;
7911 	case 6:
7912 	default:
7913 		return 0x3f;
7914 	}
7915 }
7916 
7917 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7918 				  struct amdgpu_encoder *aencoder,
7919 				  uint32_t link_index)
7920 {
7921 	struct amdgpu_device *adev = drm_to_adev(dev);
7922 
7923 	int res = drm_encoder_init(dev,
7924 				   &aencoder->base,
7925 				   &amdgpu_dm_encoder_funcs,
7926 				   DRM_MODE_ENCODER_TMDS,
7927 				   NULL);
7928 
7929 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7930 
7931 	if (!res)
7932 		aencoder->encoder_id = link_index;
7933 	else
7934 		aencoder->encoder_id = -1;
7935 
7936 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7937 
7938 	return res;
7939 }
7940 
7941 static void manage_dm_interrupts(struct amdgpu_device *adev,
7942 				 struct amdgpu_crtc *acrtc,
7943 				 bool enable)
7944 {
7945 	/*
7946 	 * We have no guarantee that the frontend index maps to the same
7947 	 * backend index - some even map to more than one.
7948 	 *
7949 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7950 	 */
7951 	int irq_type =
7952 		amdgpu_display_crtc_idx_to_irq_type(
7953 			adev,
7954 			acrtc->crtc_id);
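
	/*
	 * Note the mirrored ordering: vblank is switched on before the
	 * page-flip interrupt is acquired, and released only after the
	 * page-flip interrupt has been dropped.
	 */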
7955 
7956 	if (enable) {
7957 		drm_crtc_vblank_on(&acrtc->base);
7958 		amdgpu_irq_get(
7959 			adev,
7960 			&adev->pageflip_irq,
7961 			irq_type);
7962 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7963 		amdgpu_irq_get(
7964 			adev,
7965 			&adev->vline0_irq,
7966 			irq_type);
7967 #endif
7968 	} else {
7969 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7970 		amdgpu_irq_put(
7971 			adev,
7972 			&adev->vline0_irq,
7973 			irq_type);
7974 #endif
7975 		amdgpu_irq_put(
7976 			adev,
7977 			&adev->pageflip_irq,
7978 			irq_type);
7979 		drm_crtc_vblank_off(&acrtc->base);
7980 	}
7981 }
7982 
7983 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7984 				      struct amdgpu_crtc *acrtc)
7985 {
7986 	int irq_type =
7987 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7988 
7989 	/**
7990 	 * This reads the current state for the IRQ and force reapplies
7991 	 * the setting to hardware.
7992 	 */
7993 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7994 }
7995 
7996 static bool
7997 is_scaling_state_different(const struct dm_connector_state *dm_state,
7998 			   const struct dm_connector_state *old_dm_state)
7999 {
8000 	if (dm_state->scaling != old_dm_state->scaling)
8001 		return true;
8002 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8003 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8004 			return true;
8005 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8006 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8007 			return true;
8008 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8009 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8010 		return true;
8011 	return false;
8012 }
8013 
8014 #ifdef CONFIG_DRM_AMD_DC_HDCP
8015 static bool is_content_protection_different(struct drm_connector_state *state,
8016 					    const struct drm_connector_state *old_state,
8017 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8018 {
8019 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8020 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8021 
8022 	/* Handle: Type0/1 change */
8023 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8024 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8025 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8026 		return true;
8027 	}
8028 
	/* CP is being re-enabled, ignore this.
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
8033 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8034 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8035 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8036 		return false;
8037 	}
8038 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) and
	 * the restored state will be ENABLED.
	 *
	 * Handles:	UNDESIRED -> ENABLED
8042 	 */
8043 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8044 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8045 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8046 
	/* Check that something is connected and enabled; otherwise we would
	 * start HDCP with nothing connected/enabled (hot-plug, headless S3,
	 * DPMS).
8049 	 *
8050 	 * Handles:	DESIRED -> DESIRED (Special case)
8051 	 */
8052 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8053 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8054 		dm_con_state->update_hdcp = false;
8055 		return true;
8056 	}
8057 
8058 	/*
8059 	 * Handles:	UNDESIRED -> UNDESIRED
8060 	 *		DESIRED -> DESIRED
8061 	 *		ENABLED -> ENABLED
8062 	 */
8063 	if (old_state->content_protection == state->content_protection)
8064 		return false;
8065 
8066 	/*
8067 	 * Handles:	UNDESIRED -> DESIRED
8068 	 *		DESIRED -> UNDESIRED
8069 	 *		ENABLED -> UNDESIRED
8070 	 */
8071 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8072 		return true;
8073 
8074 	/*
8075 	 * Handles:	DESIRED -> ENABLED
8076 	 */
8077 	return false;
8078 }
8079 
8080 #endif
8081 static void remove_stream(struct amdgpu_device *adev,
8082 			  struct amdgpu_crtc *acrtc,
8083 			  struct dc_stream_state *stream)
8084 {
8085 	/* this is the update mode case */
8086 
8087 	acrtc->otg_inst = -1;
8088 	acrtc->enabled = false;
8089 }
8090 
8091 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8092 			       struct dc_cursor_position *position)
8093 {
8094 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8095 	int x, y;
8096 	int xorigin = 0, yorigin = 0;
8097 
8098 	if (!crtc || !plane->state->fb)
8099 		return 0;
8100 
8101 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8102 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8103 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8104 			  __func__,
8105 			  plane->state->crtc_w,
8106 			  plane->state->crtc_h);
8107 		return -EINVAL;
8108 	}
8109 
8110 	x = plane->state->crtc_x;
8111 	y = plane->state->crtc_y;
8112 
8113 	if (x <= -amdgpu_crtc->max_cursor_width ||
8114 	    y <= -amdgpu_crtc->max_cursor_height)
8115 		return 0;
8116 
8117 	if (x < 0) {
8118 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8119 		x = 0;
8120 	}
8121 	if (y < 0) {
8122 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8123 		y = 0;
8124 	}
8125 	position->enable = true;
8126 	position->translate_by_source = true;
8127 	position->x = x;
8128 	position->y = y;
8129 	position->x_hotspot = xorigin;
8130 	position->y_hotspot = yorigin;
8131 
8132 	return 0;
8133 }
8134 
8135 static void handle_cursor_update(struct drm_plane *plane,
8136 				 struct drm_plane_state *old_plane_state)
8137 {
8138 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8139 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8140 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8141 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8142 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8143 	uint64_t address = afb ? afb->address : 0;
8144 	struct dc_cursor_position position = {0};
8145 	struct dc_cursor_attributes attributes;
8146 	int ret;
8147 
8148 	if (!plane->state->fb && !old_plane_state->fb)
8149 		return;
8150 
8151 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8152 		      __func__,
8153 		      amdgpu_crtc->crtc_id,
8154 		      plane->state->crtc_w,
8155 		      plane->state->crtc_h);
8156 
8157 	ret = get_cursor_position(plane, crtc, &position);
8158 	if (ret)
8159 		return;
8160 
8161 	if (!position.enable) {
8162 		/* turn off cursor */
8163 		if (crtc_state && crtc_state->stream) {
8164 			mutex_lock(&adev->dm.dc_lock);
8165 			dc_stream_set_cursor_position(crtc_state->stream,
8166 						      &position);
8167 			mutex_unlock(&adev->dm.dc_lock);
8168 		}
8169 		return;
8170 	}
8171 
8172 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8173 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8174 
8175 	memset(&attributes, 0, sizeof(attributes));
8176 	attributes.address.high_part = upper_32_bits(address);
8177 	attributes.address.low_part  = lower_32_bits(address);
8178 	attributes.width             = plane->state->crtc_w;
8179 	attributes.height            = plane->state->crtc_h;
8180 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8181 	attributes.rotation_angle    = 0;
8182 	attributes.attribute_flags.value = 0;
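
	/*
	 * pitches[0] is in bytes and cpp[0] is bytes per pixel, so this
	 * yields the pitch in pixels; e.g. a 64-wide ARGB8888 cursor has
	 * pitches[0] = 256 and cpp[0] = 4, giving a pitch of 64.
	 */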
8183 
8184 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8185 
8186 	if (crtc_state->stream) {
8187 		mutex_lock(&adev->dm.dc_lock);
8188 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8189 							 &attributes))
8190 			DRM_ERROR("DC failed to set cursor attributes\n");
8191 
8192 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8193 						   &position))
8194 			DRM_ERROR("DC failed to set cursor position\n");
8195 		mutex_unlock(&adev->dm.dc_lock);
8196 	}
8197 }
8198 
8199 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8200 {
8201 
8202 	assert_spin_locked(&acrtc->base.dev->event_lock);
8203 	WARN_ON(acrtc->event);
8204 
8205 	acrtc->event = acrtc->base.state->event;
8206 
8207 	/* Set the flip status */
8208 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8209 
8210 	/* Mark this event as consumed */
8211 	acrtc->base.state->event = NULL;
8212 
8213 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8214 		     acrtc->crtc_id);
8215 }
8216 
8217 static void update_freesync_state_on_stream(
8218 	struct amdgpu_display_manager *dm,
8219 	struct dm_crtc_state *new_crtc_state,
8220 	struct dc_stream_state *new_stream,
8221 	struct dc_plane_state *surface,
8222 	u32 flip_timestamp_in_us)
8223 {
8224 	struct mod_vrr_params vrr_params;
8225 	struct dc_info_packet vrr_infopacket = {0};
8226 	struct amdgpu_device *adev = dm->adev;
8227 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8228 	unsigned long flags;
8229 	bool pack_sdp_v1_3 = false;
8230 
8231 	if (!new_stream)
8232 		return;
8233 
8234 	/*
8235 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8236 	 * For now it's sufficient to just guard against these conditions.
8237 	 */
8238 
8239 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8240 		return;
8241 
8242 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
8244 
8245 	if (surface) {
8246 		mod_freesync_handle_preflip(
8247 			dm->freesync_module,
8248 			surface,
8249 			new_stream,
8250 			flip_timestamp_in_us,
8251 			&vrr_params);
8252 
8253 		if (adev->family < AMDGPU_FAMILY_AI &&
8254 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8255 			mod_freesync_handle_v_update(dm->freesync_module,
8256 						     new_stream, &vrr_params);
8257 
8258 			/* Need to call this before the frame ends. */
8259 			dc_stream_adjust_vmin_vmax(dm->dc,
8260 						   new_crtc_state->stream,
8261 						   &vrr_params.adjust);
8262 		}
8263 	}
8264 
8265 	mod_freesync_build_vrr_infopacket(
8266 		dm->freesync_module,
8267 		new_stream,
8268 		&vrr_params,
8269 		PACKET_TYPE_VRR,
8270 		TRANSFER_FUNC_UNKNOWN,
8271 		&vrr_infopacket,
8272 		pack_sdp_v1_3);
8273 
8274 	new_crtc_state->freesync_timing_changed |=
8275 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8276 			&vrr_params.adjust,
8277 			sizeof(vrr_params.adjust)) != 0);
8278 
8279 	new_crtc_state->freesync_vrr_info_changed |=
8280 		(memcmp(&new_crtc_state->vrr_infopacket,
8281 			&vrr_infopacket,
8282 			sizeof(vrr_infopacket)) != 0);
8283 
8284 	acrtc->dm_irq_params.vrr_params = vrr_params;
8285 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8286 
8287 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8288 	new_stream->vrr_infopacket = vrr_infopacket;
8289 
8290 	if (new_crtc_state->freesync_vrr_info_changed)
8291 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8292 			      new_crtc_state->base.crtc->base.id,
8293 			      (int)new_crtc_state->base.vrr_enabled,
8294 			      (int)vrr_params.state);
8295 
8296 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8297 }
8298 
8299 static void update_stream_irq_parameters(
8300 	struct amdgpu_display_manager *dm,
8301 	struct dm_crtc_state *new_crtc_state)
8302 {
8303 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8304 	struct mod_vrr_params vrr_params;
8305 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8306 	struct amdgpu_device *adev = dm->adev;
8307 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8308 	unsigned long flags;
8309 
8310 	if (!new_stream)
8311 		return;
8312 
8313 	/*
8314 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8315 	 * For now it's sufficient to just guard against these conditions.
8316 	 */
8317 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8318 		return;
8319 
8320 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8321 	vrr_params = acrtc->dm_irq_params.vrr_params;
8322 
8323 	if (new_crtc_state->vrr_supported &&
8324 	    config.min_refresh_in_uhz &&
8325 	    config.max_refresh_in_uhz) {
8326 		/*
8327 		 * if freesync compatible mode was set, config.state will be set
8328 		 * in atomic check
8329 		 */
8330 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8331 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8332 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8333 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8334 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8335 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8336 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8337 		} else {
8338 			config.state = new_crtc_state->base.vrr_enabled ?
8339 						     VRR_STATE_ACTIVE_VARIABLE :
8340 						     VRR_STATE_INACTIVE;
8341 		}
8342 	} else {
8343 		config.state = VRR_STATE_UNSUPPORTED;
8344 	}
8345 
8346 	mod_freesync_build_vrr_params(dm->freesync_module,
8347 				      new_stream,
8348 				      &config, &vrr_params);
8349 
8350 	new_crtc_state->freesync_timing_changed |=
8351 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8352 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8353 
8354 	new_crtc_state->freesync_config = config;
8355 	/* Copy state for access from DM IRQ handler */
8356 	acrtc->dm_irq_params.freesync_config = config;
8357 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8358 	acrtc->dm_irq_params.vrr_params = vrr_params;
8359 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8360 }
8361 
8362 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8363 					    struct dm_crtc_state *new_state)
8364 {
8365 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8366 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8367 
8368 	if (!old_vrr_active && new_vrr_active) {
8369 		/* Transition VRR inactive -> active:
8370 		 * While VRR is active, we must not disable vblank irq, as a
8371 		 * reenable after disable would compute bogus vblank/pflip
8372 		 * timestamps if it likely happened inside display front-porch.
8373 		 *
8374 		 * We also need vupdate irq for the actual core vblank handling
8375 		 * at end of vblank.
8376 		 */
8377 		dm_set_vupdate_irq(new_state->base.crtc, true);
8378 		drm_crtc_vblank_get(new_state->base.crtc);
8379 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8380 				 __func__, new_state->base.crtc->base.id);
8381 	} else if (old_vrr_active && !new_vrr_active) {
8382 		/* Transition VRR active -> inactive:
8383 		 * Allow vblank irq disable again for fixed refresh rate.
8384 		 */
8385 		dm_set_vupdate_irq(new_state->base.crtc, false);
8386 		drm_crtc_vblank_put(new_state->base.crtc);
8387 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8388 				 __func__, new_state->base.crtc->base.id);
8389 	}
8390 }
8391 
8392 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8393 {
8394 	struct drm_plane *plane;
8395 	struct drm_plane_state *old_plane_state;
8396 	int i;
8397 
8398 	/*
8399 	 * TODO: Make this per-stream so we don't issue redundant updates for
8400 	 * commits with multiple streams.
8401 	 */
8402 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8403 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8404 			handle_cursor_update(plane, old_plane_state);
8405 }
8406 
8407 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8408 				    struct dc_state *dc_state,
8409 				    struct drm_device *dev,
8410 				    struct amdgpu_display_manager *dm,
8411 				    struct drm_crtc *pcrtc,
8412 				    bool wait_for_vblank)
8413 {
8414 	uint32_t i;
8415 	uint64_t timestamp_ns;
8416 	struct drm_plane *plane;
8417 	struct drm_plane_state *old_plane_state, *new_plane_state;
8418 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8419 	struct drm_crtc_state *new_pcrtc_state =
8420 			drm_atomic_get_new_crtc_state(state, pcrtc);
8421 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8422 	struct dm_crtc_state *dm_old_crtc_state =
8423 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8424 	int planes_count = 0, vpos, hpos;
8425 	long r;
8426 	unsigned long flags;
8427 	struct amdgpu_bo *abo;
8428 	uint32_t target_vblank, last_flip_vblank;
8429 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8430 	bool pflip_present = false;
8431 	struct {
8432 		struct dc_surface_update surface_updates[MAX_SURFACES];
8433 		struct dc_plane_info plane_infos[MAX_SURFACES];
8434 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8435 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8436 		struct dc_stream_update stream_update;
8437 	} *bundle;
8438 
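	/*
	 * The update bundle carries per-plane payloads for up to
	 * MAX_SURFACES planes and is too large to live on the stack, so
	 * allocate it from the heap instead.
	 */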
8439 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8440 
8441 	if (!bundle) {
8442 		dm_error("Failed to allocate update bundle\n");
8443 		goto cleanup;
8444 	}
8445 
8446 	/*
8447 	 * Disable the cursor first if we're disabling all the planes.
8448 	 * It'll remain on the screen after the planes are re-enabled
8449 	 * if we don't.
8450 	 */
8451 	if (acrtc_state->active_planes == 0)
8452 		amdgpu_dm_commit_cursors(state);
8453 
8454 	/* update planes when needed */
8455 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8456 		struct drm_crtc *crtc = new_plane_state->crtc;
8457 		struct drm_crtc_state *new_crtc_state;
8458 		struct drm_framebuffer *fb = new_plane_state->fb;
8459 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8460 		bool plane_needs_flip;
8461 		struct dc_plane_state *dc_plane;
8462 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8463 
8464 		/* Cursor plane is handled after stream updates */
8465 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8466 			continue;
8467 
8468 		if (!fb || !crtc || pcrtc != crtc)
8469 			continue;
8470 
8471 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8472 		if (!new_crtc_state->active)
8473 			continue;
8474 
8475 		dc_plane = dm_new_plane_state->dc_state;
8476 
8477 		bundle->surface_updates[planes_count].surface = dc_plane;
8478 		if (new_pcrtc_state->color_mgmt_changed) {
8479 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8480 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8481 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8482 		}
8483 
8484 		fill_dc_scaling_info(new_plane_state,
8485 				     &bundle->scaling_infos[planes_count]);
8486 
8487 		bundle->surface_updates[planes_count].scaling_info =
8488 			&bundle->scaling_infos[planes_count];
8489 
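		/*
		 * A page flip is only required when both the old and the new
		 * state have a framebuffer attached; plane enables and
		 * disables are handled as regular (non-flip) surface updates.
		 */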
8490 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8491 
8492 		pflip_present = pflip_present || plane_needs_flip;
8493 
8494 		if (!plane_needs_flip) {
8495 			planes_count += 1;
8496 			continue;
8497 		}
8498 
8499 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8500 
8501 		/*
8502 		 * Wait for all fences on this FB. Do limited wait to avoid
8503 		 * deadlock during GPU reset when this fence will not signal
8504 		 * but we hold reservation lock for the BO.
8505 		 */
8506 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8507 					  msecs_to_jiffies(5000));
8508 		if (unlikely(r <= 0))
8509 			DRM_ERROR("Waiting for fences timed out!");
8510 
8511 		fill_dc_plane_info_and_addr(
8512 			dm->adev, new_plane_state,
8513 			afb->tiling_flags,
8514 			&bundle->plane_infos[planes_count],
8515 			&bundle->flip_addrs[planes_count].address,
8516 			afb->tmz_surface, false);
8517 
8518 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8519 				 new_plane_state->plane->index,
8520 				 bundle->plane_infos[planes_count].dcc.enable);
8521 
8522 		bundle->surface_updates[planes_count].plane_info =
8523 			&bundle->plane_infos[planes_count];
8524 
		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
8529 		bundle->flip_addrs[planes_count].flip_immediate =
8530 			crtc->state->async_flip &&
8531 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8532 
8533 		timestamp_ns = ktime_get_ns();
8534 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8535 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8536 		bundle->surface_updates[planes_count].surface = dc_plane;
8537 
8538 		if (!bundle->surface_updates[planes_count].surface) {
8539 			DRM_ERROR("No surface for CRTC: id=%d\n",
8540 					acrtc_attach->crtc_id);
8541 			continue;
8542 		}
8543 
8544 		if (plane == pcrtc->primary)
8545 			update_freesync_state_on_stream(
8546 				dm,
8547 				acrtc_state,
8548 				acrtc_state->stream,
8549 				dc_plane,
8550 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8551 
8552 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8553 				 __func__,
8554 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8555 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8556 
8557 		planes_count += 1;
8558 
8559 	}
8560 
8561 	if (pflip_present) {
8562 		if (!vrr_active) {
8563 			/* Use old throttling in non-vrr fixed refresh rate mode
8564 			 * to keep flip scheduling based on target vblank counts
8565 			 * working in a backwards compatible way, e.g., for
8566 			 * clients using the GLX_OML_sync_control extension or
8567 			 * DRI3/Present extension with defined target_msc.
8568 			 */
8569 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
8572 			/* For variable refresh rate mode only:
8573 			 * Get vblank of last completed flip to avoid > 1 vrr
8574 			 * flips per video frame by use of throttling, but allow
8575 			 * flip programming anywhere in the possibly large
8576 			 * variable vrr vblank interval for fine-grained flip
8577 			 * timing control and more opportunity to avoid stutter
8578 			 * on late submission of flips.
8579 			 */
8580 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8581 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8582 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8583 		}
8584 
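		/*
		 * wait_for_vblank is a bool, so this throttles by at most
		 * one vblank: don't flip earlier than one vblank after the
		 * last completed flip.
		 */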
8585 		target_vblank = last_flip_vblank + wait_for_vblank;
8586 
8587 		/*
8588 		 * Wait until we're out of the vertical blank period before the one
8589 		 * targeted by the flip
8590 		 */
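		/*
		 * Note: the signed cast on the vblank counter delta keeps
		 * this comparison correct across 32-bit counter wraparound.
		 */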
8591 		while ((acrtc_attach->enabled &&
8592 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8593 							    0, &vpos, &hpos, NULL,
8594 							    NULL, &pcrtc->hwmode)
8595 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8596 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8597 			(int)(target_vblank -
8598 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8599 			usleep_range(1000, 1100);
8600 		}
8601 
		/*
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition
		 * case from 0 -> n planes we have to skip a hardware generated
		 * event and rely on sending it from software.
		 */
8610 		if (acrtc_attach->base.state->event &&
8611 		    acrtc_state->active_planes > 0) {
8612 			drm_crtc_vblank_get(pcrtc);
8613 
8614 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8615 
8616 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8617 			prepare_flip_isr(acrtc_attach);
8618 
8619 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8620 		}
8621 
8622 		if (acrtc_state->stream) {
8623 			if (acrtc_state->freesync_vrr_info_changed)
8624 				bundle->stream_update.vrr_infopacket =
8625 					&acrtc_state->stream->vrr_infopacket;
8626 		}
8627 	}
8628 
8629 	/* Update the planes if changed or disable if we don't have any. */
8630 	if ((planes_count || acrtc_state->active_planes == 0) &&
8631 		acrtc_state->stream) {
8632 		bundle->stream_update.stream = acrtc_state->stream;
8633 		if (new_pcrtc_state->mode_changed) {
8634 			bundle->stream_update.src = acrtc_state->stream->src;
8635 			bundle->stream_update.dst = acrtc_state->stream->dst;
8636 		}
8637 
8638 		if (new_pcrtc_state->color_mgmt_changed) {
8639 			/*
8640 			 * TODO: This isn't fully correct since we've actually
8641 			 * already modified the stream in place.
8642 			 */
8643 			bundle->stream_update.gamut_remap =
8644 				&acrtc_state->stream->gamut_remap_matrix;
8645 			bundle->stream_update.output_csc_transform =
8646 				&acrtc_state->stream->csc_color_matrix;
8647 			bundle->stream_update.out_transfer_func =
8648 				acrtc_state->stream->out_transfer_func;
8649 		}
8650 
8651 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8652 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8653 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8654 
8655 		/*
8656 		 * If FreeSync state on the stream has changed then we need to
8657 		 * re-adjust the min/max bounds now that DC doesn't handle this
8658 		 * as part of commit.
8659 		 */
8660 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8661 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8662 			dc_stream_adjust_vmin_vmax(
8663 				dm->dc, acrtc_state->stream,
8664 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8665 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8666 		}
8667 		mutex_lock(&dm->dc_lock);
8668 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8669 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8670 			amdgpu_dm_psr_disable(acrtc_state->stream);
8671 
8672 		dc_commit_updates_for_stream(dm->dc,
8673 						     bundle->surface_updates,
8674 						     planes_count,
8675 						     acrtc_state->stream,
8676 						     &bundle->stream_update,
8677 						     dc_state);
8678 
		/*
8680 		 * Enable or disable the interrupts on the backend.
8681 		 *
8682 		 * Most pipes are put into power gating when unused.
8683 		 *
8684 		 * When power gating is enabled on a pipe we lose the
8685 		 * interrupt enablement state when power gating is disabled.
8686 		 *
8687 		 * So we need to update the IRQ control state in hardware
8688 		 * whenever the pipe turns on (since it could be previously
8689 		 * power gated) or off (since some pipes can't be power gated
8690 		 * on some ASICs).
8691 		 */
8692 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8693 			dm_update_pflip_irq_state(drm_to_adev(dev),
8694 						  acrtc_attach);
8695 
8696 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8697 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8698 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8699 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8700 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8701 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8702 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8703 			struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
8704 					acrtc_state->stream->dm_stream_context;
8705 
8706 			if (aconn->psr_skip_count > 0)
8707 				aconn->psr_skip_count--;
8708 			else
8709 				amdgpu_dm_psr_enable(acrtc_state->stream);
8710 		}
8711 
8712 		mutex_unlock(&dm->dc_lock);
8713 	}
8714 
	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane, since those pipes are being
	 * disabled anyway.
	 */
8720 	if (acrtc_state->active_planes)
8721 		amdgpu_dm_commit_cursors(state);
8722 
8723 cleanup:
8724 	kfree(bundle);
8725 }
8726 
8727 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8728 				   struct drm_atomic_state *state)
8729 {
8730 	struct amdgpu_device *adev = drm_to_adev(dev);
8731 	struct amdgpu_dm_connector *aconnector;
8732 	struct drm_connector *connector;
8733 	struct drm_connector_state *old_con_state, *new_con_state;
8734 	struct drm_crtc_state *new_crtc_state;
8735 	struct dm_crtc_state *new_dm_crtc_state;
8736 	const struct dc_stream_status *status;
8737 	int i, inst;
8738 
8739 	/* Notify device removals. */
8740 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8741 		if (old_con_state->crtc != new_con_state->crtc) {
8742 			/* CRTC changes require notification. */
8743 			goto notify;
8744 		}
8745 
8746 		if (!new_con_state->crtc)
8747 			continue;
8748 
8749 		new_crtc_state = drm_atomic_get_new_crtc_state(
8750 			state, new_con_state->crtc);
8751 
8752 		if (!new_crtc_state)
8753 			continue;
8754 
8755 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8756 			continue;
8757 
8758 	notify:
8759 		aconnector = to_amdgpu_dm_connector(connector);
8760 
8761 		mutex_lock(&adev->dm.audio_lock);
8762 		inst = aconnector->audio_inst;
8763 		aconnector->audio_inst = -1;
8764 		mutex_unlock(&adev->dm.audio_lock);
8765 
8766 		amdgpu_dm_audio_eld_notify(adev, inst);
8767 	}
8768 
8769 	/* Notify audio device additions. */
8770 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8771 		if (!new_con_state->crtc)
8772 			continue;
8773 
8774 		new_crtc_state = drm_atomic_get_new_crtc_state(
8775 			state, new_con_state->crtc);
8776 
8777 		if (!new_crtc_state)
8778 			continue;
8779 
8780 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8781 			continue;
8782 
8783 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8784 		if (!new_dm_crtc_state->stream)
8785 			continue;
8786 
8787 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8788 		if (!status)
8789 			continue;
8790 
8791 		aconnector = to_amdgpu_dm_connector(connector);
8792 
8793 		mutex_lock(&adev->dm.audio_lock);
8794 		inst = status->audio_inst;
8795 		aconnector->audio_inst = inst;
8796 		mutex_unlock(&adev->dm.audio_lock);
8797 
8798 		amdgpu_dm_audio_eld_notify(adev, inst);
8799 	}
8800 }
8801 
/**
8803  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8804  * @crtc_state: the DRM CRTC state
8805  * @stream_state: the DC stream state.
8806  *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8808  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8809  */
8810 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8811 						struct dc_stream_state *stream_state)
8812 {
8813 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8814 }
8815 
8816 /**
8817  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8818  * @state: The atomic state to commit
8819  *
8820  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
8822  * atomic check should have filtered anything non-kosher.
8823  */
8824 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8825 {
8826 	struct drm_device *dev = state->dev;
8827 	struct amdgpu_device *adev = drm_to_adev(dev);
8828 	struct amdgpu_display_manager *dm = &adev->dm;
8829 	struct dm_atomic_state *dm_state;
8830 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8831 	uint32_t i, j;
8832 	struct drm_crtc *crtc;
8833 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8834 	unsigned long flags;
8835 	bool wait_for_vblank = true;
8836 	struct drm_connector *connector;
8837 	struct drm_connector_state *old_con_state, *new_con_state;
8838 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8839 	int crtc_disable_count = 0;
8840 	bool mode_set_reset_required = false;
8841 
8842 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8843 
8844 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8845 
8846 	dm_state = dm_atomic_get_new_state(state);
8847 	if (dm_state && dm_state->context) {
8848 		dc_state = dm_state->context;
8849 	} else {
8850 		/* No state changes, retain current state. */
8851 		dc_state_temp = dc_create_state(dm->dc);
8852 		ASSERT(dc_state_temp);
8853 		dc_state = dc_state_temp;
8854 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8855 	}
8856 
8857 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8858 				       new_crtc_state, i) {
8859 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8860 
8861 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8862 
8863 		if (old_crtc_state->active &&
8864 		    (!new_crtc_state->active ||
8865 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8866 			manage_dm_interrupts(adev, acrtc, false);
8867 			dc_stream_release(dm_old_crtc_state->stream);
8868 		}
8869 	}
8870 
8871 	drm_atomic_helper_calc_timestamping_constants(state);
8872 
8873 	/* update changed items */
8874 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8875 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8876 
8877 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8878 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8879 
8880 		DRM_DEBUG_ATOMIC(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
8884 			acrtc->crtc_id,
8885 			new_crtc_state->enable,
8886 			new_crtc_state->active,
8887 			new_crtc_state->planes_changed,
8888 			new_crtc_state->mode_changed,
8889 			new_crtc_state->active_changed,
8890 			new_crtc_state->connectors_changed);
8891 
8892 		/* Disable cursor if disabling crtc */
8893 		if (old_crtc_state->active && !new_crtc_state->active) {
8894 			struct dc_cursor_position position;
8895 
8896 			memset(&position, 0, sizeof(position));
8897 			mutex_lock(&dm->dc_lock);
8898 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8899 			mutex_unlock(&dm->dc_lock);
8900 		}
8901 
8902 		/* Copy all transient state flags into dc state */
8903 		if (dm_new_crtc_state->stream) {
8904 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8905 							    dm_new_crtc_state->stream);
8906 		}
8907 
		/* Handle the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
8911 
8912 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8913 
8914 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8915 
8916 			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * the delivery of userspace notifications.
				 * In that case userspace tries to set a mode
				 * on a display which is in fact disconnected;
				 * dc_sink is NULL on the aconnector then, and
				 * we expect a mode reset to come soon.
				 *
				 * This can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
8932 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8933 						__func__, acrtc->base.base.id);
8934 				continue;
8935 			}
8936 
8937 			if (dm_old_crtc_state->stream)
8938 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8939 
8940 			pm_runtime_get_noresume(dev->dev);
8941 
8942 			acrtc->enabled = true;
8943 			acrtc->hw_mode = new_crtc_state->mode;
8944 			crtc->hwmode = new_crtc_state->mode;
8945 			mode_set_reset_required = true;
8946 		} else if (modereset_required(new_crtc_state)) {
8947 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8948 			/* i.e. reset mode */
8949 			if (dm_old_crtc_state->stream)
8950 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8951 
8952 			mode_set_reset_required = true;
8953 		}
8954 	} /* for_each_crtc_in_state() */
8955 
8956 	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
8958 		if (mode_set_reset_required)
8959 			amdgpu_dm_psr_disable_all(dm);
8960 
8961 		dm_enable_per_frame_crtc_master_sync(dc_state);
8962 		mutex_lock(&dm->dc_lock);
8963 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8964 #if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
8968 #endif
8969 		mutex_unlock(&dm->dc_lock);
8970 	}
8971 
8972 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8973 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8974 
8975 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8976 
8977 		if (dm_new_crtc_state->stream != NULL) {
8978 			const struct dc_stream_status *status =
8979 					dc_stream_get_status(dm_new_crtc_state->stream);
8980 
8981 			if (!status)
8982 				status = dc_stream_get_status_from_state(dc_state,
8983 									 dm_new_crtc_state->stream);
8984 			if (!status)
8985 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8986 			else
8987 				acrtc->otg_inst = status->primary_otg_inst;
8988 		}
8989 	}
8990 #ifdef CONFIG_DRM_AMD_DC_HDCP
8991 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8992 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8993 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8994 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8995 
8996 		new_crtc_state = NULL;
8997 
8998 		if (acrtc)
8999 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9000 
9001 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9002 
9003 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9004 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9005 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9006 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9007 			dm_new_con_state->update_hdcp = true;
9008 			continue;
9009 		}
9010 
9011 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9012 			hdcp_update_display(
9013 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9014 				new_con_state->hdcp_content_type,
9015 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9016 	}
9017 #endif
9018 
9019 	/* Handle connector state changes */
9020 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9021 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9022 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9023 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9024 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9025 		struct dc_stream_update stream_update;
9026 		struct dc_info_packet hdr_packet;
9027 		struct dc_stream_status *status = NULL;
9028 		bool abm_changed, hdr_changed, scaling_changed;
9029 
9030 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9031 		memset(&stream_update, 0, sizeof(stream_update));
9032 
9033 		if (acrtc) {
9034 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9035 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9036 		}
9037 
9038 		/* Skip any modesets/resets */
9039 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9040 			continue;
9041 
9042 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9043 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9044 
9045 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9046 							     dm_old_con_state);
9047 
9048 		abm_changed = dm_new_crtc_state->abm_level !=
9049 			      dm_old_crtc_state->abm_level;
9050 
9051 		hdr_changed =
9052 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9053 
9054 		if (!scaling_changed && !abm_changed && !hdr_changed)
9055 			continue;
9056 
9057 		stream_update.stream = dm_new_crtc_state->stream;
9058 		if (scaling_changed) {
9059 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9060 					dm_new_con_state, dm_new_crtc_state->stream);
9061 
9062 			stream_update.src = dm_new_crtc_state->stream->src;
9063 			stream_update.dst = dm_new_crtc_state->stream->dst;
9064 		}
9065 
9066 		if (abm_changed) {
9067 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9068 
9069 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9070 		}
9071 
9072 		if (hdr_changed) {
9073 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9074 			stream_update.hdr_static_metadata = &hdr_packet;
9075 		}
9076 
9077 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9078 
9079 		if (WARN_ON(!status))
9080 			continue;
9081 
9082 		WARN_ON(!status->plane_count);
9083 
9084 		/*
9085 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9086 		 * Here we create an empty update on each plane.
9087 		 * To fix this, DC should permit updating only stream properties.
9088 		 */
9089 		for (j = 0; j < status->plane_count; j++)
9090 			dummy_updates[j].surface = status->plane_states[0];
9091 
9093 		mutex_lock(&dm->dc_lock);
9094 		dc_commit_updates_for_stream(dm->dc,
9095 						     dummy_updates,
9096 						     status->plane_count,
9097 						     dm_new_crtc_state->stream,
9098 						     &stream_update,
9099 						     dc_state);
9100 		mutex_unlock(&dm->dc_lock);
9101 	}
9102 
9103 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9104 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9105 				      new_crtc_state, i) {
9106 		if (old_crtc_state->active && !new_crtc_state->active)
9107 			crtc_disable_count++;
9108 
9109 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9110 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9111 
9112 		/* For freesync config update on crtc state and params for irq */
9113 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9114 
9115 		/* Handle vrr on->off / off->on transitions */
9116 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9117 						dm_new_crtc_state);
9118 	}
9119 
	/*
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. This is intentionally deferred until after the front
	 * end state has been modified, so that the OTG is on and the IRQ
	 * handlers don't access stale or invalid state.
	 */
9126 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9127 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9128 #ifdef CONFIG_DEBUG_FS
9129 		bool configure_crc = false;
9130 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9131 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9132 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9133 #endif
9134 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9135 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9136 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9137 #endif
9138 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9139 
9140 		if (new_crtc_state->active &&
9141 		    (!old_crtc_state->active ||
9142 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9143 			dc_stream_retain(dm_new_crtc_state->stream);
9144 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9145 			manage_dm_interrupts(adev, acrtc, true);
9146 
9147 #ifdef CONFIG_DEBUG_FS
			/*
			 * Frontend may have changed, so reapply the CRC
			 * capture settings for the stream.
			 */
9152 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9153 
9154 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9155 				configure_crc = true;
9156 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9157 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9158 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9159 					acrtc->dm_irq_params.crc_window.update_win = true;
9160 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9161 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9162 					crc_rd_wrk->crtc = crtc;
9163 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9164 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9165 				}
9166 #endif
9167 			}
9168 
9169 			if (configure_crc)
9170 				if (amdgpu_dm_crtc_configure_crc_source(
9171 					crtc, dm_new_crtc_state, cur_crc_src))
					DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9173 #endif
9174 		}
9175 	}
9176 
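	/*
	 * Don't throttle the commit on vblank if any CRTC requested an
	 * async flip.
	 */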
9177 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9178 		if (new_crtc_state->async_flip)
9179 			wait_for_vblank = false;
9180 
	/* update planes when needed per crtc */
9182 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9183 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9184 
9185 		if (dm_new_crtc_state->stream)
9186 			amdgpu_dm_commit_planes(state, dc_state, dev,
9187 						dm, crtc, wait_for_vblank);
9188 	}
9189 
9190 	/* Update audio instances for each connector. */
9191 	amdgpu_dm_commit_audio(dev, state);
9192 
9193 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9194 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9195 	/* restore the backlight level */
9196 	if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0]))
9197 		amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9198 #endif
9199 	/*
9200 	 * send vblank event on all events not handled in flip and
9201 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9202 	 */
9203 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9204 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9205 
9206 		if (new_crtc_state->event)
9207 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9208 
9209 		new_crtc_state->event = NULL;
9210 	}
9211 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9212 
9213 	/* Signal HW programming completion */
9214 	drm_atomic_helper_commit_hw_done(state);
9215 
9216 	if (wait_for_vblank)
9217 		drm_atomic_helper_wait_for_flip_done(dev, state);
9218 
9219 	drm_atomic_helper_cleanup_planes(dev, state);
9220 
9221 	/* return the stolen vga memory back to VRAM */
9222 	if (!adev->mman.keep_stolen_vga_memory)
9223 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9224 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9225 
9226 	/*
9227 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9228 	 * so we can put the GPU into runtime suspend if we're not driving any
9229 	 * displays anymore
9230 	 */
9231 	for (i = 0; i < crtc_disable_count; i++)
9232 		pm_runtime_put_autosuspend(dev->dev);
9233 	pm_runtime_mark_last_busy(dev->dev);
9234 
9235 	if (dc_state_temp)
9236 		dc_release_state(dc_state_temp);
9237 }
9238 
9240 static int dm_force_atomic_commit(struct drm_connector *connector)
9241 {
9242 	int ret = 0;
9243 	struct drm_device *ddev = connector->dev;
9244 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9245 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9246 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9247 	struct drm_connector_state *conn_state;
9248 	struct drm_crtc_state *crtc_state;
9249 	struct drm_plane_state *plane_state;
9250 
9251 	if (!state)
9252 		return -ENOMEM;
9253 
9254 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9255 
9256 	/* Construct an atomic state to restore previous display setting */
9257 
9258 	/*
9259 	 * Attach connectors to drm_atomic_state
9260 	 */
9261 	conn_state = drm_atomic_get_connector_state(state, connector);
9262 
9263 	ret = PTR_ERR_OR_ZERO(conn_state);
9264 	if (ret)
9265 		goto out;
9266 
9267 	/* Attach crtc to drm_atomic_state*/
9268 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9269 
9270 	ret = PTR_ERR_OR_ZERO(crtc_state);
9271 	if (ret)
9272 		goto out;
9273 
9274 	/* force a restore */
9275 	crtc_state->mode_changed = true;
9276 
9277 	/* Attach plane to drm_atomic_state */
9278 	plane_state = drm_atomic_get_plane_state(state, plane);
9279 
9280 	ret = PTR_ERR_OR_ZERO(plane_state);
9281 	if (ret)
9282 		goto out;
9283 
9284 	/* Call commit internally with the state we just constructed */
9285 	ret = drm_atomic_commit(state);
9286 
9287 out:
9288 	drm_atomic_state_put(state);
9289 	if (ret)
9290 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9291 
9292 	return ret;
9293 }
9294 
9295 /*
9296  * This function handles all cases when set mode does not come upon hotplug.
9297  * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
9299  */
9300 void dm_restore_drm_connector_state(struct drm_device *dev,
9301 				    struct drm_connector *connector)
9302 {
9303 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9304 	struct amdgpu_crtc *disconnected_acrtc;
9305 	struct dm_crtc_state *acrtc_state;
9306 
9307 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9308 		return;
9309 
9310 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9311 	if (!disconnected_acrtc)
9312 		return;
9313 
9314 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9315 	if (!acrtc_state->stream)
9316 		return;
9317 
9318 	/*
9319 	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we cannot rely on a usermode call
	 * to turn on the display, so we do it here.
9322 	 */
9323 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9324 		dm_force_atomic_commit(&aconnector->base);
9325 }
9326 
9327 /*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
9330  */
9331 static int do_aquire_global_lock(struct drm_device *dev,
9332 				 struct drm_atomic_state *state)
9333 {
9334 	struct drm_crtc *crtc;
9335 	struct drm_crtc_commit *commit;
9336 	long ret;
9337 
9338 	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it, the
	 * extra locks we are locking here will get released too.
9342 	 */
9343 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9344 	if (ret)
9345 		return ret;
9346 
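	/*
	 * Peek at the commit at the head of each CRTC's commit list (the
	 * most recently queued one) and take a reference under commit_lock
	 * so it cannot be freed while we wait on its completions below.
	 */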
9347 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9348 		spin_lock(&crtc->commit_lock);
9349 		commit = list_first_entry_or_null(&crtc->commit_list,
9350 				struct drm_crtc_commit, commit_entry);
9351 		if (commit)
9352 			drm_crtc_commit_get(commit);
9353 		spin_unlock(&crtc->commit_lock);
9354 
9355 		if (!commit)
9356 			continue;
9357 
		/*
		 * Make sure all pending HW programming has completed and all
		 * page flips are done.
		 */
9362 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9363 
9364 		if (ret > 0)
9365 			ret = wait_for_completion_interruptible_timeout(
9366 					&commit->flip_done, 10*HZ);
9367 
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
9371 
9372 		drm_crtc_commit_put(commit);
9373 	}
9374 
9375 	return ret < 0 ? ret : 0;
9376 }
9377 
9378 static void get_freesync_config_for_crtc(
9379 	struct dm_crtc_state *new_crtc_state,
9380 	struct dm_connector_state *new_con_state)
9381 {
9382 	struct mod_freesync_config config = {0};
9383 	struct amdgpu_dm_connector *aconnector =
9384 			to_amdgpu_dm_connector(new_con_state->base.connector);
9385 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9386 	int vrefresh = drm_mode_vrefresh(mode);
9387 	bool fs_vid_mode = false;
9388 
9389 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9390 					vrefresh >= aconnector->min_vfreq &&
9391 					vrefresh <= aconnector->max_vfreq;
9392 
9393 	if (new_crtc_state->vrr_supported) {
9394 		new_crtc_state->stream->ignore_msa_timing_param = true;
9395 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9396 
9397 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9398 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9399 		config.vsif_supported = true;
9400 		config.btr = true;
9401 
9402 		if (fs_vid_mode) {
9403 			config.state = VRR_STATE_ACTIVE_FIXED;
9404 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9405 			goto out;
9406 		} else if (new_crtc_state->base.vrr_enabled) {
9407 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9408 		} else {
9409 			config.state = VRR_STATE_INACTIVE;
9410 		}
9411 	}
9412 out:
9413 	new_crtc_state->freesync_config = config;
9414 }
9415 
9416 static void reset_freesync_config_for_crtc(
9417 	struct dm_crtc_state *new_crtc_state)
9418 {
9419 	new_crtc_state->vrr_supported = false;
9420 
9421 	memset(&new_crtc_state->vrr_infopacket, 0,
9422 	       sizeof(new_crtc_state->vrr_infopacket));
9423 }
9424 
9425 static bool
9426 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9427 				 struct drm_crtc_state *new_crtc_state)
9428 {
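	/*
	 * Timing counts as unchanged for freesync purposes when only the
	 * vertical blanking differs: vtotal and the vsync position move
	 * while the vsync pulse width and all horizontal parameters stay
	 * equal. Such a difference can be absorbed by adjusting the front
	 * porch alone, without a full modeset.
	 */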
9429 	struct drm_display_mode old_mode, new_mode;
9430 
9431 	if (!old_crtc_state || !new_crtc_state)
9432 		return false;
9433 
9434 	old_mode = old_crtc_state->mode;
9435 	new_mode = new_crtc_state->mode;
9436 
9437 	if (old_mode.clock       == new_mode.clock &&
9438 	    old_mode.hdisplay    == new_mode.hdisplay &&
9439 	    old_mode.vdisplay    == new_mode.vdisplay &&
9440 	    old_mode.htotal      == new_mode.htotal &&
9441 	    old_mode.vtotal      != new_mode.vtotal &&
9442 	    old_mode.hsync_start == new_mode.hsync_start &&
9443 	    old_mode.vsync_start != new_mode.vsync_start &&
9444 	    old_mode.hsync_end   == new_mode.hsync_end &&
9445 	    old_mode.vsync_end   != new_mode.vsync_end &&
9446 	    old_mode.hskew       == new_mode.hskew &&
9447 	    old_mode.vscan       == new_mode.vscan &&
9448 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9449 	    (new_mode.vsync_end - new_mode.vsync_start))
9450 		return true;
9451 
9452 	return false;
9453 }
9454 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9456 	uint64_t num, den, res;
9457 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9458 
9459 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9460 
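	/*
	 * refresh_uhz = pixel_clock_hz * 1,000,000 / (htotal * vtotal).
	 * mode.clock is in kHz, hence the extra factor of 1000 below.
	 * E.g. clock = 148500, htotal = 2200, vtotal = 1125 gives
	 * 60,000,000 uHz, i.e. 60 Hz.
	 */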
9461 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9462 	den = (unsigned long long)new_crtc_state->mode.htotal *
9463 	      (unsigned long long)new_crtc_state->mode.vtotal;
9464 
9465 	res = div_u64(num, den);
9466 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9467 }
9468 
9469 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9470 				struct drm_atomic_state *state,
9471 				struct drm_crtc *crtc,
9472 				struct drm_crtc_state *old_crtc_state,
9473 				struct drm_crtc_state *new_crtc_state,
9474 				bool enable,
9475 				bool *lock_and_validation_needed)
9476 {
9477 	struct dm_atomic_state *dm_state = NULL;
9478 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9479 	struct dc_stream_state *new_stream;
9480 	int ret = 0;
9481 
9482 	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9484 	 * update changed items
9485 	 */
9486 	struct amdgpu_crtc *acrtc = NULL;
9487 	struct amdgpu_dm_connector *aconnector = NULL;
9488 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9489 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9490 
9491 	new_stream = NULL;
9492 
9493 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9494 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9495 	acrtc = to_amdgpu_crtc(crtc);
9496 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9497 
9498 	/* TODO This hack should go away */
9499 	if (aconnector && enable) {
9500 		/* Make sure fake sink is created in plug-in scenario */
9501 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9502 							    &aconnector->base);
9503 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9504 							    &aconnector->base);
9505 
9506 		if (IS_ERR(drm_new_conn_state)) {
9507 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9508 			goto fail;
9509 		}
9510 
9511 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9512 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9513 
9514 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9515 			goto skip_modeset;
9516 
9517 		new_stream = create_validate_stream_for_sink(aconnector,
9518 							     &new_crtc_state->mode,
9519 							     dm_new_conn_state,
9520 							     dm_old_crtc_state->stream);
9521 
		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */
9528 
9529 		if (!new_stream) {
9530 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9531 					__func__, acrtc->base.base.id);
9532 			ret = -ENOMEM;
9533 			goto fail;
9534 		}
9535 
9536 		/*
9537 		 * TODO: Check VSDB bits to decide whether this should
9538 		 * be enabled or not.
9539 		 */
9540 		new_stream->triggered_crtc_reset.enabled =
9541 			dm->force_timing_sync;
9542 
9543 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9544 
9545 		ret = fill_hdr_info_packet(drm_new_conn_state,
9546 					   &new_stream->hdr_static_metadata);
9547 		if (ret)
9548 			goto fail;
9549 
9550 		/*
9551 		 * If we already removed the old stream from the context
9552 		 * (and set the new stream to NULL) then we can't reuse
9553 		 * the old stream even if the stream and scaling are unchanged.
9554 		 * We'll hit the BUG_ON and black screen.
9555 		 *
9556 		 * TODO: Refactor this function to allow this check to work
9557 		 * in all conditions.
9558 		 */
9559 		if (amdgpu_freesync_vid_mode &&
9560 		    dm_new_crtc_state->stream &&
9561 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9562 			goto skip_modeset;
9563 
9564 		if (dm_new_crtc_state->stream &&
9565 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9566 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9567 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
					 new_crtc_state->mode_changed);
9570 		}
9571 	}
9572 
9573 	/* mode_changed flag may get updated above, need to check again */
9574 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9575 		goto skip_modeset;
9576 
9577 	DRM_DEBUG_ATOMIC(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
9581 		acrtc->crtc_id,
9582 		new_crtc_state->enable,
9583 		new_crtc_state->active,
9584 		new_crtc_state->planes_changed,
9585 		new_crtc_state->mode_changed,
9586 		new_crtc_state->active_changed,
9587 		new_crtc_state->connectors_changed);
9588 
9589 	/* Remove stream for any changed/disabled CRTC */
9590 	if (!enable) {
9591 
9592 		if (!dm_old_crtc_state->stream)
9593 			goto skip_modeset;
9594 
9595 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9596 		    is_timing_unchanged_for_freesync(new_crtc_state,
9597 						     old_crtc_state)) {
9598 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER(
				"Mode change not required for front porch change, "
				"setting mode_changed to %d\n",
				new_crtc_state->mode_changed);
9603 
9604 			set_freesync_fixed_config(dm_new_crtc_state);
9605 
9606 			goto skip_modeset;
9607 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9608 			   is_freesync_video_mode(&new_crtc_state->mode,
9609 						  aconnector)) {
9610 			set_freesync_fixed_config(dm_new_crtc_state);
9611 		}
9612 
9613 		ret = dm_atomic_get_state(state, &dm_state);
9614 		if (ret)
9615 			goto fail;
9616 
9617 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9618 				crtc->base.id);
9619 
9620 		/* i.e. reset mode */
9621 		if (dc_remove_stream_from_ctx(
9622 				dm->dc,
9623 				dm_state->context,
9624 				dm_old_crtc_state->stream) != DC_OK) {
9625 			ret = -EINVAL;
9626 			goto fail;
9627 		}
9628 
9629 		dc_stream_release(dm_old_crtc_state->stream);
9630 		dm_new_crtc_state->stream = NULL;
9631 
9632 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9633 
9634 		*lock_and_validation_needed = true;
9635 
9636 	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer dereference on
		 * new_stream when added MST connectors are not found in the
		 * existing crtc_state in chained mode.
		 * TODO: need to dig out the root cause of that.
		 */
9642 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9643 			goto skip_modeset;
9644 
9645 		if (modereset_required(new_crtc_state))
9646 			goto skip_modeset;
9647 
9648 		if (modeset_required(new_crtc_state, new_stream,
9649 				     dm_old_crtc_state->stream)) {
9650 
9651 			WARN_ON(dm_new_crtc_state->stream);
9652 
9653 			ret = dm_atomic_get_state(state, &dm_state);
9654 			if (ret)
9655 				goto fail;
9656 
9657 			dm_new_crtc_state->stream = new_stream;
9658 
9659 			dc_stream_retain(new_stream);
9660 
9661 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9662 					 crtc->base.id);
9663 
9664 			if (dc_add_stream_to_ctx(
9665 					dm->dc,
9666 					dm_state->context,
9667 					dm_new_crtc_state->stream) != DC_OK) {
9668 				ret = -EINVAL;
9669 				goto fail;
9670 			}
9671 
9672 			*lock_and_validation_needed = true;
9673 		}
9674 	}
9675 
9676 skip_modeset:
9677 	/* Release extra reference */
9678 	if (new_stream)
		dc_stream_release(new_stream);
9680 
9681 	/*
9682 	 * We want to do dc stream updates that do not require a
9683 	 * full modeset below.
9684 	 */
9685 	if (!(enable && aconnector && new_crtc_state->active))
9686 		return 0;
	/*
	 * Given the above conditions, the dc stream state cannot be NULL,
	 * because the CRTC:
	 * 1. Is in the process of being enabled (its stream was just added
	 *    to the dc context, or is already on the context),
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
9695 	BUG_ON(dm_new_crtc_state->stream == NULL);
9696 
9697 	/* Scaling or underscan settings */
9698 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9699 				drm_atomic_crtc_needs_modeset(new_crtc_state))
9700 		update_stream_scaling_settings(
9701 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9702 
9703 	/* ABM settings */
9704 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9705 
9706 	/*
9707 	 * Color management settings. We also update color properties
9708 	 * when a modeset is needed, to ensure it gets reprogrammed.
9709 	 */
9710 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9711 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9712 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9713 		if (ret)
9714 			goto fail;
9715 	}
9716 
9717 	/* Update Freesync settings. */
9718 	get_freesync_config_for_crtc(dm_new_crtc_state,
9719 				     dm_new_conn_state);
9720 
9721 	return ret;
9722 
9723 fail:
9724 	if (new_stream)
9725 		dc_stream_release(new_stream);
9726 	return ret;
9727 }
9728 
9729 static bool should_reset_plane(struct drm_atomic_state *state,
9730 			       struct drm_plane *plane,
9731 			       struct drm_plane_state *old_plane_state,
9732 			       struct drm_plane_state *new_plane_state)
9733 {
9734 	struct drm_plane *other;
9735 	struct drm_plane_state *old_other_state, *new_other_state;
9736 	struct drm_crtc_state *new_crtc_state;
9737 	int i;
9738 
9739 	/*
9740 	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
9742 	 * the stream.
9743 	 */
9744 	if (state->allow_modeset)
9745 		return true;
9746 
9747 	/* Exit early if we know that we're adding or removing the plane. */
9748 	if (old_plane_state->crtc != new_plane_state->crtc)
9749 		return true;
9750 
9751 	/* old crtc == new_crtc == NULL, plane not in context. */
9752 	if (!new_plane_state->crtc)
9753 		return false;
9754 
9755 	new_crtc_state =
9756 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9757 
9758 	if (!new_crtc_state)
9759 		return true;
9760 
9761 	/* CRTC Degamma changes currently require us to recreate planes. */
9762 	if (new_crtc_state->color_mgmt_changed)
9763 		return true;
9764 
9765 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9766 		return true;
9767 
9768 	/*
9769 	 * If there are any new primary or overlay planes being added or
9770 	 * removed then the z-order can potentially change. To ensure
9771 	 * correct z-order and pipe acquisition the current DC architecture
9772 	 * requires us to remove and recreate all existing planes.
9773 	 *
9774 	 * TODO: Come up with a more elegant solution for this.
9775 	 */
9776 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
9779 			continue;
9780 
9781 		if (old_other_state->crtc != new_plane_state->crtc &&
9782 		    new_other_state->crtc != new_plane_state->crtc)
9783 			continue;
9784 
9785 		if (old_other_state->crtc != new_other_state->crtc)
9786 			return true;
9787 
9788 		/* Src/dst size and scaling updates. */
9789 		if (old_other_state->src_w != new_other_state->src_w ||
9790 		    old_other_state->src_h != new_other_state->src_h ||
9791 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9792 		    old_other_state->crtc_h != new_other_state->crtc_h)
9793 			return true;
9794 
9795 		/* Rotation / mirroring updates. */
9796 		if (old_other_state->rotation != new_other_state->rotation)
9797 			return true;
9798 
9799 		/* Blending updates. */
9800 		if (old_other_state->pixel_blend_mode !=
9801 		    new_other_state->pixel_blend_mode)
9802 			return true;
9803 
9804 		/* Alpha updates. */
9805 		if (old_other_state->alpha != new_other_state->alpha)
9806 			return true;
9807 
9808 		/* Colorspace changes. */
9809 		if (old_other_state->color_range != new_other_state->color_range ||
9810 		    old_other_state->color_encoding != new_other_state->color_encoding)
9811 			return true;
9812 
9813 		/* Framebuffer checks fall at the end. */
9814 		if (!old_other_state->fb || !new_other_state->fb)
9815 			continue;
9816 
9817 		/* Pixel format changes can require bandwidth updates. */
9818 		if (old_other_state->fb->format != new_other_state->fb->format)
9819 			return true;
9820 
9821 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9822 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9823 
9824 		/* Tiling and DCC changes also require bandwidth updates. */
9825 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9826 		    old_afb->base.modifier != new_afb->base.modifier)
9827 			return true;
9828 	}
9829 
9830 	return false;
9831 }
9832 
9833 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9834 			      struct drm_plane_state *new_plane_state,
9835 			      struct drm_framebuffer *fb)
9836 {
9837 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9838 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9839 	unsigned int pitch;
9840 	bool linear;
9841 
9842 	if (fb->width > new_acrtc->max_cursor_width ||
9843 	    fb->height > new_acrtc->max_cursor_height) {
		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 fb->width, fb->height);
9847 		return -EINVAL;
9848 	}
9849 	if (new_plane_state->src_w != fb->width << 16 ||
9850 	    new_plane_state->src_h != fb->height << 16) {
9851 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9852 		return -EINVAL;
9853 	}
9854 
9855 	/* Pitch in pixels */
9856 	pitch = fb->pitches[0] / fb->format->cpp[0];
9857 
9858 	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9860 				 fb->width, pitch);
9861 		return -EINVAL;
9862 	}
9863 
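	/*
	 * Only these pitches are accepted by the hardware cursor; they
	 * presumably correspond to the 64x64, 128x128 and 256x256 cursor
	 * sizes advertised via max_cursor_width/height.
	 */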
9864 	switch (pitch) {
9865 	case 64:
9866 	case 128:
9867 	case 256:
9868 		/* FB pitch is supported by cursor plane */
9869 		break;
9870 	default:
9871 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9872 		return -EINVAL;
9873 	}
9874 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
9877 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9878 		if (adev->family < AMDGPU_FAMILY_AI) {
9879 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9880 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9881 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9882 		} else {
9883 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9884 		}
9885 		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9887 			return -EINVAL;
9888 		}
9889 	}
9890 
9891 	return 0;
9892 }
9893 
9894 static int dm_update_plane_state(struct dc *dc,
9895 				 struct drm_atomic_state *state,
9896 				 struct drm_plane *plane,
9897 				 struct drm_plane_state *old_plane_state,
9898 				 struct drm_plane_state *new_plane_state,
9899 				 bool enable,
9900 				 bool *lock_and_validation_needed)
9901 {
9902 
9903 	struct dm_atomic_state *dm_state = NULL;
9904 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9905 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9906 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9907 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9908 	struct amdgpu_crtc *new_acrtc;
9909 	bool needs_reset;
9910 	int ret = 0;
9911 
9913 	new_plane_crtc = new_plane_state->crtc;
9914 	old_plane_crtc = old_plane_state->crtc;
9915 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9916 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9917 
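	/*
	 * The cursor plane is not handled by the DC core; it is programmed
	 * separately via handle_cursor_update(). Atomic check therefore
	 * only validates its position and framebuffer here.
	 */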
9918 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9919 		if (!enable || !new_plane_crtc ||
9920 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9921 			return 0;
9922 
9923 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9924 
9925 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9926 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9927 			return -EINVAL;
9928 		}
9929 
9930 		if (new_plane_state->fb) {
9931 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9932 						 new_plane_state->fb);
9933 			if (ret)
9934 				return ret;
9935 		}
9936 
9937 		return 0;
9938 	}
9939 
9940 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9941 					 new_plane_state);
9942 
9943 	/* Remove any changed/removed planes */
9944 	if (!enable) {
9945 		if (!needs_reset)
9946 			return 0;
9947 
9948 		if (!old_plane_crtc)
9949 			return 0;
9950 
9951 		old_crtc_state = drm_atomic_get_old_crtc_state(
9952 				state, old_plane_crtc);
9953 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9954 
9955 		if (!dm_old_crtc_state->stream)
9956 			return 0;
9957 
9958 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9959 				plane->base.id, old_plane_crtc->base.id);
9960 
9961 		ret = dm_atomic_get_state(state, &dm_state);
9962 		if (ret)
9963 			return ret;
9964 
9965 		if (!dc_remove_plane_from_context(
9966 				dc,
9967 				dm_old_crtc_state->stream,
9968 				dm_old_plane_state->dc_state,
9969 				dm_state->context)) {
9970 
9971 			return -EINVAL;
9972 		}
9973 
9975 		dc_plane_state_release(dm_old_plane_state->dc_state);
9976 		dm_new_plane_state->dc_state = NULL;
9977 
9978 		*lock_and_validation_needed = true;
9979 
9980 	} else { /* Add new planes */
9981 		struct dc_plane_state *dc_new_plane_state;
9982 
9983 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9984 			return 0;
9985 
9986 		if (!new_plane_crtc)
9987 			return 0;
9988 
9989 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9990 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9991 
9992 		if (!dm_new_crtc_state->stream)
9993 			return 0;
9994 
9995 		if (!needs_reset)
9996 			return 0;
9997 
9998 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9999 		if (ret)
10000 			return ret;
10001 
10002 		WARN_ON(dm_new_plane_state->dc_state);
10003 
10004 		dc_new_plane_state = dc_create_plane_state(dc);
10005 		if (!dc_new_plane_state)
10006 			return -ENOMEM;
10007 
10008 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10009 				 plane->base.id, new_plane_crtc->base.id);
10010 
10011 		ret = fill_dc_plane_attributes(
10012 			drm_to_adev(new_plane_crtc->dev),
10013 			dc_new_plane_state,
10014 			new_plane_state,
10015 			new_crtc_state);
10016 		if (ret) {
10017 			dc_plane_state_release(dc_new_plane_state);
10018 			return ret;
10019 		}
10020 
10021 		ret = dm_atomic_get_state(state, &dm_state);
10022 		if (ret) {
10023 			dc_plane_state_release(dc_new_plane_state);
10024 			return ret;
10025 		}
10026 
10027 		/*
10028 		 * Any atomic check errors that occur after this will
10029 		 * not need a release. The plane state will be attached
10030 		 * to the stream, and therefore part of the atomic
10031 		 * state. It'll be released when the atomic state is
10032 		 * cleaned.
10033 		 */
10034 		if (!dc_add_plane_to_context(
10035 				dc,
10036 				dm_new_crtc_state->stream,
10037 				dc_new_plane_state,
10038 				dm_state->context)) {
10040 			dc_plane_state_release(dc_new_plane_state);
10041 			return -EINVAL;
10042 		}
10043 
10044 		dm_new_plane_state->dc_state = dc_new_plane_state;
10045 
		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
10049 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10050 
10051 		*lock_and_validation_needed = true;
10052 	}
10053 
10055 	return ret;
10056 }
10057 
10058 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10059 				struct drm_crtc *crtc,
10060 				struct drm_crtc_state *new_crtc_state)
10061 {
10062 	struct drm_plane_state *new_cursor_state, *new_primary_state;
10063 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10064 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe, but it inherits the scaling and positioning from
	 * the underlying pipe. Check that the cursor plane's scaling matches
	 * the primary plane's.
	 */
10069 
10070 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10071 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10072 	if (!new_cursor_state || !new_primary_state ||
10073 	    !new_cursor_state->fb || !new_primary_state->fb) {
10074 		return 0;
10075 	}
10076 
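	/*
	 * Compare scale factors in thousandths: crtc_{w,h} are in whole
	 * pixels while src_{w,h} are 16.16 fixed point, hence the >> 16.
	 */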
10077 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10078 			 (new_cursor_state->src_w >> 16);
10079 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10080 			 (new_cursor_state->src_h >> 16);
10081 
10082 	primary_scale_w = new_primary_state->crtc_w * 1000 /
10083 			 (new_primary_state->src_w >> 16);
10084 	primary_scale_h = new_primary_state->crtc_h * 1000 /
10085 			 (new_primary_state->src_h >> 16);
10086 
10087 	if (cursor_scale_w != primary_scale_w ||
10088 	    cursor_scale_h != primary_scale_h) {
10089 		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10090 		return -EINVAL;
10091 	}
10092 
10093 	return 0;
10094 }
10095 
10096 #if defined(CONFIG_DRM_AMD_DC_DCN)
10097 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10098 {
10099 	struct drm_connector *connector;
10100 	struct drm_connector_state *conn_state;
10101 	struct amdgpu_dm_connector *aconnector = NULL;
10102 	int i;
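
	/* Find the first MST connector assigned to this CRTC, if any. */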
10103 	for_each_new_connector_in_state(state, connector, conn_state, i) {
10104 		if (conn_state->crtc != crtc)
10105 			continue;
10106 
10107 		aconnector = to_amdgpu_dm_connector(connector);
10108 		if (!aconnector->port || !aconnector->mst_port)
10109 			aconnector = NULL;
10110 		else
10111 			break;
10112 	}
10113 
10114 	if (!aconnector)
10115 		return 0;
10116 
10117 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10118 }
10119 #endif
10120 
10121 static int validate_overlay(struct drm_atomic_state *state)
10122 {
10123 	int i;
10124 	struct drm_plane *plane;
10125 	struct drm_plane_state *new_plane_state;
10126 	struct drm_plane_state *primary_state, *overlay_state = NULL;
10127 
10128 	/* Check if primary plane is contained inside overlay */
10129 	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10130 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10131 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10132 				return 0;
10133 
10134 			overlay_state = new_plane_state;
10135 			continue;
10136 		}
10137 	}
10138 
10139 	/* check if we're making changes to the overlay plane */
10140 	if (!overlay_state)
10141 		return 0;
10142 
10143 	/* check if overlay plane is enabled */
10144 	if (!overlay_state->crtc)
10145 		return 0;
10146 
10147 	/* find the primary plane for the CRTC that the overlay is enabled on */
10148 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10149 	if (IS_ERR(primary_state))
10150 		return PTR_ERR(primary_state);
10151 
10152 	/* check if primary plane is enabled */
10153 	if (!primary_state->crtc)
10154 		return 0;
10155 
10156 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10157 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10158 	    primary_state->crtc_y < overlay_state->crtc_y ||
10159 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10160 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10161 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10162 		return -EINVAL;
10163 	}
10164 
10165 	return 0;
10166 }
10167 
10168 /**
10169  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10170  * @dev: The DRM device
10171  * @state: The atomic state to commit
10172  *
10173  * Validate that the given atomic state is programmable by DC into hardware.
10174  * This involves constructing a &struct dc_state reflecting the new hardware
10175  * state we wish to commit, then querying DC to see if it is programmable. It's
10176  * important not to modify the existing DC state. Otherwise, atomic_check
10177  * may unexpectedly commit hardware changes.
10178  *
10179  * When validating the DC state, it's important that the right locks are
10180  * acquired. For full updates case which removes/adds/updates streams on one
10181  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10182  * that any such full update commit will wait for completion of any outstanding
10183  * flip using DRMs synchronization events.
10184  *
10185  * Note that DM adds the affected connectors for all CRTCs in state, when that
10186  * might not seem necessary. This is because DC stream creation requires the
10187  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10188  * be possible but non-trivial - a possible TODO item.
10189  *
 * Return: 0 on success, or a negative error code on failure.
10191  */
10192 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10193 				  struct drm_atomic_state *state)
10194 {
10195 	struct amdgpu_device *adev = drm_to_adev(dev);
10196 	struct dm_atomic_state *dm_state = NULL;
10197 	struct dc *dc = adev->dm.dc;
10198 	struct drm_connector *connector;
10199 	struct drm_connector_state *old_con_state, *new_con_state;
10200 	struct drm_crtc *crtc;
10201 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10202 	struct drm_plane *plane;
10203 	struct drm_plane_state *old_plane_state, *new_plane_state;
10204 	enum dc_status status;
10205 	int ret, i;
10206 	bool lock_and_validation_needed = false;
10207 	struct dm_crtc_state *dm_old_crtc_state;
10208 
10209 	trace_amdgpu_dm_atomic_check_begin(state);
10210 
10211 	ret = drm_atomic_helper_check_modeset(dev, state);
10212 	if (ret)
10213 		goto fail;
10214 
10215 	/* Check connector changes */
10216 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10217 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10218 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10219 
10220 		/* Skip connectors that are disabled or part of modeset already. */
10221 		if (!old_con_state->crtc && !new_con_state->crtc)
10222 			continue;
10223 
10224 		if (!new_con_state->crtc)
10225 			continue;
10226 
10227 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10228 		if (IS_ERR(new_crtc_state)) {
10229 			ret = PTR_ERR(new_crtc_state);
10230 			goto fail;
10231 		}
10232 
10233 		if (dm_old_con_state->abm_level !=
10234 		    dm_new_con_state->abm_level)
10235 			new_crtc_state->connectors_changed = true;
10236 	}
10237 
10238 #if defined(CONFIG_DRM_AMD_DC_DCN)
10239 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10240 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10241 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10242 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10243 				if (ret)
10244 					goto fail;
10245 			}
10246 		}
10247 	}
10248 #endif
10249 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10250 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10251 
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
10256 			continue;
10257 
10258 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10259 		if (ret)
10260 			goto fail;
10261 
10262 		if (!new_crtc_state->enable)
10263 			continue;
10264 
10265 		ret = drm_atomic_add_affected_connectors(state, crtc);
10266 		if (ret)
			goto fail;
10268 
10269 		ret = drm_atomic_add_affected_planes(state, crtc);
10270 		if (ret)
10271 			goto fail;
10272 
10273 		if (dm_old_crtc_state->dsc_force_changed)
10274 			new_crtc_state->mode_changed = true;
10275 	}
10276 
10277 	/*
10278 	 * Add all primary and overlay planes on the CRTC to the state
10279 	 * whenever a plane is enabled to maintain correct z-ordering
10280 	 * and to enable fast surface updates.
10281 	 */
10282 	drm_for_each_crtc(crtc, dev) {
10283 		bool modified = false;
10284 
10285 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10286 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10287 				continue;
10288 
10289 			if (new_plane_state->crtc == crtc ||
10290 			    old_plane_state->crtc == crtc) {
10291 				modified = true;
10292 				break;
10293 			}
10294 		}
10295 
10296 		if (!modified)
10297 			continue;
10298 
10299 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10300 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10301 				continue;
10302 
10303 			new_plane_state =
10304 				drm_atomic_get_plane_state(state, plane);
10305 
10306 			if (IS_ERR(new_plane_state)) {
10307 				ret = PTR_ERR(new_plane_state);
10308 				goto fail;
10309 			}
10310 		}
10311 	}
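
	/*
	 * DC requires removes and disables to happen before adds and
	 * enables, so the plane/CRTC updates below run in two passes:
	 * first with enable == false, then again with enable == true.
	 */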
10312 
	/* Remove existing planes if they are modified */
10314 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10315 		ret = dm_update_plane_state(dc, state, plane,
10316 					    old_plane_state,
10317 					    new_plane_state,
10318 					    false,
10319 					    &lock_and_validation_needed);
10320 		if (ret)
10321 			goto fail;
10322 	}
10323 
10324 	/* Disable all crtcs which require disable */
10325 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10326 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10327 					   old_crtc_state,
10328 					   new_crtc_state,
10329 					   false,
10330 					   &lock_and_validation_needed);
10331 		if (ret)
10332 			goto fail;
10333 	}
10334 
10335 	/* Enable all crtcs which require enable */
10336 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10337 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10338 					   old_crtc_state,
10339 					   new_crtc_state,
10340 					   true,
10341 					   &lock_and_validation_needed);
10342 		if (ret)
10343 			goto fail;
10344 	}
10345 
10346 	ret = validate_overlay(state);
10347 	if (ret)
10348 		goto fail;
10349 
10350 	/* Add new/modified planes */
10351 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10352 		ret = dm_update_plane_state(dc, state, plane,
10353 					    old_plane_state,
10354 					    new_plane_state,
10355 					    true,
10356 					    &lock_and_validation_needed);
10357 		if (ret)
10358 			goto fail;
10359 	}
10360 
10361 	/* Run this here since we want to validate the streams we created */
10362 	ret = drm_atomic_helper_check_planes(dev, state);
10363 	if (ret)
10364 		goto fail;
10365 
10366 	/* Check cursor planes scaling */
10367 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10368 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10369 		if (ret)
10370 			goto fail;
10371 	}
10372 
10373 	if (state->legacy_cursor_update) {
10374 		/*
10375 		 * This is a fast cursor update coming from the plane update
10376 		 * helper, check if it can be done asynchronously for better
10377 		 * performance.
10378 		 */
10379 		state->async_update =
10380 			!drm_atomic_helper_async_check(dev, state);
10381 
10382 		/*
10383 		 * Skip the remaining global validation if this is an async
10384 		 * update. Cursor updates can be done without affecting
10385 		 * state or bandwidth calcs and this avoids the performance
10386 		 * penalty of locking the private state object and
10387 		 * allocating a new dc_state.
10388 		 */
10389 		if (state->async_update)
10390 			return 0;
10391 	}
10392 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to
	 * decide how to handle.
	 */
10398 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10399 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10400 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10401 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10402 
10403 		/* Skip any modesets/resets */
10404 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10405 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10406 			continue;
10407 
		/* Skip anything that is not a scaling or underscan change */
10409 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10410 			continue;
10411 
10412 		lock_and_validation_needed = true;
10413 	}
10414 
	/*
10416 	 * Streams and planes are reset when there are changes that affect
10417 	 * bandwidth. Anything that affects bandwidth needs to go through
10418 	 * DC global validation to ensure that the configuration can be applied
10419 	 * to hardware.
10420 	 *
10421 	 * We have to currently stall out here in atomic_check for outstanding
10422 	 * commits to finish in this case because our IRQ handlers reference
10423 	 * DRM state directly - we can end up disabling interrupts too early
10424 	 * if we don't.
10425 	 *
10426 	 * TODO: Remove this stall and drop DM state private objects.
10427 	 */
10428 	if (lock_and_validation_needed) {
10429 		ret = dm_atomic_get_state(state, &dm_state);
10430 		if (ret)
10431 			goto fail;
10432 
10433 		ret = do_aquire_global_lock(dev, state);
10434 		if (ret)
10435 			goto fail;
10436 
10437 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
10440 
10441 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10442 		if (ret)
10443 			goto fail;
10444 #endif
10445 
10446 		/*
10447 		 * Perform validation of MST topology in the state:
10448 		 * We need to perform MST atomic check before calling
10449 		 * dc_validate_global_state(), or there is a chance
10450 		 * to get stuck in an infinite loop and hang eventually.
10451 		 */
10452 		ret = drm_dp_mst_atomic_check(state);
10453 		if (ret)
10454 			goto fail;
10455 		status = dc_validate_global_state(dc, dm_state->context, false);
10456 		if (status != DC_OK) {
10457 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10458 				       dc_status_to_str(status), status);
10459 			ret = -EINVAL;
10460 			goto fail;
10461 		}
10462 	} else {
10463 		/*
10464 		 * The commit is a fast update. Fast updates shouldn't change
10465 		 * the DC context, affect global validation, and can have their
10466 		 * commit work done in parallel with other commits not touching
10467 		 * the same resource. If we have a new DC context as part of
10468 		 * the DM atomic state from validation we need to free it and
10469 		 * retain the existing one instead.
10470 		 *
10471 		 * Furthermore, since the DM atomic state only contains the DC
10472 		 * context and can safely be annulled, we can free the state
10473 		 * and clear the associated private object now to free
10474 		 * some memory and avoid a possible use-after-free later.
10475 		 */
10476 
10477 		for (i = 0; i < state->num_private_objs; i++) {
10478 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10479 
10480 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
10482 
10483 				dm_atomic_destroy_state(obj,
10484 						state->private_objs[i].state);
10485 
10486 				/* If i is not at the end of the array then the
10487 				 * last element needs to be moved to where i was
10488 				 * before the array can safely be truncated.
10489 				 */
10490 				if (i != j)
10491 					state->private_objs[i] =
10492 						state->private_objs[j];
10493 
10494 				state->private_objs[j].ptr = NULL;
10495 				state->private_objs[j].state = NULL;
10496 				state->private_objs[j].old_state = NULL;
10497 				state->private_objs[j].new_state = NULL;
10498 
10499 				state->num_private_objs = j;
10500 				break;
10501 			}
10502 		}
10503 	}
10504 
10505 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10507 		struct dm_crtc_state *dm_new_crtc_state =
10508 			to_dm_crtc_state(new_crtc_state);
10509 
10510 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10511 							 UPDATE_TYPE_FULL :
10512 							 UPDATE_TYPE_FAST;
10513 	}
10514 
10515 	/* Must be success */
10516 	WARN_ON(ret);
10517 
10518 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10519 
10520 	return ret;
10521 
10522 fail:
10523 	if (ret == -EDEADLK)
10524 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10525 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10526 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10527 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10529 
10530 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10531 
10532 	return ret;
10533 }
10534 
10535 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10536 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10537 {
10538 	uint8_t dpcd_data;
10539 	bool capable = false;
10540 
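	/*
	 * The MSA_TIMING_PAR_IGNORED bit in DP_DOWN_STREAM_PORT_COUNT
	 * indicates that the sink can operate without the MSA timing
	 * parameters, which is a prerequisite for variable refresh on DP.
	 */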
10541 	if (amdgpu_dm_connector->dc_link &&
10542 		dm_helpers_dp_read_dpcd(
10543 				NULL,
10544 				amdgpu_dm_connector->dc_link,
10545 				DP_DOWN_STREAM_PORT_COUNT,
10546 				&dpcd_data,
10547 				sizeof(dpcd_data))) {
		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
10549 	}
10550 
10551 	return capable;
10552 }
10553 
10554 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10555 		unsigned int offset,
10556 		unsigned int total_length,
10557 		uint8_t *data,
10558 		unsigned int length,
10559 		struct amdgpu_hdmi_vsdb_info *vsdb)
10560 {
10561 	bool res;
10562 	union dmub_rb_cmd cmd;
10563 	struct dmub_cmd_send_edid_cea *input;
10564 	struct dmub_cmd_edid_cea_output *output;
10565 
10566 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10567 		return false;
10568 
10569 	memset(&cmd, 0, sizeof(cmd));
10570 
10571 	input = &cmd.edid_cea.data.input;
10572 
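	/*
	 * Each DMUB command carries a single chunk of the CEA extension
	 * block along with its offset into the full block, so the firmware
	 * can reassemble and parse the complete extension.
	 */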
10573 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10574 	cmd.edid_cea.header.sub_type = 0;
10575 	cmd.edid_cea.header.payload_bytes =
10576 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10577 	input->offset = offset;
10578 	input->length = length;
10579 	input->total_length = total_length;
10580 	memcpy(input->payload, data, length);
10581 
10582 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
10583 	if (!res) {
10584 		DRM_ERROR("EDID CEA parser failed\n");
10585 		return false;
10586 	}
10587 
10588 	output = &cmd.edid_cea.data.output;
10589 
10590 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10591 		if (!output->ack.success) {
10592 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
10593 					output->ack.offset);
10594 		}
10595 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10596 		if (!output->amd_vsdb.vsdb_found)
10597 			return false;
10598 
10599 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10600 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10601 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10602 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10603 	} else {
10604 		DRM_WARN("Unknown EDID CEA parser results\n");
10605 		return false;
10606 	}
10607 
10608 	return true;
10609 }
10610 
10611 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
10612 		uint8_t *edid_ext, int len,
10613 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10614 {
10615 	int i;
10616 
10617 	/* send extension block to DMCU for parsing */
10618 	for (i = 0; i < len; i += 8) {
10619 		bool res;
10620 		int offset;
10621 
		/* send 8 bytes at a time */
10623 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
10624 			return false;
10625 
		if (i + 8 == len) {
			/* entire EDID block sent; expect the parsed result */
10628 			int version, min_rate, max_rate;
10629 
10630 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
10631 			if (res) {
10632 				/* amd vsdb found */
10633 				vsdb_info->freesync_supported = 1;
10634 				vsdb_info->amd_vsdb_version = version;
10635 				vsdb_info->min_refresh_rate_hz = min_rate;
10636 				vsdb_info->max_refresh_rate_hz = max_rate;
10637 				return true;
10638 			}
10639 			/* not amd vsdb */
10640 			return false;
10641 		}
10642 
		/* check for ack */
10644 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
10645 		if (!res)
10646 			return false;
10647 	}
10648 
10649 	return false;
10650 }
10651 
10652 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10653 		uint8_t *edid_ext, int len,
10654 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10655 {
10656 	int i;
10657 
	/* send extension block to DMUB for parsing */
10659 	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
10661 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10662 			return false;
10663 	}
10664 
10665 	return vsdb_info->freesync_supported;
10666 }
10667 
10668 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10669 		uint8_t *edid_ext, int len,
10670 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10671 {
10672 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10673 
10674 	if (adev->dm.dmub_srv)
10675 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10676 	else
10677 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10678 }
10679 
10680 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10681 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10682 {
10683 	uint8_t *edid_ext = NULL;
10684 	int i;
10685 	bool valid_vsdb_found = false;
10686 
10687 	/*----- drm_find_cea_extension() -----*/
10688 	/* No EDID or EDID extensions */
	if (!edid || edid->extensions == 0)
10690 		return -ENODEV;
10691 
10692 	/* Find CEA extension */
10693 	for (i = 0; i < edid->extensions; i++) {
10694 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10695 		if (edid_ext[0] == CEA_EXT)
10696 			break;
10697 	}
10698 
10699 	if (i == edid->extensions)
10700 		return -ENODEV;
10701 
10702 	/*----- cea_db_offsets() -----*/
10703 	if (edid_ext[0] != CEA_EXT)
10704 		return -ENODEV;
10705 
10706 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10707 
10708 	return valid_vsdb_found ? i : -ENODEV;
10709 }
10710 
10711 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10712 					struct edid *edid)
10713 {
10714 	int i = 0;
10715 	struct detailed_timing *timing;
10716 	struct detailed_non_pixel *data;
10717 	struct detailed_data_monitor_range *range;
10718 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10719 			to_amdgpu_dm_connector(connector);
10720 	struct dm_connector_state *dm_con_state = NULL;
10721 
10722 	struct drm_device *dev = connector->dev;
10723 	struct amdgpu_device *adev = drm_to_adev(dev);
10724 	bool freesync_capable = false;
10725 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10726 
10727 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
10729 		goto update;
10730 	}
10731 
10732 	if (!edid) {
10733 		dm_con_state = to_dm_connector_state(connector->state);
10734 
10735 		amdgpu_dm_connector->min_vfreq = 0;
10736 		amdgpu_dm_connector->max_vfreq = 0;
10737 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10738 
10739 		goto update;
10740 	}
10741 
10742 	dm_con_state = to_dm_connector_state(connector->state);
10743 
10744 	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not set up FreeSync\n");
10746 		goto update;
10747 	}
10748 	if (!adev->dm.freesync_module)
10749 		goto update;
10751 
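	/*
	 * For DP/eDP sinks, FreeSync capability comes from the EDID monitor
	 * range descriptor, but only when the sink reports that it can
	 * ignore the MSA timing parameters.
	 */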
10752 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10753 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10754 		bool edid_check_required = false;
10755 
10756 		if (edid) {
10757 			edid_check_required = is_dp_capable_without_timing_msa(
10758 						adev->dm.dc,
10759 						amdgpu_dm_connector);
10760 		}
10761 
		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing	= &edid->detailed_timings[i];
10767 				data	= &timing->data.other_data;
10768 				range	= &data->data.range;
10769 				/*
10770 				 * Check if monitor has continuous frequency mode
10771 				 */
10772 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10773 					continue;
10774 				/*
10775 				 * Check for flag range limits only. If flag == 1 then
10776 				 * no additional timing information provided.
10777 				 * Default GTF, GTF Secondary curve and CVT are not
10778 				 * supported
10779 				 */
10780 				if (range->flags != 1)
10781 					continue;
10782 
10783 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10784 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10785 				amdgpu_dm_connector->pixel_clock_mhz =
10786 					range->pixel_clock_mhz * 10;
10787 
10788 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10789 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10790 
10791 				break;
10792 			}
10793 
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
10799 		}
10800 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10801 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10802 		if (i >= 0 && vsdb_info.freesync_supported) {
10803 			timing  = &edid->detailed_timings[i];
10804 			data    = &timing->data.other_data;
10805 
10806 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10807 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10808 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10809 				freesync_capable = true;
10810 
10811 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10812 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10813 		}
10814 	}
10815 
10816 update:
10817 	if (dm_con_state)
10818 		dm_con_state->freesync_capable = freesync_capable;
10819 
10820 	if (connector->vrr_capable_property)
10821 		drm_connector_set_vrr_capable_property(connector,
10822 						       freesync_capable);
10823 }
10824 
10825 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10826 {
10827 	struct amdgpu_device *adev = drm_to_adev(dev);
10828 	struct dc *dc = adev->dm.dc;
10829 	int i;
10830 
10831 	mutex_lock(&adev->dm.dc_lock);
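	/*
	 * Apply the global force_timing_sync flag to every active stream,
	 * then retrigger master/slave CRTC synchronization.
	 */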
10832 	if (dc->current_state) {
10833 		for (i = 0; i < dc->current_state->stream_count; ++i)
10834 			dc->current_state->streams[i]
10835 				->triggered_crtc_reset.enabled =
10836 				adev->dm.force_timing_sync;
10837 
10838 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10839 		dc_trigger_sync(dc, dc->current_state);
10840 	}
10841 	mutex_unlock(&adev->dm.dc_lock);
10842 }
10843 
10844 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10845 		       uint32_t value, const char *func_name)
10846 {
10847 #ifdef DM_CHECK_ADDR_0
10848 	if (address == 0) {
10849 		DC_ERR("invalid register write. address = 0");
10850 		return;
10851 	}
10852 #endif
10853 	cgs_write_register(ctx->cgs_device, address, value);
10854 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10855 }
10856 
10857 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10858 			  const char *func_name)
10859 {
10860 	uint32_t value;
10861 #ifdef DM_CHECK_ADDR_0
10862 	if (address == 0) {
10863 		DC_ERR("invalid register read; address = 0\n");
10864 		return 0;
10865 	}
10866 #endif
10867 
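	/*
	 * Register reads cannot be serviced while DMUB register-access
	 * gathering is in progress; flag the misuse and return 0.
	 */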
10868 	if (ctx->dmub_srv &&
10869 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10870 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10871 		ASSERT(false);
10872 		return 0;
10873 	}
10874 
10875 	value = cgs_read_register(ctx->cgs_device, address);
10876 
10877 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10878 
10879 	return value;
10880 }
10881 
10882 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10883 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
10884 {
10885 	struct amdgpu_device *adev = ctx->driver_context;
10886 	int ret = 0;
10887 
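	/*
	 * Fire the AUX request at DMUB asynchronously, then block (for up
	 * to 10 seconds) until the notification handler signals completion.
	 */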
10888 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
10890 	if (ret == 0) {
10891 		*operation_result = AUX_RET_ERROR_TIMEOUT;
10892 		return -1;
10893 	}
10894 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10895 
10896 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10897 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10898 
		/* For read case, copy data to payload */
		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
		    (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
			       adev->dm.dmub_notify->aux_reply.length);
10904 	}
10905 
10906 	return adev->dm.dmub_notify->aux_reply.length;
10907 }
10908