1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41 
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53 
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61 #include "amdgpu_dm_psr.h"
62 
63 #include "ivsrcid/ivsrcid_vislands30.h"
64 
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
73 
74 #include <drm/drm_atomic.h>
75 #include <drm/drm_atomic_uapi.h>
76 #include <drm/drm_atomic_helper.h>
77 #include <drm/drm_dp_mst_helper.h>
78 #include <drm/drm_fb_helper.h>
79 #include <drm/drm_fourcc.h>
80 #include <drm/drm_edid.h>
81 #include <drm/drm_vblank.h>
82 #include <drm/drm_audio_component.h>
83 
84 #if defined(CONFIG_DRM_AMD_DC_DCN)
85 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
86 
87 #include "dcn/dcn_1_0_offset.h"
88 #include "dcn/dcn_1_0_sh_mask.h"
89 #include "soc15_hw_ip.h"
90 #include "vega10_ip_offset.h"
91 
92 #include "soc15_common.h"
93 #endif
94 
95 #include "modules/inc/mod_freesync.h"
96 #include "modules/power/power_helpers.h"
97 #include "modules/inc/mod_info_packet.h"
98 
99 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
101 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
103 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
105 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
107 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
109 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
111 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
113 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
115 
116 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
117 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
118 
119 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
120 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
121 
122 /* Number of bytes in PSP header for firmware. */
123 #define PSP_HEADER_BYTES 0x100
124 
125 /* Number of bytes in PSP footer for firmware. */
126 #define PSP_FOOTER_BYTES 0x100
127 
128 /**
129  * DOC: overview
130  *
131  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
132  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
133  * requests into DC requests, and DC responses into DRM responses.
134  *
135  * The root control structure is &struct amdgpu_display_manager.
136  */
137 
138 /* basic init/fini API */
139 static int amdgpu_dm_init(struct amdgpu_device *adev);
140 static void amdgpu_dm_fini(struct amdgpu_device *adev);
141 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
142 
143 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
144 {
145 	switch (link->dpcd_caps.dongle_type) {
146 	case DISPLAY_DONGLE_NONE:
147 		return DRM_MODE_SUBCONNECTOR_Native;
148 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
149 		return DRM_MODE_SUBCONNECTOR_VGA;
150 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
151 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
152 		return DRM_MODE_SUBCONNECTOR_DVID;
153 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
154 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
155 		return DRM_MODE_SUBCONNECTOR_HDMIA;
156 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
157 	default:
158 		return DRM_MODE_SUBCONNECTOR_Unknown;
159 	}
160 }
161 
162 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
163 {
164 	struct dc_link *link = aconnector->dc_link;
165 	struct drm_connector *connector = &aconnector->base;
166 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
167 
168 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
169 		return;
170 
171 	if (aconnector->dc_sink)
172 		subconnector = get_subconnector_type(link);
173 
174 	drm_object_property_set_value(&connector->base,
175 			connector->dev->mode_config.dp_subconnector_property,
176 			subconnector);
177 }
178 
179 /*
180  * initializes drm_device display related structures, based on the information
181  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
182  * drm_encoder, drm_mode_config
183  *
184  * Returns 0 on success
185  */
186 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
187 /* removes and deallocates the drm structures, created by the above function */
188 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
189 
190 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
191 				struct drm_plane *plane,
192 				unsigned long possible_crtcs,
193 				const struct dc_plane_cap *plane_cap);
194 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
195 			       struct drm_plane *plane,
196 			       uint32_t link_index);
197 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
198 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
199 				    uint32_t link_index,
200 				    struct amdgpu_encoder *amdgpu_encoder);
201 static int amdgpu_dm_encoder_init(struct drm_device *dev,
202 				  struct amdgpu_encoder *aencoder,
203 				  uint32_t link_index);
204 
205 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
206 
207 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
208 
209 static int amdgpu_dm_atomic_check(struct drm_device *dev,
210 				  struct drm_atomic_state *state);
211 
212 static void handle_cursor_update(struct drm_plane *plane,
213 				 struct drm_plane_state *old_plane_state);
214 
215 static const struct drm_format_info *
216 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 
218 static bool
219 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
220 				 struct drm_crtc_state *new_crtc_state);
221 /*
222  * dm_vblank_get_counter
223  *
224  * @brief
225  * Get counter for number of vertical blanks
226  *
227  * @param
228  * struct amdgpu_device *adev - [in] desired amdgpu device
229  * int disp_idx - [in] which CRTC to get the counter from
230  *
231  * @return
232  * Counter for vertical blanks
233  */
234 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
235 {
236 	if (crtc >= adev->mode_info.num_crtc)
237 		return 0;
238 	else {
239 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
240 
241 		if (acrtc->dm_irq_params.stream == NULL) {
242 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
243 				  crtc);
244 			return 0;
245 		}
246 
247 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
248 	}
249 }
250 
251 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
252 				  u32 *vbl, u32 *position)
253 {
254 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
255 
256 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
257 		return -EINVAL;
258 	else {
259 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
260 
261 		if (acrtc->dm_irq_params.stream ==  NULL) {
262 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
263 				  crtc);
264 			return 0;
265 		}
266 
267 		/*
268 		 * TODO rework base driver to use values directly.
269 		 * for now parse it back into reg-format
270 		 */
271 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
272 					 &v_blank_start,
273 					 &v_blank_end,
274 					 &h_position,
275 					 &v_position);
276 
277 		*position = v_position | (h_position << 16);
278 		*vbl = v_blank_start | (v_blank_end << 16);
279 	}
280 
281 	return 0;
282 }
283 
284 static bool dm_is_idle(void *handle)
285 {
286 	/* XXX todo */
287 	return true;
288 }
289 
290 static int dm_wait_for_idle(void *handle)
291 {
292 	/* XXX todo */
293 	return 0;
294 }
295 
296 static bool dm_check_soft_reset(void *handle)
297 {
298 	return false;
299 }
300 
301 static int dm_soft_reset(void *handle)
302 {
303 	/* XXX todo */
304 	return 0;
305 }
306 
307 static struct amdgpu_crtc *
308 get_crtc_by_otg_inst(struct amdgpu_device *adev,
309 		     int otg_inst)
310 {
311 	struct drm_device *dev = adev_to_drm(adev);
312 	struct drm_crtc *crtc;
313 	struct amdgpu_crtc *amdgpu_crtc;
314 
315 	if (WARN_ON(otg_inst == -1))
316 		return adev->mode_info.crtcs[0];
317 
318 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 		amdgpu_crtc = to_amdgpu_crtc(crtc);
320 
321 		if (amdgpu_crtc->otg_inst == otg_inst)
322 			return amdgpu_crtc;
323 	}
324 
325 	return NULL;
326 }
327 
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329 {
330 	return acrtc->dm_irq_params.freesync_config.state ==
331 		       VRR_STATE_ACTIVE_VARIABLE ||
332 	       acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_FIXED;
334 }
335 
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337 {
338 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 }
341 
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 					      struct dm_crtc_state *new_state)
344 {
345 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
346 		return true;
347 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348 		return true;
349 	else
350 		return false;
351 }
352 
353 /**
354  * dm_pflip_high_irq() - Handle pageflip interrupt
355  * @interrupt_params: ignored
356  *
357  * Handles the pageflip interrupt by notifying all interested parties
358  * that the pageflip has been completed.
359  */
360 static void dm_pflip_high_irq(void *interrupt_params)
361 {
362 	struct amdgpu_crtc *amdgpu_crtc;
363 	struct common_irq_params *irq_params = interrupt_params;
364 	struct amdgpu_device *adev = irq_params->adev;
365 	unsigned long flags;
366 	struct drm_pending_vblank_event *e;
367 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
368 	bool vrr_active;
369 
370 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
371 
372 	/* IRQ could occur when in initial stage */
373 	/* TODO work and BO cleanup */
374 	if (amdgpu_crtc == NULL) {
375 		DC_LOG_PFLIP("CRTC is null, returning.\n");
376 		return;
377 	}
378 
379 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
380 
381 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
382 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
383 						 amdgpu_crtc->pflip_status,
384 						 AMDGPU_FLIP_SUBMITTED,
385 						 amdgpu_crtc->crtc_id,
386 						 amdgpu_crtc);
387 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
388 		return;
389 	}
390 
391 	/* page flip completed. */
392 	e = amdgpu_crtc->event;
393 	amdgpu_crtc->event = NULL;
394 
395 	WARN_ON(!e);
396 
397 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
398 
399 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
400 	if (!vrr_active ||
401 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
402 				      &v_blank_end, &hpos, &vpos) ||
403 	    (vpos < v_blank_start)) {
404 		/* Update to correct count and vblank timestamp if racing with
405 		 * vblank irq. This also updates to the correct vblank timestamp
406 		 * even in VRR mode, as scanout is past the front-porch atm.
407 		 */
408 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
409 
410 		/* Wake up userspace by sending the pageflip event with proper
411 		 * count and timestamp of vblank of flip completion.
412 		 */
413 		if (e) {
414 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
415 
416 			/* Event sent, so done with vblank for this flip */
417 			drm_crtc_vblank_put(&amdgpu_crtc->base);
418 		}
419 	} else if (e) {
420 		/* VRR active and inside front-porch: vblank count and
421 		 * timestamp for pageflip event will only be up to date after
422 		 * drm_crtc_handle_vblank() has been executed from late vblank
423 		 * irq handler after start of back-porch (vline 0). We queue the
424 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
425 		 * updated timestamp and count, once it runs after us.
426 		 *
427 		 * We need to open-code this instead of using the helper
428 		 * drm_crtc_arm_vblank_event(), as that helper would
429 		 * call drm_crtc_accurate_vblank_count(), which we must
430 		 * not call in VRR mode while we are in front-porch!
431 		 */
432 
433 		/* sequence will be replaced by real count during send-out. */
434 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
435 		e->pipe = amdgpu_crtc->crtc_id;
436 
437 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
438 		e = NULL;
439 	}
440 
441 	/* Keep track of vblank of this flip for flip throttling. We use the
442 	 * cooked hw counter, as that one incremented at start of this vblank
443 	 * of pageflip completion, so last_flip_vblank is the forbidden count
444 	 * for queueing new pageflips if vsync + VRR is enabled.
445 	 */
446 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
447 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
448 
449 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
450 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
451 
452 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
453 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
454 		     vrr_active, (int) !e);
455 }
456 
457 static void dm_vupdate_high_irq(void *interrupt_params)
458 {
459 	struct common_irq_params *irq_params = interrupt_params;
460 	struct amdgpu_device *adev = irq_params->adev;
461 	struct amdgpu_crtc *acrtc;
462 	struct drm_device *drm_dev;
463 	struct drm_vblank_crtc *vblank;
464 	ktime_t frame_duration_ns, previous_timestamp;
465 	unsigned long flags;
466 	int vrr_active;
467 
468 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
469 
470 	if (acrtc) {
471 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
472 		drm_dev = acrtc->base.dev;
473 		vblank = &drm_dev->vblank[acrtc->base.index];
474 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
475 		frame_duration_ns = vblank->time - previous_timestamp;
476 
477 		if (frame_duration_ns > 0) {
478 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
479 						frame_duration_ns,
480 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
481 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
482 		}
483 
484 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
485 			      acrtc->crtc_id,
486 			      vrr_active);
487 
488 		/* Core vblank handling is done here after end of front-porch in
489 		 * vrr mode, as vblank timestamping will give valid results
490 		 * while now done after front-porch. This will also deliver
491 		 * page-flip completion events that have been queued to us
492 		 * if a pageflip happened inside front-porch.
493 		 */
494 		if (vrr_active) {
495 			drm_crtc_handle_vblank(&acrtc->base);
496 
497 			/* BTR processing for pre-DCE12 ASICs */
498 			if (acrtc->dm_irq_params.stream &&
499 			    adev->family < AMDGPU_FAMILY_AI) {
500 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
501 				mod_freesync_handle_v_update(
502 				    adev->dm.freesync_module,
503 				    acrtc->dm_irq_params.stream,
504 				    &acrtc->dm_irq_params.vrr_params);
505 
506 				dc_stream_adjust_vmin_vmax(
507 				    adev->dm.dc,
508 				    acrtc->dm_irq_params.stream,
509 				    &acrtc->dm_irq_params.vrr_params.adjust);
510 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
511 			}
512 		}
513 	}
514 }
515 
516 /**
517  * dm_crtc_high_irq() - Handles CRTC interrupt
518  * @interrupt_params: used for determining the CRTC instance
519  *
520  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
521  * event handler.
522  */
523 static void dm_crtc_high_irq(void *interrupt_params)
524 {
525 	struct common_irq_params *irq_params = interrupt_params;
526 	struct amdgpu_device *adev = irq_params->adev;
527 	struct amdgpu_crtc *acrtc;
528 	unsigned long flags;
529 	int vrr_active;
530 
531 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
532 	if (!acrtc)
533 		return;
534 
535 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
536 
537 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
538 		      vrr_active, acrtc->dm_irq_params.active_planes);
539 
540 	/**
541 	 * Core vblank handling at start of front-porch is only possible
542 	 * in non-vrr mode, as only there vblank timestamping will give
543 	 * valid results while done in front-porch. Otherwise defer it
544 	 * to dm_vupdate_high_irq after end of front-porch.
545 	 */
546 	if (!vrr_active)
547 		drm_crtc_handle_vblank(&acrtc->base);
548 
549 	/**
550 	 * Following stuff must happen at start of vblank, for crc
551 	 * computation and below-the-range btr support in vrr mode.
552 	 */
553 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
554 
555 	/* BTR updates need to happen before VUPDATE on Vega and above. */
556 	if (adev->family < AMDGPU_FAMILY_AI)
557 		return;
558 
559 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
560 
561 	if (acrtc->dm_irq_params.stream &&
562 	    acrtc->dm_irq_params.vrr_params.supported &&
563 	    acrtc->dm_irq_params.freesync_config.state ==
564 		    VRR_STATE_ACTIVE_VARIABLE) {
565 		mod_freesync_handle_v_update(adev->dm.freesync_module,
566 					     acrtc->dm_irq_params.stream,
567 					     &acrtc->dm_irq_params.vrr_params);
568 
569 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
570 					   &acrtc->dm_irq_params.vrr_params.adjust);
571 	}
572 
573 	/*
574 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
575 	 * In that case, pageflip completion interrupts won't fire and pageflip
576 	 * completion events won't get delivered. Prevent this by sending
577 	 * pending pageflip events from here if a flip is still pending.
578 	 *
579 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
580 	 * avoid race conditions between flip programming and completion,
581 	 * which could cause too early flip completion events.
582 	 */
583 	if (adev->family >= AMDGPU_FAMILY_RV &&
584 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
585 	    acrtc->dm_irq_params.active_planes == 0) {
586 		if (acrtc->event) {
587 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
588 			acrtc->event = NULL;
589 			drm_crtc_vblank_put(&acrtc->base);
590 		}
591 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
592 	}
593 
594 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
595 }
596 
597 #if defined(CONFIG_DRM_AMD_DC_DCN)
598 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
599 /**
600  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601  * DCN generation ASICs
602  * @interrupt_params: interrupt parameters
603  *
604  * Used to set crc window/read out crc value at vertical line 0 position
605  */
606 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
607 {
608 	struct common_irq_params *irq_params = interrupt_params;
609 	struct amdgpu_device *adev = irq_params->adev;
610 	struct amdgpu_crtc *acrtc;
611 
612 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
613 
614 	if (!acrtc)
615 		return;
616 
617 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
618 }
619 #endif
620 
621 /**
622  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
623  * @interrupt_params: used for determining the Outbox instance
624  *
625  * Handles the Outbox Interrupt
626  * event handler.
627  */
628 #define DMUB_TRACE_MAX_READ 64
629 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
630 {
631 	struct dmub_notification notify;
632 	struct common_irq_params *irq_params = interrupt_params;
633 	struct amdgpu_device *adev = irq_params->adev;
634 	struct amdgpu_display_manager *dm = &adev->dm;
635 	struct dmcub_trace_buf_entry entry = { 0 };
636 	uint32_t count = 0;
637 
638 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
639 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
640 			do {
641 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
642 			} while (notify.pending_notification);
643 
644 			if (adev->dm.dmub_notify)
645 				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
646 			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
647 				complete(&adev->dm.dmub_aux_transfer_done);
648 			// TODO : HPD Implementation
649 
650 		} else {
651 			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
652 		}
653 	}
654 
655 
656 	do {
657 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
658 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
659 							entry.param0, entry.param1);
660 
661 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
662 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
663 		} else
664 			break;
665 
666 		count++;
667 
668 	} while (count <= DMUB_TRACE_MAX_READ);
669 
670 	ASSERT(count <= DMUB_TRACE_MAX_READ);
671 }
672 #endif
673 
674 static int dm_set_clockgating_state(void *handle,
675 		  enum amd_clockgating_state state)
676 {
677 	return 0;
678 }
679 
680 static int dm_set_powergating_state(void *handle,
681 		  enum amd_powergating_state state)
682 {
683 	return 0;
684 }
685 
686 /* Prototypes of private functions */
687 static int dm_early_init(void* handle);
688 
689 /* Allocate memory for FBC compressed data  */
690 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
691 {
692 	struct drm_device *dev = connector->dev;
693 	struct amdgpu_device *adev = drm_to_adev(dev);
694 	struct dm_compressor_info *compressor = &adev->dm.compressor;
695 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
696 	struct drm_display_mode *mode;
697 	unsigned long max_size = 0;
698 
699 	if (adev->dm.dc->fbc_compressor == NULL)
700 		return;
701 
702 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
703 		return;
704 
705 	if (compressor->bo_ptr)
706 		return;
707 
708 
709 	list_for_each_entry(mode, &connector->modes, head) {
710 		if (max_size < mode->htotal * mode->vtotal)
711 			max_size = mode->htotal * mode->vtotal;
712 	}
713 
714 	if (max_size) {
715 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
716 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
717 			    &compressor->gpu_addr, &compressor->cpu_addr);
718 
719 		if (r)
720 			DRM_ERROR("DM: Failed to initialize FBC\n");
721 		else {
722 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
723 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
724 		}
725 
726 	}
727 
728 }
729 
730 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
731 					  int pipe, bool *enabled,
732 					  unsigned char *buf, int max_bytes)
733 {
734 	struct drm_device *dev = dev_get_drvdata(kdev);
735 	struct amdgpu_device *adev = drm_to_adev(dev);
736 	struct drm_connector *connector;
737 	struct drm_connector_list_iter conn_iter;
738 	struct amdgpu_dm_connector *aconnector;
739 	int ret = 0;
740 
741 	*enabled = false;
742 
743 	mutex_lock(&adev->dm.audio_lock);
744 
745 	drm_connector_list_iter_begin(dev, &conn_iter);
746 	drm_for_each_connector_iter(connector, &conn_iter) {
747 		aconnector = to_amdgpu_dm_connector(connector);
748 		if (aconnector->audio_inst != port)
749 			continue;
750 
751 		*enabled = true;
752 		ret = drm_eld_size(connector->eld);
753 		memcpy(buf, connector->eld, min(max_bytes, ret));
754 
755 		break;
756 	}
757 	drm_connector_list_iter_end(&conn_iter);
758 
759 	mutex_unlock(&adev->dm.audio_lock);
760 
761 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
762 
763 	return ret;
764 }
765 
766 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
767 	.get_eld = amdgpu_dm_audio_component_get_eld,
768 };
769 
770 static int amdgpu_dm_audio_component_bind(struct device *kdev,
771 				       struct device *hda_kdev, void *data)
772 {
773 	struct drm_device *dev = dev_get_drvdata(kdev);
774 	struct amdgpu_device *adev = drm_to_adev(dev);
775 	struct drm_audio_component *acomp = data;
776 
777 	acomp->ops = &amdgpu_dm_audio_component_ops;
778 	acomp->dev = kdev;
779 	adev->dm.audio_component = acomp;
780 
781 	return 0;
782 }
783 
784 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
785 					  struct device *hda_kdev, void *data)
786 {
787 	struct drm_device *dev = dev_get_drvdata(kdev);
788 	struct amdgpu_device *adev = drm_to_adev(dev);
789 	struct drm_audio_component *acomp = data;
790 
791 	acomp->ops = NULL;
792 	acomp->dev = NULL;
793 	adev->dm.audio_component = NULL;
794 }
795 
796 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
797 	.bind	= amdgpu_dm_audio_component_bind,
798 	.unbind	= amdgpu_dm_audio_component_unbind,
799 };
800 
801 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
802 {
803 	int i, ret;
804 
805 	if (!amdgpu_audio)
806 		return 0;
807 
808 	adev->mode_info.audio.enabled = true;
809 
810 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
811 
812 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
813 		adev->mode_info.audio.pin[i].channels = -1;
814 		adev->mode_info.audio.pin[i].rate = -1;
815 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
816 		adev->mode_info.audio.pin[i].status_bits = 0;
817 		adev->mode_info.audio.pin[i].category_code = 0;
818 		adev->mode_info.audio.pin[i].connected = false;
819 		adev->mode_info.audio.pin[i].id =
820 			adev->dm.dc->res_pool->audios[i]->inst;
821 		adev->mode_info.audio.pin[i].offset = 0;
822 	}
823 
824 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
825 	if (ret < 0)
826 		return ret;
827 
828 	adev->dm.audio_registered = true;
829 
830 	return 0;
831 }
832 
833 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
834 {
835 	if (!amdgpu_audio)
836 		return;
837 
838 	if (!adev->mode_info.audio.enabled)
839 		return;
840 
841 	if (adev->dm.audio_registered) {
842 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
843 		adev->dm.audio_registered = false;
844 	}
845 
846 	/* TODO: Disable audio? */
847 
848 	adev->mode_info.audio.enabled = false;
849 }
850 
851 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
852 {
853 	struct drm_audio_component *acomp = adev->dm.audio_component;
854 
855 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
856 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
857 
858 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
859 						 pin, -1);
860 	}
861 }
862 
863 static int dm_dmub_hw_init(struct amdgpu_device *adev)
864 {
865 	const struct dmcub_firmware_header_v1_0 *hdr;
866 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
867 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
868 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
869 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
870 	struct abm *abm = adev->dm.dc->res_pool->abm;
871 	struct dmub_srv_hw_params hw_params;
872 	enum dmub_status status;
873 	const unsigned char *fw_inst_const, *fw_bss_data;
874 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
875 	bool has_hw_support;
876 
877 	if (!dmub_srv)
878 		/* DMUB isn't supported on the ASIC. */
879 		return 0;
880 
881 	if (!fb_info) {
882 		DRM_ERROR("No framebuffer info for DMUB service.\n");
883 		return -EINVAL;
884 	}
885 
886 	if (!dmub_fw) {
887 		/* Firmware required for DMUB support. */
888 		DRM_ERROR("No firmware provided for DMUB.\n");
889 		return -EINVAL;
890 	}
891 
892 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
893 	if (status != DMUB_STATUS_OK) {
894 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
895 		return -EINVAL;
896 	}
897 
898 	if (!has_hw_support) {
899 		DRM_INFO("DMUB unsupported on ASIC\n");
900 		return 0;
901 	}
902 
903 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
904 
905 	fw_inst_const = dmub_fw->data +
906 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
907 			PSP_HEADER_BYTES;
908 
909 	fw_bss_data = dmub_fw->data +
910 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
911 		      le32_to_cpu(hdr->inst_const_bytes);
912 
913 	/* Copy firmware and bios info into FB memory. */
914 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
915 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
916 
917 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
918 
919 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
920 	 * amdgpu_ucode_init_single_fw will load dmub firmware
921 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
922 	 * will be done by dm_dmub_hw_init
923 	 */
924 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
925 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
926 				fw_inst_const_size);
927 	}
928 
929 	if (fw_bss_data_size)
930 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
931 		       fw_bss_data, fw_bss_data_size);
932 
933 	/* Copy firmware bios info into FB memory. */
934 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
935 	       adev->bios_size);
936 
937 	/* Reset regions that need to be reset. */
938 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
939 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
940 
941 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
942 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
943 
944 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
945 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
946 
947 	/* Initialize hardware. */
948 	memset(&hw_params, 0, sizeof(hw_params));
949 	hw_params.fb_base = adev->gmc.fb_start;
950 	hw_params.fb_offset = adev->gmc.aper_base;
951 
952 	/* backdoor load firmware and trigger dmub running */
953 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
954 		hw_params.load_inst_const = true;
955 
956 	if (dmcu)
957 		hw_params.psp_version = dmcu->psp_version;
958 
959 	for (i = 0; i < fb_info->num_fb; ++i)
960 		hw_params.fb[i] = &fb_info->fb[i];
961 
962 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
963 	if (status != DMUB_STATUS_OK) {
964 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
965 		return -EINVAL;
966 	}
967 
968 	/* Wait for firmware load to finish. */
969 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
970 	if (status != DMUB_STATUS_OK)
971 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
972 
973 	/* Init DMCU and ABM if available. */
974 	if (dmcu && abm) {
975 		dmcu->funcs->dmcu_init(dmcu);
976 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
977 	}
978 
979 	if (!adev->dm.dc->ctx->dmub_srv)
980 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
981 	if (!adev->dm.dc->ctx->dmub_srv) {
982 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
983 		return -ENOMEM;
984 	}
985 
986 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
987 		 adev->dm.dmcub_fw_version);
988 
989 	return 0;
990 }
991 
992 #if defined(CONFIG_DRM_AMD_DC_DCN)
993 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
994 {
995 	uint64_t pt_base;
996 	uint32_t logical_addr_low;
997 	uint32_t logical_addr_high;
998 	uint32_t agp_base, agp_bot, agp_top;
999 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1000 
1001 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1002 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1003 
1004 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1005 		/*
1006 		 * Raven2 has a HW issue that it is unable to use the vram which
1007 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1008 		 * workaround that increase system aperture high address (add 1)
1009 		 * to get rid of the VM fault and hardware hang.
1010 		 */
1011 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1012 	else
1013 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1014 
1015 	agp_base = 0;
1016 	agp_bot = adev->gmc.agp_start >> 24;
1017 	agp_top = adev->gmc.agp_end >> 24;
1018 
1019 
1020 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1021 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1022 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1023 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1024 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1025 	page_table_base.low_part = lower_32_bits(pt_base);
1026 
1027 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1028 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1029 
1030 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1031 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1032 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1033 
1034 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1035 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1036 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1037 
1038 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1039 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1040 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1041 
1042 	pa_config->is_hvm_enabled = 0;
1043 
1044 }
1045 #endif
1046 #if defined(CONFIG_DRM_AMD_DC_DCN)
1047 static void event_mall_stutter(struct work_struct *work)
1048 {
1049 
1050 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1051 	struct amdgpu_display_manager *dm = vblank_work->dm;
1052 
1053 	mutex_lock(&dm->dc_lock);
1054 
1055 	if (vblank_work->enable)
1056 		dm->active_vblank_irq_count++;
1057 	else if(dm->active_vblank_irq_count)
1058 		dm->active_vblank_irq_count--;
1059 
1060 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1061 
1062 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1063 
1064 	mutex_unlock(&dm->dc_lock);
1065 }
1066 
1067 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1068 {
1069 
1070 	int max_caps = dc->caps.max_links;
1071 	struct vblank_workqueue *vblank_work;
1072 	int i = 0;
1073 
1074 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1075 	if (ZERO_OR_NULL_PTR(vblank_work)) {
1076 		kfree(vblank_work);
1077 		return NULL;
1078 	}
1079 
1080 	for (i = 0; i < max_caps; i++)
1081 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1082 
1083 	return vblank_work;
1084 }
1085 #endif
1086 static int amdgpu_dm_init(struct amdgpu_device *adev)
1087 {
1088 	struct dc_init_data init_data;
1089 #ifdef CONFIG_DRM_AMD_DC_HDCP
1090 	struct dc_callback_init init_params;
1091 #endif
1092 	int r;
1093 
1094 	adev->dm.ddev = adev_to_drm(adev);
1095 	adev->dm.adev = adev;
1096 
1097 	/* Zero all the fields */
1098 	memset(&init_data, 0, sizeof(init_data));
1099 #ifdef CONFIG_DRM_AMD_DC_HDCP
1100 	memset(&init_params, 0, sizeof(init_params));
1101 #endif
1102 
1103 	mutex_init(&adev->dm.dc_lock);
1104 	mutex_init(&adev->dm.audio_lock);
1105 #if defined(CONFIG_DRM_AMD_DC_DCN)
1106 	spin_lock_init(&adev->dm.vblank_lock);
1107 #endif
1108 
1109 	if(amdgpu_dm_irq_init(adev)) {
1110 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1111 		goto error;
1112 	}
1113 
1114 	init_data.asic_id.chip_family = adev->family;
1115 
1116 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1117 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1118 
1119 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1120 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1121 	init_data.asic_id.atombios_base_address =
1122 		adev->mode_info.atom_context->bios;
1123 
1124 	init_data.driver = adev;
1125 
1126 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1127 
1128 	if (!adev->dm.cgs_device) {
1129 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1130 		goto error;
1131 	}
1132 
1133 	init_data.cgs_device = adev->dm.cgs_device;
1134 
1135 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1136 
1137 	switch (adev->asic_type) {
1138 	case CHIP_CARRIZO:
1139 	case CHIP_STONEY:
1140 	case CHIP_RAVEN:
1141 	case CHIP_RENOIR:
1142 		init_data.flags.gpu_vm_support = true;
1143 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1144 			init_data.flags.disable_dmcu = true;
1145 		break;
1146 	case CHIP_VANGOGH:
1147 	case CHIP_YELLOW_CARP:
1148 		init_data.flags.gpu_vm_support = true;
1149 		break;
1150 	default:
1151 		break;
1152 	}
1153 
1154 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1155 		init_data.flags.fbc_support = true;
1156 
1157 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1158 		init_data.flags.multi_mon_pp_mclk_switch = true;
1159 
1160 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1161 		init_data.flags.disable_fractional_pwm = true;
1162 
1163 	init_data.flags.power_down_display_on_boot = true;
1164 
1165 	INIT_LIST_HEAD(&adev->dm.da_list);
1166 	/* Display Core create. */
1167 	adev->dm.dc = dc_create(&init_data);
1168 
1169 	if (adev->dm.dc) {
1170 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1171 	} else {
1172 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1173 		goto error;
1174 	}
1175 
1176 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1177 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1178 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1179 	}
1180 
1181 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1182 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1183 
1184 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1185 		adev->dm.dc->debug.disable_stutter = true;
1186 
1187 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1188 		adev->dm.dc->debug.disable_dsc = true;
1189 
1190 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1191 		adev->dm.dc->debug.disable_clock_gate = true;
1192 
1193 	r = dm_dmub_hw_init(adev);
1194 	if (r) {
1195 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1196 		goto error;
1197 	}
1198 
1199 	dc_hardware_init(adev->dm.dc);
1200 
1201 #if defined(CONFIG_DRM_AMD_DC_DCN)
1202 	if (adev->apu_flags) {
1203 		struct dc_phy_addr_space_config pa_config;
1204 
1205 		mmhub_read_system_context(adev, &pa_config);
1206 
1207 		// Call the DC init_memory func
1208 		dc_setup_system_context(adev->dm.dc, &pa_config);
1209 	}
1210 #endif
1211 
1212 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1213 	if (!adev->dm.freesync_module) {
1214 		DRM_ERROR(
1215 		"amdgpu: failed to initialize freesync_module.\n");
1216 	} else
1217 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1218 				adev->dm.freesync_module);
1219 
1220 	amdgpu_dm_init_color_mod();
1221 
1222 #if defined(CONFIG_DRM_AMD_DC_DCN)
1223 	if (adev->dm.dc->caps.max_links > 0) {
1224 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1225 
1226 		if (!adev->dm.vblank_workqueue)
1227 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1228 		else
1229 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1230 	}
1231 #endif
1232 
1233 #ifdef CONFIG_DRM_AMD_DC_HDCP
1234 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1235 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1236 
1237 		if (!adev->dm.hdcp_workqueue)
1238 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1239 		else
1240 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1241 
1242 		dc_init_callbacks(adev->dm.dc, &init_params);
1243 	}
1244 #endif
1245 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1246 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1247 #endif
1248 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1249 		init_completion(&adev->dm.dmub_aux_transfer_done);
1250 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1251 		if (!adev->dm.dmub_notify) {
1252 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1253 			goto error;
1254 		}
1255 		amdgpu_dm_outbox_init(adev);
1256 	}
1257 
1258 	if (amdgpu_dm_initialize_drm_device(adev)) {
1259 		DRM_ERROR(
1260 		"amdgpu: failed to initialize sw for display support.\n");
1261 		goto error;
1262 	}
1263 
1264 	/* create fake encoders for MST */
1265 	dm_dp_create_fake_mst_encoders(adev);
1266 
1267 	/* TODO: Add_display_info? */
1268 
1269 	/* TODO use dynamic cursor width */
1270 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1271 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1272 
1273 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1274 		DRM_ERROR(
1275 		"amdgpu: failed to initialize sw for display support.\n");
1276 		goto error;
1277 	}
1278 
1279 
1280 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1281 
1282 	return 0;
1283 error:
1284 	amdgpu_dm_fini(adev);
1285 
1286 	return -EINVAL;
1287 }
1288 
1289 static int amdgpu_dm_early_fini(void *handle)
1290 {
1291 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1292 
1293 	amdgpu_dm_audio_fini(adev);
1294 
1295 	return 0;
1296 }
1297 
1298 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1299 {
1300 	int i;
1301 
1302 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1303 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1304 	}
1305 
1306 	amdgpu_dm_destroy_drm_device(&adev->dm);
1307 
1308 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1309 	if (adev->dm.crc_rd_wrk) {
1310 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1311 		kfree(adev->dm.crc_rd_wrk);
1312 		adev->dm.crc_rd_wrk = NULL;
1313 	}
1314 #endif
1315 #ifdef CONFIG_DRM_AMD_DC_HDCP
1316 	if (adev->dm.hdcp_workqueue) {
1317 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1318 		adev->dm.hdcp_workqueue = NULL;
1319 	}
1320 
1321 	if (adev->dm.dc)
1322 		dc_deinit_callbacks(adev->dm.dc);
1323 #endif
1324 
1325 #if defined(CONFIG_DRM_AMD_DC_DCN)
1326 	if (adev->dm.vblank_workqueue) {
1327 		adev->dm.vblank_workqueue->dm = NULL;
1328 		kfree(adev->dm.vblank_workqueue);
1329 		adev->dm.vblank_workqueue = NULL;
1330 	}
1331 #endif
1332 
1333 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1334 
1335 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1336 		kfree(adev->dm.dmub_notify);
1337 		adev->dm.dmub_notify = NULL;
1338 	}
1339 
1340 	if (adev->dm.dmub_bo)
1341 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1342 				      &adev->dm.dmub_bo_gpu_addr,
1343 				      &adev->dm.dmub_bo_cpu_addr);
1344 
1345 	/* DC Destroy TODO: Replace destroy DAL */
1346 	if (adev->dm.dc)
1347 		dc_destroy(&adev->dm.dc);
1348 	/*
1349 	 * TODO: pageflip, vlank interrupt
1350 	 *
1351 	 * amdgpu_dm_irq_fini(adev);
1352 	 */
1353 
1354 	if (adev->dm.cgs_device) {
1355 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1356 		adev->dm.cgs_device = NULL;
1357 	}
1358 	if (adev->dm.freesync_module) {
1359 		mod_freesync_destroy(adev->dm.freesync_module);
1360 		adev->dm.freesync_module = NULL;
1361 	}
1362 
1363 	mutex_destroy(&adev->dm.audio_lock);
1364 	mutex_destroy(&adev->dm.dc_lock);
1365 
1366 	return;
1367 }
1368 
1369 static int load_dmcu_fw(struct amdgpu_device *adev)
1370 {
1371 	const char *fw_name_dmcu = NULL;
1372 	int r;
1373 	const struct dmcu_firmware_header_v1_0 *hdr;
1374 
1375 	switch(adev->asic_type) {
1376 #if defined(CONFIG_DRM_AMD_DC_SI)
1377 	case CHIP_TAHITI:
1378 	case CHIP_PITCAIRN:
1379 	case CHIP_VERDE:
1380 	case CHIP_OLAND:
1381 #endif
1382 	case CHIP_BONAIRE:
1383 	case CHIP_HAWAII:
1384 	case CHIP_KAVERI:
1385 	case CHIP_KABINI:
1386 	case CHIP_MULLINS:
1387 	case CHIP_TONGA:
1388 	case CHIP_FIJI:
1389 	case CHIP_CARRIZO:
1390 	case CHIP_STONEY:
1391 	case CHIP_POLARIS11:
1392 	case CHIP_POLARIS10:
1393 	case CHIP_POLARIS12:
1394 	case CHIP_VEGAM:
1395 	case CHIP_VEGA10:
1396 	case CHIP_VEGA12:
1397 	case CHIP_VEGA20:
1398 	case CHIP_NAVI10:
1399 	case CHIP_NAVI14:
1400 	case CHIP_RENOIR:
1401 	case CHIP_SIENNA_CICHLID:
1402 	case CHIP_NAVY_FLOUNDER:
1403 	case CHIP_DIMGREY_CAVEFISH:
1404 	case CHIP_BEIGE_GOBY:
1405 	case CHIP_VANGOGH:
1406 	case CHIP_YELLOW_CARP:
1407 		return 0;
1408 	case CHIP_NAVI12:
1409 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1410 		break;
1411 	case CHIP_RAVEN:
1412 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1413 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1414 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1415 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1416 		else
1417 			return 0;
1418 		break;
1419 	default:
1420 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1421 		return -EINVAL;
1422 	}
1423 
1424 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1425 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1426 		return 0;
1427 	}
1428 
1429 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1430 	if (r == -ENOENT) {
1431 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1432 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1433 		adev->dm.fw_dmcu = NULL;
1434 		return 0;
1435 	}
1436 	if (r) {
1437 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1438 			fw_name_dmcu);
1439 		return r;
1440 	}
1441 
1442 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1443 	if (r) {
1444 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1445 			fw_name_dmcu);
1446 		release_firmware(adev->dm.fw_dmcu);
1447 		adev->dm.fw_dmcu = NULL;
1448 		return r;
1449 	}
1450 
1451 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1452 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1453 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1454 	adev->firmware.fw_size +=
1455 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1456 
1457 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1458 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1459 	adev->firmware.fw_size +=
1460 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1461 
1462 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1463 
1464 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1465 
1466 	return 0;
1467 }
1468 
1469 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1470 {
1471 	struct amdgpu_device *adev = ctx;
1472 
1473 	return dm_read_reg(adev->dm.dc->ctx, address);
1474 }
1475 
1476 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1477 				     uint32_t value)
1478 {
1479 	struct amdgpu_device *adev = ctx;
1480 
1481 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1482 }
1483 
1484 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1485 {
1486 	struct dmub_srv_create_params create_params;
1487 	struct dmub_srv_region_params region_params;
1488 	struct dmub_srv_region_info region_info;
1489 	struct dmub_srv_fb_params fb_params;
1490 	struct dmub_srv_fb_info *fb_info;
1491 	struct dmub_srv *dmub_srv;
1492 	const struct dmcub_firmware_header_v1_0 *hdr;
1493 	const char *fw_name_dmub;
1494 	enum dmub_asic dmub_asic;
1495 	enum dmub_status status;
1496 	int r;
1497 
1498 	switch (adev->asic_type) {
1499 	case CHIP_RENOIR:
1500 		dmub_asic = DMUB_ASIC_DCN21;
1501 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1502 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1503 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1504 		break;
1505 	case CHIP_SIENNA_CICHLID:
1506 		dmub_asic = DMUB_ASIC_DCN30;
1507 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1508 		break;
1509 	case CHIP_NAVY_FLOUNDER:
1510 		dmub_asic = DMUB_ASIC_DCN30;
1511 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1512 		break;
1513 	case CHIP_VANGOGH:
1514 		dmub_asic = DMUB_ASIC_DCN301;
1515 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1516 		break;
1517 	case CHIP_DIMGREY_CAVEFISH:
1518 		dmub_asic = DMUB_ASIC_DCN302;
1519 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1520 		break;
1521 	case CHIP_BEIGE_GOBY:
1522 		dmub_asic = DMUB_ASIC_DCN303;
1523 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1524 		break;
1525 	case CHIP_YELLOW_CARP:
1526 		dmub_asic = DMUB_ASIC_DCN31;
1527 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1528 		break;
1529 
1530 	default:
1531 		/* ASIC doesn't support DMUB. */
1532 		return 0;
1533 	}
1534 
1535 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1536 	if (r) {
1537 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1538 		return 0;
1539 	}
1540 
1541 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1542 	if (r) {
1543 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1544 		return 0;
1545 	}
1546 
1547 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1548 
1549 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1550 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1551 			AMDGPU_UCODE_ID_DMCUB;
1552 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1553 			adev->dm.dmub_fw;
1554 		adev->firmware.fw_size +=
1555 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1556 
1557 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1558 			 adev->dm.dmcub_fw_version);
1559 	}
1560 
1561 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1562 
1563 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1564 	dmub_srv = adev->dm.dmub_srv;
1565 
1566 	if (!dmub_srv) {
1567 		DRM_ERROR("Failed to allocate DMUB service!\n");
1568 		return -ENOMEM;
1569 	}
1570 
1571 	memset(&create_params, 0, sizeof(create_params));
1572 	create_params.user_ctx = adev;
1573 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1574 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1575 	create_params.asic = dmub_asic;
1576 
1577 	/* Create the DMUB service. */
1578 	status = dmub_srv_create(dmub_srv, &create_params);
1579 	if (status != DMUB_STATUS_OK) {
1580 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1581 		return -EINVAL;
1582 	}
1583 
1584 	/* Calculate the size of all the regions for the DMUB service. */
1585 	memset(&region_params, 0, sizeof(region_params));
1586 
1587 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1588 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1589 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1590 	region_params.vbios_size = adev->bios_size;
1591 	region_params.fw_bss_data = region_params.bss_data_size ?
1592 		adev->dm.dmub_fw->data +
1593 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1594 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1595 	region_params.fw_inst_const =
1596 		adev->dm.dmub_fw->data +
1597 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1598 		PSP_HEADER_BYTES;
1599 
1600 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1601 					   &region_info);
1602 
1603 	if (status != DMUB_STATUS_OK) {
1604 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1605 		return -EINVAL;
1606 	}
1607 
1608 	/*
1609 	 * Allocate a framebuffer based on the total size of all the regions.
1610 	 * TODO: Move this into GART.
1611 	 */
1612 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1613 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1614 				    &adev->dm.dmub_bo_gpu_addr,
1615 				    &adev->dm.dmub_bo_cpu_addr);
1616 	if (r)
1617 		return r;
1618 
1619 	/* Rebase the regions on the framebuffer address. */
1620 	memset(&fb_params, 0, sizeof(fb_params));
1621 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1622 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1623 	fb_params.region_info = &region_info;
1624 
1625 	adev->dm.dmub_fb_info =
1626 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1627 	fb_info = adev->dm.dmub_fb_info;
1628 
1629 	if (!fb_info) {
1630 		DRM_ERROR(
1631 			"Failed to allocate framebuffer info for DMUB service!\n");
1632 		return -ENOMEM;
1633 	}
1634 
1635 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1636 	if (status != DMUB_STATUS_OK) {
1637 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1638 		return -EINVAL;
1639 	}
1640 
1641 	return 0;
1642 }
1643 
1644 static int dm_sw_init(void *handle)
1645 {
1646 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1647 	int r;
1648 
1649 	r = dm_dmub_sw_init(adev);
1650 	if (r)
1651 		return r;
1652 
1653 	return load_dmcu_fw(adev);
1654 }
1655 
1656 static int dm_sw_fini(void *handle)
1657 {
1658 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1659 
1660 	kfree(adev->dm.dmub_fb_info);
1661 	adev->dm.dmub_fb_info = NULL;
1662 
1663 	if (adev->dm.dmub_srv) {
1664 		dmub_srv_destroy(adev->dm.dmub_srv);
1665 		adev->dm.dmub_srv = NULL;
1666 	}
1667 
1668 	release_firmware(adev->dm.dmub_fw);
1669 	adev->dm.dmub_fw = NULL;
1670 
1671 	release_firmware(adev->dm.fw_dmcu);
1672 	adev->dm.fw_dmcu = NULL;
1673 
1674 	return 0;
1675 }
1676 
1677 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1678 {
1679 	struct amdgpu_dm_connector *aconnector;
1680 	struct drm_connector *connector;
1681 	struct drm_connector_list_iter iter;
1682 	int ret = 0;
1683 
1684 	drm_connector_list_iter_begin(dev, &iter);
1685 	drm_for_each_connector_iter(connector, &iter) {
1686 		aconnector = to_amdgpu_dm_connector(connector);
1687 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1688 		    aconnector->mst_mgr.aux) {
1689 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1690 					 aconnector,
1691 					 aconnector->base.base.id);
1692 
1693 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1694 			if (ret < 0) {
1695 				DRM_ERROR("DM_MST: Failed to start MST\n");
1696 				aconnector->dc_link->type =
1697 					dc_connection_single;
1698 				break;
1699 			}
1700 		}
1701 	}
1702 	drm_connector_list_iter_end(&iter);
1703 
1704 	return ret;
1705 }
1706 
1707 static int dm_late_init(void *handle)
1708 {
1709 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1710 
1711 	struct dmcu_iram_parameters params;
1712 	unsigned int linear_lut[16];
1713 	int i;
1714 	struct dmcu *dmcu = NULL;
1715 
1716 	dmcu = adev->dm.dc->res_pool->dmcu;
1717 
1718 	for (i = 0; i < 16; i++)
1719 		linear_lut[i] = 0xFFFF * i / 15;
1720 
1721 	params.set = 0;
1722 	params.backlight_ramping_start = 0xCCCC;
1723 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1724 	params.backlight_lut_array_size = 16;
1725 	params.backlight_lut_array = linear_lut;
1726 
1727 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1728 	 * 0xFFFF x 0.01 = 0x28F
1729 	 */
1730 	params.min_abm_backlight = 0x28F;
1731 	/* In the case where abm is implemented on dmcub,
1732 	* dmcu object will be null.
1733 	* ABM 2.4 and up are implemented on dmcub.
1734 	*/
1735 	if (dmcu) {
1736 		if (!dmcu_load_iram(dmcu, params))
1737 			return -EINVAL;
1738 	} else if (adev->dm.dc->ctx->dmub_srv) {
1739 		struct dc_link *edp_links[MAX_NUM_EDP];
1740 		int edp_num;
1741 
1742 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
1743 		for (i = 0; i < edp_num; i++) {
1744 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1745 				return -EINVAL;
1746 		}
1747 	}
1748 
1749 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1750 }
1751 
1752 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1753 {
1754 	struct amdgpu_dm_connector *aconnector;
1755 	struct drm_connector *connector;
1756 	struct drm_connector_list_iter iter;
1757 	struct drm_dp_mst_topology_mgr *mgr;
1758 	int ret;
1759 	bool need_hotplug = false;
1760 
1761 	drm_connector_list_iter_begin(dev, &iter);
1762 	drm_for_each_connector_iter(connector, &iter) {
1763 		aconnector = to_amdgpu_dm_connector(connector);
1764 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1765 		    aconnector->mst_port)
1766 			continue;
1767 
1768 		mgr = &aconnector->mst_mgr;
1769 
1770 		if (suspend) {
1771 			drm_dp_mst_topology_mgr_suspend(mgr);
1772 		} else {
1773 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1774 			if (ret < 0) {
1775 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1776 				need_hotplug = true;
1777 			}
1778 		}
1779 	}
1780 	drm_connector_list_iter_end(&iter);
1781 
1782 	if (need_hotplug)
1783 		drm_kms_helper_hotplug_event(dev);
1784 }
1785 
1786 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1787 {
1788 	struct smu_context *smu = &adev->smu;
1789 	int ret = 0;
1790 
1791 	if (!is_support_sw_smu(adev))
1792 		return 0;
1793 
1794 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1795 	 * on window driver dc implementation.
1796 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1797 	 * should be passed to smu during boot up and resume from s3.
1798 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1799 	 * dcn20_resource_construct
1800 	 * then call pplib functions below to pass the settings to smu:
1801 	 * smu_set_watermarks_for_clock_ranges
1802 	 * smu_set_watermarks_table
1803 	 * navi10_set_watermarks_table
1804 	 * smu_write_watermarks_table
1805 	 *
1806 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1807 	 * dc has implemented different flow for window driver:
1808 	 * dc_hardware_init / dc_set_power_state
1809 	 * dcn10_init_hw
1810 	 * notify_wm_ranges
1811 	 * set_wm_ranges
1812 	 * -- Linux
1813 	 * smu_set_watermarks_for_clock_ranges
1814 	 * renoir_set_watermarks_table
1815 	 * smu_write_watermarks_table
1816 	 *
1817 	 * For Linux,
1818 	 * dc_hardware_init -> amdgpu_dm_init
1819 	 * dc_set_power_state --> dm_resume
1820 	 *
1821 	 * therefore, this function apply to navi10/12/14 but not Renoir
1822 	 * *
1823 	 */
1824 	switch(adev->asic_type) {
1825 	case CHIP_NAVI10:
1826 	case CHIP_NAVI14:
1827 	case CHIP_NAVI12:
1828 		break;
1829 	default:
1830 		return 0;
1831 	}
1832 
1833 	ret = smu_write_watermarks_table(smu);
1834 	if (ret) {
1835 		DRM_ERROR("Failed to update WMTABLE!\n");
1836 		return ret;
1837 	}
1838 
1839 	return 0;
1840 }
1841 
1842 /**
1843  * dm_hw_init() - Initialize DC device
1844  * @handle: The base driver device containing the amdgpu_dm device.
1845  *
1846  * Initialize the &struct amdgpu_display_manager device. This involves calling
1847  * the initializers of each DM component, then populating the struct with them.
1848  *
1849  * Although the function implies hardware initialization, both hardware and
1850  * software are initialized here. Splitting them out to their relevant init
1851  * hooks is a future TODO item.
1852  *
1853  * Some notable things that are initialized here:
1854  *
1855  * - Display Core, both software and hardware
1856  * - DC modules that we need (freesync and color management)
1857  * - DRM software states
1858  * - Interrupt sources and handlers
1859  * - Vblank support
1860  * - Debug FS entries, if enabled
1861  */
1862 static int dm_hw_init(void *handle)
1863 {
1864 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1865 	/* Create DAL display manager */
1866 	amdgpu_dm_init(adev);
1867 	amdgpu_dm_hpd_init(adev);
1868 
1869 	return 0;
1870 }
1871 
1872 /**
1873  * dm_hw_fini() - Teardown DC device
1874  * @handle: The base driver device containing the amdgpu_dm device.
1875  *
1876  * Teardown components within &struct amdgpu_display_manager that require
1877  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1878  * were loaded. Also flush IRQ workqueues and disable them.
1879  */
1880 static int dm_hw_fini(void *handle)
1881 {
1882 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1883 
1884 	amdgpu_dm_hpd_fini(adev);
1885 
1886 	amdgpu_dm_irq_fini(adev);
1887 	amdgpu_dm_fini(adev);
1888 	return 0;
1889 }
1890 
1891 
1892 static int dm_enable_vblank(struct drm_crtc *crtc);
1893 static void dm_disable_vblank(struct drm_crtc *crtc);
1894 
1895 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1896 				 struct dc_state *state, bool enable)
1897 {
1898 	enum dc_irq_source irq_source;
1899 	struct amdgpu_crtc *acrtc;
1900 	int rc = -EBUSY;
1901 	int i = 0;
1902 
1903 	for (i = 0; i < state->stream_count; i++) {
1904 		acrtc = get_crtc_by_otg_inst(
1905 				adev, state->stream_status[i].primary_otg_inst);
1906 
1907 		if (acrtc && state->stream_status[i].plane_count != 0) {
1908 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1909 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1910 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1911 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1912 			if (rc)
1913 				DRM_WARN("Failed to %s pflip interrupts\n",
1914 					 enable ? "enable" : "disable");
1915 
1916 			if (enable) {
1917 				rc = dm_enable_vblank(&acrtc->base);
1918 				if (rc)
1919 					DRM_WARN("Failed to enable vblank interrupts\n");
1920 			} else {
1921 				dm_disable_vblank(&acrtc->base);
1922 			}
1923 
1924 		}
1925 	}
1926 
1927 }
1928 
1929 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1930 {
1931 	struct dc_state *context = NULL;
1932 	enum dc_status res = DC_ERROR_UNEXPECTED;
1933 	int i;
1934 	struct dc_stream_state *del_streams[MAX_PIPES];
1935 	int del_streams_count = 0;
1936 
1937 	memset(del_streams, 0, sizeof(del_streams));
1938 
1939 	context = dc_create_state(dc);
1940 	if (context == NULL)
1941 		goto context_alloc_fail;
1942 
1943 	dc_resource_state_copy_construct_current(dc, context);
1944 
1945 	/* First remove from context all streams */
1946 	for (i = 0; i < context->stream_count; i++) {
1947 		struct dc_stream_state *stream = context->streams[i];
1948 
1949 		del_streams[del_streams_count++] = stream;
1950 	}
1951 
1952 	/* Remove all planes for removed streams and then remove the streams */
1953 	for (i = 0; i < del_streams_count; i++) {
1954 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1955 			res = DC_FAIL_DETACH_SURFACES;
1956 			goto fail;
1957 		}
1958 
1959 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1960 		if (res != DC_OK)
1961 			goto fail;
1962 	}
1963 
1964 
1965 	res = dc_validate_global_state(dc, context, false);
1966 
1967 	if (res != DC_OK) {
1968 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1969 		goto fail;
1970 	}
1971 
1972 	res = dc_commit_state(dc, context);
1973 
1974 fail:
1975 	dc_release_state(context);
1976 
1977 context_alloc_fail:
1978 	return res;
1979 }
1980 
1981 static int dm_suspend(void *handle)
1982 {
1983 	struct amdgpu_device *adev = handle;
1984 	struct amdgpu_display_manager *dm = &adev->dm;
1985 	int ret = 0;
1986 
1987 	if (amdgpu_in_reset(adev)) {
1988 		mutex_lock(&dm->dc_lock);
1989 
1990 #if defined(CONFIG_DRM_AMD_DC_DCN)
1991 		dc_allow_idle_optimizations(adev->dm.dc, false);
1992 #endif
1993 
1994 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1995 
1996 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1997 
1998 		amdgpu_dm_commit_zero_streams(dm->dc);
1999 
2000 		amdgpu_dm_irq_suspend(adev);
2001 
2002 		return ret;
2003 	}
2004 
2005 	WARN_ON(adev->dm.cached_state);
2006 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2007 
2008 	s3_handle_mst(adev_to_drm(adev), true);
2009 
2010 	amdgpu_dm_irq_suspend(adev);
2011 
2012 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2013 
2014 	return 0;
2015 }
2016 
2017 static struct amdgpu_dm_connector *
2018 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2019 					     struct drm_crtc *crtc)
2020 {
2021 	uint32_t i;
2022 	struct drm_connector_state *new_con_state;
2023 	struct drm_connector *connector;
2024 	struct drm_crtc *crtc_from_state;
2025 
2026 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2027 		crtc_from_state = new_con_state->crtc;
2028 
2029 		if (crtc_from_state == crtc)
2030 			return to_amdgpu_dm_connector(connector);
2031 	}
2032 
2033 	return NULL;
2034 }
2035 
2036 static void emulated_link_detect(struct dc_link *link)
2037 {
2038 	struct dc_sink_init_data sink_init_data = { 0 };
2039 	struct display_sink_capability sink_caps = { 0 };
2040 	enum dc_edid_status edid_status;
2041 	struct dc_context *dc_ctx = link->ctx;
2042 	struct dc_sink *sink = NULL;
2043 	struct dc_sink *prev_sink = NULL;
2044 
2045 	link->type = dc_connection_none;
2046 	prev_sink = link->local_sink;
2047 
2048 	if (prev_sink)
2049 		dc_sink_release(prev_sink);
2050 
2051 	switch (link->connector_signal) {
2052 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2053 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2054 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2055 		break;
2056 	}
2057 
2058 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2059 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2060 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2061 		break;
2062 	}
2063 
2064 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2065 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2066 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2067 		break;
2068 	}
2069 
2070 	case SIGNAL_TYPE_LVDS: {
2071 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2072 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2073 		break;
2074 	}
2075 
2076 	case SIGNAL_TYPE_EDP: {
2077 		sink_caps.transaction_type =
2078 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2079 		sink_caps.signal = SIGNAL_TYPE_EDP;
2080 		break;
2081 	}
2082 
2083 	case SIGNAL_TYPE_DISPLAY_PORT: {
2084 		sink_caps.transaction_type =
2085 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2086 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2087 		break;
2088 	}
2089 
2090 	default:
2091 		DC_ERROR("Invalid connector type! signal:%d\n",
2092 			link->connector_signal);
2093 		return;
2094 	}
2095 
2096 	sink_init_data.link = link;
2097 	sink_init_data.sink_signal = sink_caps.signal;
2098 
2099 	sink = dc_sink_create(&sink_init_data);
2100 	if (!sink) {
2101 		DC_ERROR("Failed to create sink!\n");
2102 		return;
2103 	}
2104 
2105 	/* dc_sink_create returns a new reference */
2106 	link->local_sink = sink;
2107 
2108 	edid_status = dm_helpers_read_local_edid(
2109 			link->ctx,
2110 			link,
2111 			sink);
2112 
2113 	if (edid_status != EDID_OK)
2114 		DC_ERROR("Failed to read EDID");
2115 
2116 }
2117 
2118 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2119 				     struct amdgpu_display_manager *dm)
2120 {
2121 	struct {
2122 		struct dc_surface_update surface_updates[MAX_SURFACES];
2123 		struct dc_plane_info plane_infos[MAX_SURFACES];
2124 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2125 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2126 		struct dc_stream_update stream_update;
2127 	} * bundle;
2128 	int k, m;
2129 
2130 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2131 
2132 	if (!bundle) {
2133 		dm_error("Failed to allocate update bundle\n");
2134 		goto cleanup;
2135 	}
2136 
2137 	for (k = 0; k < dc_state->stream_count; k++) {
2138 		bundle->stream_update.stream = dc_state->streams[k];
2139 
2140 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2141 			bundle->surface_updates[m].surface =
2142 				dc_state->stream_status->plane_states[m];
2143 			bundle->surface_updates[m].surface->force_full_update =
2144 				true;
2145 		}
2146 		dc_commit_updates_for_stream(
2147 			dm->dc, bundle->surface_updates,
2148 			dc_state->stream_status->plane_count,
2149 			dc_state->streams[k], &bundle->stream_update, dc_state);
2150 	}
2151 
2152 cleanup:
2153 	kfree(bundle);
2154 
2155 	return;
2156 }
2157 
2158 static void dm_set_dpms_off(struct dc_link *link)
2159 {
2160 	struct dc_stream_state *stream_state;
2161 	struct amdgpu_dm_connector *aconnector = link->priv;
2162 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2163 	struct dc_stream_update stream_update;
2164 	bool dpms_off = true;
2165 
2166 	memset(&stream_update, 0, sizeof(stream_update));
2167 	stream_update.dpms_off = &dpms_off;
2168 
2169 	mutex_lock(&adev->dm.dc_lock);
2170 	stream_state = dc_stream_find_from_link(link);
2171 
2172 	if (stream_state == NULL) {
2173 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2174 		mutex_unlock(&adev->dm.dc_lock);
2175 		return;
2176 	}
2177 
2178 	stream_update.stream = stream_state;
2179 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2180 				     stream_state, &stream_update,
2181 				     stream_state->ctx->dc->current_state);
2182 	mutex_unlock(&adev->dm.dc_lock);
2183 }
2184 
2185 static int dm_resume(void *handle)
2186 {
2187 	struct amdgpu_device *adev = handle;
2188 	struct drm_device *ddev = adev_to_drm(adev);
2189 	struct amdgpu_display_manager *dm = &adev->dm;
2190 	struct amdgpu_dm_connector *aconnector;
2191 	struct drm_connector *connector;
2192 	struct drm_connector_list_iter iter;
2193 	struct drm_crtc *crtc;
2194 	struct drm_crtc_state *new_crtc_state;
2195 	struct dm_crtc_state *dm_new_crtc_state;
2196 	struct drm_plane *plane;
2197 	struct drm_plane_state *new_plane_state;
2198 	struct dm_plane_state *dm_new_plane_state;
2199 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2200 	enum dc_connection_type new_connection_type = dc_connection_none;
2201 	struct dc_state *dc_state;
2202 	int i, r, j;
2203 
2204 	if (amdgpu_in_reset(adev)) {
2205 		dc_state = dm->cached_dc_state;
2206 
2207 		r = dm_dmub_hw_init(adev);
2208 		if (r)
2209 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2210 
2211 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2212 		dc_resume(dm->dc);
2213 
2214 		amdgpu_dm_irq_resume_early(adev);
2215 
2216 		for (i = 0; i < dc_state->stream_count; i++) {
2217 			dc_state->streams[i]->mode_changed = true;
2218 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2219 				dc_state->stream_status->plane_states[j]->update_flags.raw
2220 					= 0xffffffff;
2221 			}
2222 		}
2223 #if defined(CONFIG_DRM_AMD_DC_DCN)
2224 		/*
2225 		 * Resource allocation happens for link encoders for newer ASIC in
2226 		 * dc_validate_global_state, so we need to revalidate it.
2227 		 *
2228 		 * This shouldn't fail (it passed once before), so warn if it does.
2229 		 */
2230 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2231 #endif
2232 
2233 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2234 
2235 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2236 
2237 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2238 
2239 		dc_release_state(dm->cached_dc_state);
2240 		dm->cached_dc_state = NULL;
2241 
2242 		amdgpu_dm_irq_resume_late(adev);
2243 
2244 		mutex_unlock(&dm->dc_lock);
2245 
2246 		return 0;
2247 	}
2248 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2249 	dc_release_state(dm_state->context);
2250 	dm_state->context = dc_create_state(dm->dc);
2251 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2252 	dc_resource_state_construct(dm->dc, dm_state->context);
2253 
2254 	/* Before powering on DC we need to re-initialize DMUB. */
2255 	r = dm_dmub_hw_init(adev);
2256 	if (r)
2257 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2258 
2259 	/* power on hardware */
2260 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2261 
2262 	/* program HPD filter */
2263 	dc_resume(dm->dc);
2264 
2265 	/*
2266 	 * early enable HPD Rx IRQ, should be done before set mode as short
2267 	 * pulse interrupts are used for MST
2268 	 */
2269 	amdgpu_dm_irq_resume_early(adev);
2270 
2271 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2272 	s3_handle_mst(ddev, false);
2273 
2274 	/* Do detection*/
2275 	drm_connector_list_iter_begin(ddev, &iter);
2276 	drm_for_each_connector_iter(connector, &iter) {
2277 		aconnector = to_amdgpu_dm_connector(connector);
2278 
2279 		/*
2280 		 * this is the case when traversing through already created
2281 		 * MST connectors, should be skipped
2282 		 */
2283 		if (aconnector->mst_port)
2284 			continue;
2285 
2286 		mutex_lock(&aconnector->hpd_lock);
2287 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2288 			DRM_ERROR("KMS: Failed to detect connector\n");
2289 
2290 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2291 			emulated_link_detect(aconnector->dc_link);
2292 		else
2293 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2294 
2295 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2296 			aconnector->fake_enable = false;
2297 
2298 		if (aconnector->dc_sink)
2299 			dc_sink_release(aconnector->dc_sink);
2300 		aconnector->dc_sink = NULL;
2301 		amdgpu_dm_update_connector_after_detect(aconnector);
2302 		mutex_unlock(&aconnector->hpd_lock);
2303 	}
2304 	drm_connector_list_iter_end(&iter);
2305 
2306 	/* Force mode set in atomic commit */
2307 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2308 		new_crtc_state->active_changed = true;
2309 
2310 	/*
2311 	 * atomic_check is expected to create the dc states. We need to release
2312 	 * them here, since they were duplicated as part of the suspend
2313 	 * procedure.
2314 	 */
2315 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2316 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2317 		if (dm_new_crtc_state->stream) {
2318 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2319 			dc_stream_release(dm_new_crtc_state->stream);
2320 			dm_new_crtc_state->stream = NULL;
2321 		}
2322 	}
2323 
2324 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2325 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2326 		if (dm_new_plane_state->dc_state) {
2327 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2328 			dc_plane_state_release(dm_new_plane_state->dc_state);
2329 			dm_new_plane_state->dc_state = NULL;
2330 		}
2331 	}
2332 
2333 	drm_atomic_helper_resume(ddev, dm->cached_state);
2334 
2335 	dm->cached_state = NULL;
2336 
2337 	amdgpu_dm_irq_resume_late(adev);
2338 
2339 	amdgpu_dm_smu_write_watermarks_table(adev);
2340 
2341 	return 0;
2342 }
2343 
2344 /**
2345  * DOC: DM Lifecycle
2346  *
2347  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2348  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2349  * the base driver's device list to be initialized and torn down accordingly.
2350  *
2351  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2352  */
2353 
2354 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2355 	.name = "dm",
2356 	.early_init = dm_early_init,
2357 	.late_init = dm_late_init,
2358 	.sw_init = dm_sw_init,
2359 	.sw_fini = dm_sw_fini,
2360 	.early_fini = amdgpu_dm_early_fini,
2361 	.hw_init = dm_hw_init,
2362 	.hw_fini = dm_hw_fini,
2363 	.suspend = dm_suspend,
2364 	.resume = dm_resume,
2365 	.is_idle = dm_is_idle,
2366 	.wait_for_idle = dm_wait_for_idle,
2367 	.check_soft_reset = dm_check_soft_reset,
2368 	.soft_reset = dm_soft_reset,
2369 	.set_clockgating_state = dm_set_clockgating_state,
2370 	.set_powergating_state = dm_set_powergating_state,
2371 };
2372 
2373 const struct amdgpu_ip_block_version dm_ip_block =
2374 {
2375 	.type = AMD_IP_BLOCK_TYPE_DCE,
2376 	.major = 1,
2377 	.minor = 0,
2378 	.rev = 0,
2379 	.funcs = &amdgpu_dm_funcs,
2380 };
2381 
2382 
2383 /**
2384  * DOC: atomic
2385  *
2386  * *WIP*
2387  */
2388 
2389 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2390 	.fb_create = amdgpu_display_user_framebuffer_create,
2391 	.get_format_info = amd_get_format_info,
2392 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2393 	.atomic_check = amdgpu_dm_atomic_check,
2394 	.atomic_commit = drm_atomic_helper_commit,
2395 };
2396 
2397 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2398 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2399 };
2400 
2401 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2402 {
2403 	u32 max_cll, min_cll, max, min, q, r;
2404 	struct amdgpu_dm_backlight_caps *caps;
2405 	struct amdgpu_display_manager *dm;
2406 	struct drm_connector *conn_base;
2407 	struct amdgpu_device *adev;
2408 	struct dc_link *link = NULL;
2409 	static const u8 pre_computed_values[] = {
2410 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2411 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2412 
2413 	if (!aconnector || !aconnector->dc_link)
2414 		return;
2415 
2416 	link = aconnector->dc_link;
2417 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2418 		return;
2419 
2420 	conn_base = &aconnector->base;
2421 	adev = drm_to_adev(conn_base->dev);
2422 	dm = &adev->dm;
2423 	caps = &dm->backlight_caps;
2424 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2425 	caps->aux_support = false;
2426 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2427 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2428 
2429 	if (caps->ext_caps->bits.oled == 1 ||
2430 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2431 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2432 		caps->aux_support = true;
2433 
2434 	if (amdgpu_backlight == 0)
2435 		caps->aux_support = false;
2436 	else if (amdgpu_backlight == 1)
2437 		caps->aux_support = true;
2438 
2439 	/* From the specification (CTA-861-G), for calculating the maximum
2440 	 * luminance we need to use:
2441 	 *	Luminance = 50*2**(CV/32)
2442 	 * Where CV is a one-byte value.
2443 	 * For calculating this expression we may need float point precision;
2444 	 * to avoid this complexity level, we take advantage that CV is divided
2445 	 * by a constant. From the Euclids division algorithm, we know that CV
2446 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2447 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2448 	 * need to pre-compute the value of r/32. For pre-computing the values
2449 	 * We just used the following Ruby line:
2450 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2451 	 * The results of the above expressions can be verified at
2452 	 * pre_computed_values.
2453 	 */
2454 	q = max_cll >> 5;
2455 	r = max_cll % 32;
2456 	max = (1 << q) * pre_computed_values[r];
2457 
2458 	// min luminance: maxLum * (CV/255)^2 / 100
2459 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2460 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2461 
2462 	caps->aux_max_input_signal = max;
2463 	caps->aux_min_input_signal = min;
2464 }
2465 
2466 void amdgpu_dm_update_connector_after_detect(
2467 		struct amdgpu_dm_connector *aconnector)
2468 {
2469 	struct drm_connector *connector = &aconnector->base;
2470 	struct drm_device *dev = connector->dev;
2471 	struct dc_sink *sink;
2472 
2473 	/* MST handled by drm_mst framework */
2474 	if (aconnector->mst_mgr.mst_state == true)
2475 		return;
2476 
2477 	sink = aconnector->dc_link->local_sink;
2478 	if (sink)
2479 		dc_sink_retain(sink);
2480 
2481 	/*
2482 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2483 	 * the connector sink is set to either fake or physical sink depends on link status.
2484 	 * Skip if already done during boot.
2485 	 */
2486 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2487 			&& aconnector->dc_em_sink) {
2488 
2489 		/*
2490 		 * For S3 resume with headless use eml_sink to fake stream
2491 		 * because on resume connector->sink is set to NULL
2492 		 */
2493 		mutex_lock(&dev->mode_config.mutex);
2494 
2495 		if (sink) {
2496 			if (aconnector->dc_sink) {
2497 				amdgpu_dm_update_freesync_caps(connector, NULL);
2498 				/*
2499 				 * retain and release below are used to
2500 				 * bump up refcount for sink because the link doesn't point
2501 				 * to it anymore after disconnect, so on next crtc to connector
2502 				 * reshuffle by UMD we will get into unwanted dc_sink release
2503 				 */
2504 				dc_sink_release(aconnector->dc_sink);
2505 			}
2506 			aconnector->dc_sink = sink;
2507 			dc_sink_retain(aconnector->dc_sink);
2508 			amdgpu_dm_update_freesync_caps(connector,
2509 					aconnector->edid);
2510 		} else {
2511 			amdgpu_dm_update_freesync_caps(connector, NULL);
2512 			if (!aconnector->dc_sink) {
2513 				aconnector->dc_sink = aconnector->dc_em_sink;
2514 				dc_sink_retain(aconnector->dc_sink);
2515 			}
2516 		}
2517 
2518 		mutex_unlock(&dev->mode_config.mutex);
2519 
2520 		if (sink)
2521 			dc_sink_release(sink);
2522 		return;
2523 	}
2524 
2525 	/*
2526 	 * TODO: temporary guard to look for proper fix
2527 	 * if this sink is MST sink, we should not do anything
2528 	 */
2529 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2530 		dc_sink_release(sink);
2531 		return;
2532 	}
2533 
2534 	if (aconnector->dc_sink == sink) {
2535 		/*
2536 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2537 		 * Do nothing!!
2538 		 */
2539 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2540 				aconnector->connector_id);
2541 		if (sink)
2542 			dc_sink_release(sink);
2543 		return;
2544 	}
2545 
2546 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2547 		aconnector->connector_id, aconnector->dc_sink, sink);
2548 
2549 	mutex_lock(&dev->mode_config.mutex);
2550 
2551 	/*
2552 	 * 1. Update status of the drm connector
2553 	 * 2. Send an event and let userspace tell us what to do
2554 	 */
2555 	if (sink) {
2556 		/*
2557 		 * TODO: check if we still need the S3 mode update workaround.
2558 		 * If yes, put it here.
2559 		 */
2560 		if (aconnector->dc_sink) {
2561 			amdgpu_dm_update_freesync_caps(connector, NULL);
2562 			dc_sink_release(aconnector->dc_sink);
2563 		}
2564 
2565 		aconnector->dc_sink = sink;
2566 		dc_sink_retain(aconnector->dc_sink);
2567 		if (sink->dc_edid.length == 0) {
2568 			aconnector->edid = NULL;
2569 			if (aconnector->dc_link->aux_mode) {
2570 				drm_dp_cec_unset_edid(
2571 					&aconnector->dm_dp_aux.aux);
2572 			}
2573 		} else {
2574 			aconnector->edid =
2575 				(struct edid *)sink->dc_edid.raw_edid;
2576 
2577 			drm_connector_update_edid_property(connector,
2578 							   aconnector->edid);
2579 			if (aconnector->dc_link->aux_mode)
2580 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2581 						    aconnector->edid);
2582 		}
2583 
2584 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2585 		update_connector_ext_caps(aconnector);
2586 	} else {
2587 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2588 		amdgpu_dm_update_freesync_caps(connector, NULL);
2589 		drm_connector_update_edid_property(connector, NULL);
2590 		aconnector->num_modes = 0;
2591 		dc_sink_release(aconnector->dc_sink);
2592 		aconnector->dc_sink = NULL;
2593 		aconnector->edid = NULL;
2594 #ifdef CONFIG_DRM_AMD_DC_HDCP
2595 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2596 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2597 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2598 #endif
2599 	}
2600 
2601 	mutex_unlock(&dev->mode_config.mutex);
2602 
2603 	update_subconnector_property(aconnector);
2604 
2605 	if (sink)
2606 		dc_sink_release(sink);
2607 }
2608 
2609 static void handle_hpd_irq(void *param)
2610 {
2611 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2612 	struct drm_connector *connector = &aconnector->base;
2613 	struct drm_device *dev = connector->dev;
2614 	enum dc_connection_type new_connection_type = dc_connection_none;
2615 	struct amdgpu_device *adev = drm_to_adev(dev);
2616 #ifdef CONFIG_DRM_AMD_DC_HDCP
2617 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2618 #endif
2619 
2620 	if (adev->dm.disable_hpd_irq)
2621 		return;
2622 
2623 	/*
2624 	 * In case of failure or MST no need to update connector status or notify the OS
2625 	 * since (for MST case) MST does this in its own context.
2626 	 */
2627 	mutex_lock(&aconnector->hpd_lock);
2628 
2629 #ifdef CONFIG_DRM_AMD_DC_HDCP
2630 	if (adev->dm.hdcp_workqueue) {
2631 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2632 		dm_con_state->update_hdcp = true;
2633 	}
2634 #endif
2635 	if (aconnector->fake_enable)
2636 		aconnector->fake_enable = false;
2637 
2638 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2639 		DRM_ERROR("KMS: Failed to detect connector\n");
2640 
2641 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2642 		emulated_link_detect(aconnector->dc_link);
2643 
2644 
2645 		drm_modeset_lock_all(dev);
2646 		dm_restore_drm_connector_state(dev, connector);
2647 		drm_modeset_unlock_all(dev);
2648 
2649 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2650 			drm_kms_helper_hotplug_event(dev);
2651 
2652 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2653 		if (new_connection_type == dc_connection_none &&
2654 		    aconnector->dc_link->type == dc_connection_none)
2655 			dm_set_dpms_off(aconnector->dc_link);
2656 
2657 		amdgpu_dm_update_connector_after_detect(aconnector);
2658 
2659 		drm_modeset_lock_all(dev);
2660 		dm_restore_drm_connector_state(dev, connector);
2661 		drm_modeset_unlock_all(dev);
2662 
2663 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2664 			drm_kms_helper_hotplug_event(dev);
2665 	}
2666 	mutex_unlock(&aconnector->hpd_lock);
2667 
2668 }
2669 
2670 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2671 {
2672 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2673 	uint8_t dret;
2674 	bool new_irq_handled = false;
2675 	int dpcd_addr;
2676 	int dpcd_bytes_to_read;
2677 
2678 	const int max_process_count = 30;
2679 	int process_count = 0;
2680 
2681 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2682 
2683 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2684 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2685 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2686 		dpcd_addr = DP_SINK_COUNT;
2687 	} else {
2688 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2689 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2690 		dpcd_addr = DP_SINK_COUNT_ESI;
2691 	}
2692 
2693 	dret = drm_dp_dpcd_read(
2694 		&aconnector->dm_dp_aux.aux,
2695 		dpcd_addr,
2696 		esi,
2697 		dpcd_bytes_to_read);
2698 
2699 	while (dret == dpcd_bytes_to_read &&
2700 		process_count < max_process_count) {
2701 		uint8_t retry;
2702 		dret = 0;
2703 
2704 		process_count++;
2705 
2706 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2707 		/* handle HPD short pulse irq */
2708 		if (aconnector->mst_mgr.mst_state)
2709 			drm_dp_mst_hpd_irq(
2710 				&aconnector->mst_mgr,
2711 				esi,
2712 				&new_irq_handled);
2713 
2714 		if (new_irq_handled) {
2715 			/* ACK at DPCD to notify down stream */
2716 			const int ack_dpcd_bytes_to_write =
2717 				dpcd_bytes_to_read - 1;
2718 
2719 			for (retry = 0; retry < 3; retry++) {
2720 				uint8_t wret;
2721 
2722 				wret = drm_dp_dpcd_write(
2723 					&aconnector->dm_dp_aux.aux,
2724 					dpcd_addr + 1,
2725 					&esi[1],
2726 					ack_dpcd_bytes_to_write);
2727 				if (wret == ack_dpcd_bytes_to_write)
2728 					break;
2729 			}
2730 
2731 			/* check if there is new irq to be handled */
2732 			dret = drm_dp_dpcd_read(
2733 				&aconnector->dm_dp_aux.aux,
2734 				dpcd_addr,
2735 				esi,
2736 				dpcd_bytes_to_read);
2737 
2738 			new_irq_handled = false;
2739 		} else {
2740 			break;
2741 		}
2742 	}
2743 
2744 	if (process_count == max_process_count)
2745 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2746 }
2747 
2748 static void handle_hpd_rx_irq(void *param)
2749 {
2750 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2751 	struct drm_connector *connector = &aconnector->base;
2752 	struct drm_device *dev = connector->dev;
2753 	struct dc_link *dc_link = aconnector->dc_link;
2754 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2755 	bool result = false;
2756 	enum dc_connection_type new_connection_type = dc_connection_none;
2757 	struct amdgpu_device *adev = drm_to_adev(dev);
2758 	union hpd_irq_data hpd_irq_data;
2759 	bool lock_flag = 0;
2760 
2761 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2762 
2763 	if (adev->dm.disable_hpd_irq)
2764 		return;
2765 
2766 
2767 	/*
2768 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2769 	 * conflict, after implement i2c helper, this mutex should be
2770 	 * retired.
2771 	 */
2772 	mutex_lock(&aconnector->hpd_lock);
2773 
2774 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2775 
2776 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2777 		(dc_link->type == dc_connection_mst_branch)) {
2778 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2779 			result = true;
2780 			dm_handle_hpd_rx_irq(aconnector);
2781 			goto out;
2782 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2783 			result = false;
2784 			dm_handle_hpd_rx_irq(aconnector);
2785 			goto out;
2786 		}
2787 	}
2788 
2789 	/*
2790 	 * TODO: We need the lock to avoid touching DC state while it's being
2791 	 * modified during automated compliance testing, or when link loss
2792 	 * happens. While this should be split into subhandlers and proper
2793 	 * interfaces to avoid having to conditionally lock like this in the
2794 	 * outer layer, we need this workaround temporarily to allow MST
2795 	 * lightup in some scenarios to avoid timeout.
2796 	 */
2797 	if (!amdgpu_in_reset(adev) &&
2798 	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2799 	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2800 		mutex_lock(&adev->dm.dc_lock);
2801 		lock_flag = 1;
2802 	}
2803 
2804 #ifdef CONFIG_DRM_AMD_DC_HDCP
2805 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2806 #else
2807 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2808 #endif
2809 	if (!amdgpu_in_reset(adev) && lock_flag)
2810 		mutex_unlock(&adev->dm.dc_lock);
2811 
2812 out:
2813 	if (result && !is_mst_root_connector) {
2814 		/* Downstream Port status changed. */
2815 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2816 			DRM_ERROR("KMS: Failed to detect connector\n");
2817 
2818 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2819 			emulated_link_detect(dc_link);
2820 
2821 			if (aconnector->fake_enable)
2822 				aconnector->fake_enable = false;
2823 
2824 			amdgpu_dm_update_connector_after_detect(aconnector);
2825 
2826 
2827 			drm_modeset_lock_all(dev);
2828 			dm_restore_drm_connector_state(dev, connector);
2829 			drm_modeset_unlock_all(dev);
2830 
2831 			drm_kms_helper_hotplug_event(dev);
2832 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2833 
2834 			if (aconnector->fake_enable)
2835 				aconnector->fake_enable = false;
2836 
2837 			amdgpu_dm_update_connector_after_detect(aconnector);
2838 
2839 
2840 			drm_modeset_lock_all(dev);
2841 			dm_restore_drm_connector_state(dev, connector);
2842 			drm_modeset_unlock_all(dev);
2843 
2844 			drm_kms_helper_hotplug_event(dev);
2845 		}
2846 	}
2847 #ifdef CONFIG_DRM_AMD_DC_HDCP
2848 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2849 		if (adev->dm.hdcp_workqueue)
2850 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2851 	}
2852 #endif
2853 
2854 	if (dc_link->type != dc_connection_mst_branch)
2855 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2856 
2857 	mutex_unlock(&aconnector->hpd_lock);
2858 }
2859 
2860 static void register_hpd_handlers(struct amdgpu_device *adev)
2861 {
2862 	struct drm_device *dev = adev_to_drm(adev);
2863 	struct drm_connector *connector;
2864 	struct amdgpu_dm_connector *aconnector;
2865 	const struct dc_link *dc_link;
2866 	struct dc_interrupt_params int_params = {0};
2867 
2868 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2869 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2870 
2871 	list_for_each_entry(connector,
2872 			&dev->mode_config.connector_list, head)	{
2873 
2874 		aconnector = to_amdgpu_dm_connector(connector);
2875 		dc_link = aconnector->dc_link;
2876 
2877 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2878 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2879 			int_params.irq_source = dc_link->irq_source_hpd;
2880 
2881 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2882 					handle_hpd_irq,
2883 					(void *) aconnector);
2884 		}
2885 
2886 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2887 
2888 			/* Also register for DP short pulse (hpd_rx). */
2889 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2890 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2891 
2892 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2893 					handle_hpd_rx_irq,
2894 					(void *) aconnector);
2895 		}
2896 	}
2897 }
2898 
2899 #if defined(CONFIG_DRM_AMD_DC_SI)
2900 /* Register IRQ sources and initialize IRQ callbacks */
2901 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2902 {
2903 	struct dc *dc = adev->dm.dc;
2904 	struct common_irq_params *c_irq_params;
2905 	struct dc_interrupt_params int_params = {0};
2906 	int r;
2907 	int i;
2908 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2909 
2910 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2911 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2912 
2913 	/*
2914 	 * Actions of amdgpu_irq_add_id():
2915 	 * 1. Register a set() function with base driver.
2916 	 *    Base driver will call set() function to enable/disable an
2917 	 *    interrupt in DC hardware.
2918 	 * 2. Register amdgpu_dm_irq_handler().
2919 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2920 	 *    coming from DC hardware.
2921 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2922 	 *    for acknowledging and handling. */
2923 
2924 	/* Use VBLANK interrupt */
2925 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2926 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2927 		if (r) {
2928 			DRM_ERROR("Failed to add crtc irq id!\n");
2929 			return r;
2930 		}
2931 
2932 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2933 		int_params.irq_source =
2934 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2935 
2936 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2937 
2938 		c_irq_params->adev = adev;
2939 		c_irq_params->irq_src = int_params.irq_source;
2940 
2941 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2942 				dm_crtc_high_irq, c_irq_params);
2943 	}
2944 
2945 	/* Use GRPH_PFLIP interrupt */
2946 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2947 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2948 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2949 		if (r) {
2950 			DRM_ERROR("Failed to add page flip irq id!\n");
2951 			return r;
2952 		}
2953 
2954 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2955 		int_params.irq_source =
2956 			dc_interrupt_to_irq_source(dc, i, 0);
2957 
2958 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2959 
2960 		c_irq_params->adev = adev;
2961 		c_irq_params->irq_src = int_params.irq_source;
2962 
2963 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2964 				dm_pflip_high_irq, c_irq_params);
2965 
2966 	}
2967 
2968 	/* HPD */
2969 	r = amdgpu_irq_add_id(adev, client_id,
2970 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2971 	if (r) {
2972 		DRM_ERROR("Failed to add hpd irq id!\n");
2973 		return r;
2974 	}
2975 
2976 	register_hpd_handlers(adev);
2977 
2978 	return 0;
2979 }
2980 #endif
2981 
2982 /* Register IRQ sources and initialize IRQ callbacks */
2983 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2984 {
2985 	struct dc *dc = adev->dm.dc;
2986 	struct common_irq_params *c_irq_params;
2987 	struct dc_interrupt_params int_params = {0};
2988 	int r;
2989 	int i;
2990 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2991 
2992 	if (adev->asic_type >= CHIP_VEGA10)
2993 		client_id = SOC15_IH_CLIENTID_DCE;
2994 
2995 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2996 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2997 
2998 	/*
2999 	 * Actions of amdgpu_irq_add_id():
3000 	 * 1. Register a set() function with base driver.
3001 	 *    Base driver will call set() function to enable/disable an
3002 	 *    interrupt in DC hardware.
3003 	 * 2. Register amdgpu_dm_irq_handler().
3004 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3005 	 *    coming from DC hardware.
3006 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3007 	 *    for acknowledging and handling. */
3008 
3009 	/* Use VBLANK interrupt */
3010 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3011 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3012 		if (r) {
3013 			DRM_ERROR("Failed to add crtc irq id!\n");
3014 			return r;
3015 		}
3016 
3017 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3018 		int_params.irq_source =
3019 			dc_interrupt_to_irq_source(dc, i, 0);
3020 
3021 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3022 
3023 		c_irq_params->adev = adev;
3024 		c_irq_params->irq_src = int_params.irq_source;
3025 
3026 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3027 				dm_crtc_high_irq, c_irq_params);
3028 	}
3029 
3030 	/* Use VUPDATE interrupt */
3031 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3032 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3033 		if (r) {
3034 			DRM_ERROR("Failed to add vupdate irq id!\n");
3035 			return r;
3036 		}
3037 
3038 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3039 		int_params.irq_source =
3040 			dc_interrupt_to_irq_source(dc, i, 0);
3041 
3042 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3043 
3044 		c_irq_params->adev = adev;
3045 		c_irq_params->irq_src = int_params.irq_source;
3046 
3047 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3048 				dm_vupdate_high_irq, c_irq_params);
3049 	}
3050 
3051 	/* Use GRPH_PFLIP interrupt */
3052 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3053 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3054 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3055 		if (r) {
3056 			DRM_ERROR("Failed to add page flip irq id!\n");
3057 			return r;
3058 		}
3059 
3060 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3061 		int_params.irq_source =
3062 			dc_interrupt_to_irq_source(dc, i, 0);
3063 
3064 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3065 
3066 		c_irq_params->adev = adev;
3067 		c_irq_params->irq_src = int_params.irq_source;
3068 
3069 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3070 				dm_pflip_high_irq, c_irq_params);
3071 
3072 	}
3073 
3074 	/* HPD */
3075 	r = amdgpu_irq_add_id(adev, client_id,
3076 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3077 	if (r) {
3078 		DRM_ERROR("Failed to add hpd irq id!\n");
3079 		return r;
3080 	}
3081 
3082 	register_hpd_handlers(adev);
3083 
3084 	return 0;
3085 }
3086 
3087 #if defined(CONFIG_DRM_AMD_DC_DCN)
3088 /* Register IRQ sources and initialize IRQ callbacks */
3089 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3090 {
3091 	struct dc *dc = adev->dm.dc;
3092 	struct common_irq_params *c_irq_params;
3093 	struct dc_interrupt_params int_params = {0};
3094 	int r;
3095 	int i;
3096 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3097 	static const unsigned int vrtl_int_srcid[] = {
3098 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3099 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3100 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3101 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3102 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3103 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3104 	};
3105 #endif
3106 
3107 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3108 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3109 
3110 	/*
3111 	 * Actions of amdgpu_irq_add_id():
3112 	 * 1. Register a set() function with base driver.
3113 	 *    Base driver will call set() function to enable/disable an
3114 	 *    interrupt in DC hardware.
3115 	 * 2. Register amdgpu_dm_irq_handler().
3116 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3117 	 *    coming from DC hardware.
3118 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3119 	 *    for acknowledging and handling.
3120 	 */
3121 
3122 	/* Use VSTARTUP interrupt */
3123 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3124 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3125 			i++) {
3126 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3127 
3128 		if (r) {
3129 			DRM_ERROR("Failed to add crtc irq id!\n");
3130 			return r;
3131 		}
3132 
3133 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3134 		int_params.irq_source =
3135 			dc_interrupt_to_irq_source(dc, i, 0);
3136 
3137 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3138 
3139 		c_irq_params->adev = adev;
3140 		c_irq_params->irq_src = int_params.irq_source;
3141 
3142 		amdgpu_dm_irq_register_interrupt(
3143 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3144 	}
3145 
3146 	/* Use otg vertical line interrupt */
3147 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3148 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3149 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3150 				vrtl_int_srcid[i], &adev->vline0_irq);
3151 
3152 		if (r) {
3153 			DRM_ERROR("Failed to add vline0 irq id!\n");
3154 			return r;
3155 		}
3156 
3157 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3158 		int_params.irq_source =
3159 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3160 
3161 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3162 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3163 			break;
3164 		}
3165 
3166 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3167 					- DC_IRQ_SOURCE_DC1_VLINE0];
3168 
3169 		c_irq_params->adev = adev;
3170 		c_irq_params->irq_src = int_params.irq_source;
3171 
3172 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3173 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3174 	}
3175 #endif
3176 
3177 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3178 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3179 	 * to trigger at end of each vblank, regardless of state of the lock,
3180 	 * matching DCE behaviour.
3181 	 */
3182 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3183 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3184 	     i++) {
3185 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3186 
3187 		if (r) {
3188 			DRM_ERROR("Failed to add vupdate irq id!\n");
3189 			return r;
3190 		}
3191 
3192 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3193 		int_params.irq_source =
3194 			dc_interrupt_to_irq_source(dc, i, 0);
3195 
3196 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3197 
3198 		c_irq_params->adev = adev;
3199 		c_irq_params->irq_src = int_params.irq_source;
3200 
3201 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3202 				dm_vupdate_high_irq, c_irq_params);
3203 	}
3204 
3205 	/* Use GRPH_PFLIP interrupt */
3206 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3207 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3208 			i++) {
3209 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3210 		if (r) {
3211 			DRM_ERROR("Failed to add page flip irq id!\n");
3212 			return r;
3213 		}
3214 
3215 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3216 		int_params.irq_source =
3217 			dc_interrupt_to_irq_source(dc, i, 0);
3218 
3219 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3220 
3221 		c_irq_params->adev = adev;
3222 		c_irq_params->irq_src = int_params.irq_source;
3223 
3224 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3225 				dm_pflip_high_irq, c_irq_params);
3226 
3227 	}
3228 
3229 	/* HPD */
3230 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3231 			&adev->hpd_irq);
3232 	if (r) {
3233 		DRM_ERROR("Failed to add hpd irq id!\n");
3234 		return r;
3235 	}
3236 
3237 	register_hpd_handlers(adev);
3238 
3239 	return 0;
3240 }
3241 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3242 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3243 {
3244 	struct dc *dc = adev->dm.dc;
3245 	struct common_irq_params *c_irq_params;
3246 	struct dc_interrupt_params int_params = {0};
3247 	int r, i;
3248 
3249 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3250 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3251 
3252 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3253 			&adev->dmub_outbox_irq);
3254 	if (r) {
3255 		DRM_ERROR("Failed to add outbox irq id!\n");
3256 		return r;
3257 	}
3258 
3259 	if (dc->ctx->dmub_srv) {
3260 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3261 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3262 		int_params.irq_source =
3263 		dc_interrupt_to_irq_source(dc, i, 0);
3264 
3265 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3266 
3267 		c_irq_params->adev = adev;
3268 		c_irq_params->irq_src = int_params.irq_source;
3269 
3270 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3271 				dm_dmub_outbox1_low_irq, c_irq_params);
3272 	}
3273 
3274 	return 0;
3275 }
3276 #endif
3277 
3278 /*
3279  * Acquires the lock for the atomic state object and returns
3280  * the new atomic state.
3281  *
3282  * This should only be called during atomic check.
3283  */
3284 static int dm_atomic_get_state(struct drm_atomic_state *state,
3285 			       struct dm_atomic_state **dm_state)
3286 {
3287 	struct drm_device *dev = state->dev;
3288 	struct amdgpu_device *adev = drm_to_adev(dev);
3289 	struct amdgpu_display_manager *dm = &adev->dm;
3290 	struct drm_private_state *priv_state;
3291 
3292 	if (*dm_state)
3293 		return 0;
3294 
3295 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3296 	if (IS_ERR(priv_state))
3297 		return PTR_ERR(priv_state);
3298 
3299 	*dm_state = to_dm_atomic_state(priv_state);
3300 
3301 	return 0;
3302 }
3303 
3304 static struct dm_atomic_state *
3305 dm_atomic_get_new_state(struct drm_atomic_state *state)
3306 {
3307 	struct drm_device *dev = state->dev;
3308 	struct amdgpu_device *adev = drm_to_adev(dev);
3309 	struct amdgpu_display_manager *dm = &adev->dm;
3310 	struct drm_private_obj *obj;
3311 	struct drm_private_state *new_obj_state;
3312 	int i;
3313 
3314 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3315 		if (obj->funcs == dm->atomic_obj.funcs)
3316 			return to_dm_atomic_state(new_obj_state);
3317 	}
3318 
3319 	return NULL;
3320 }
3321 
3322 static struct drm_private_state *
3323 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3324 {
3325 	struct dm_atomic_state *old_state, *new_state;
3326 
3327 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3328 	if (!new_state)
3329 		return NULL;
3330 
3331 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3332 
3333 	old_state = to_dm_atomic_state(obj->state);
3334 
3335 	if (old_state && old_state->context)
3336 		new_state->context = dc_copy_state(old_state->context);
3337 
3338 	if (!new_state->context) {
3339 		kfree(new_state);
3340 		return NULL;
3341 	}
3342 
3343 	return &new_state->base;
3344 }
3345 
3346 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3347 				    struct drm_private_state *state)
3348 {
3349 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3350 
3351 	if (dm_state && dm_state->context)
3352 		dc_release_state(dm_state->context);
3353 
3354 	kfree(dm_state);
3355 }
3356 
3357 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3358 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3359 	.atomic_destroy_state = dm_atomic_destroy_state,
3360 };
3361 
3362 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3363 {
3364 	struct dm_atomic_state *state;
3365 	int r;
3366 
3367 	adev->mode_info.mode_config_initialized = true;
3368 
3369 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3370 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3371 
3372 	adev_to_drm(adev)->mode_config.max_width = 16384;
3373 	adev_to_drm(adev)->mode_config.max_height = 16384;
3374 
3375 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3376 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3377 	/* indicates support for immediate flip */
3378 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3379 
3380 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3381 
3382 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3383 	if (!state)
3384 		return -ENOMEM;
3385 
3386 	state->context = dc_create_state(adev->dm.dc);
3387 	if (!state->context) {
3388 		kfree(state);
3389 		return -ENOMEM;
3390 	}
3391 
3392 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3393 
3394 	drm_atomic_private_obj_init(adev_to_drm(adev),
3395 				    &adev->dm.atomic_obj,
3396 				    &state->base,
3397 				    &dm_atomic_state_funcs);
3398 
3399 	r = amdgpu_display_modeset_create_props(adev);
3400 	if (r) {
3401 		dc_release_state(state->context);
3402 		kfree(state);
3403 		return r;
3404 	}
3405 
3406 	r = amdgpu_dm_audio_init(adev);
3407 	if (r) {
3408 		dc_release_state(state->context);
3409 		kfree(state);
3410 		return r;
3411 	}
3412 
3413 	return 0;
3414 }
3415 
3416 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3417 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3418 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3419 
3420 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3421 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3422 
3423 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3424 {
3425 #if defined(CONFIG_ACPI)
3426 	struct amdgpu_dm_backlight_caps caps;
3427 
3428 	memset(&caps, 0, sizeof(caps));
3429 
3430 	if (dm->backlight_caps.caps_valid)
3431 		return;
3432 
3433 	amdgpu_acpi_get_backlight_caps(&caps);
3434 	if (caps.caps_valid) {
3435 		dm->backlight_caps.caps_valid = true;
3436 		if (caps.aux_support)
3437 			return;
3438 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3439 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3440 	} else {
3441 		dm->backlight_caps.min_input_signal =
3442 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3443 		dm->backlight_caps.max_input_signal =
3444 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3445 	}
3446 #else
3447 	if (dm->backlight_caps.aux_support)
3448 		return;
3449 
3450 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3451 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3452 #endif
3453 }
3454 
3455 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3456 				unsigned *min, unsigned *max)
3457 {
3458 	if (!caps)
3459 		return 0;
3460 
3461 	if (caps->aux_support) {
3462 		// Firmware limits are in nits, DC API wants millinits.
3463 		*max = 1000 * caps->aux_max_input_signal;
3464 		*min = 1000 * caps->aux_min_input_signal;
3465 	} else {
3466 		// Firmware limits are 8-bit, PWM control is 16-bit.
3467 		*max = 0x101 * caps->max_input_signal;
3468 		*min = 0x101 * caps->min_input_signal;
3469 	}
3470 	return 1;
3471 }
3472 
3473 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3474 					uint32_t brightness)
3475 {
3476 	unsigned min, max;
3477 
3478 	if (!get_brightness_range(caps, &min, &max))
3479 		return brightness;
3480 
3481 	// Rescale 0..255 to min..max
3482 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3483 				       AMDGPU_MAX_BL_LEVEL);
3484 }
3485 
3486 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3487 				      uint32_t brightness)
3488 {
3489 	unsigned min, max;
3490 
3491 	if (!get_brightness_range(caps, &min, &max))
3492 		return brightness;
3493 
3494 	if (brightness < min)
3495 		return 0;
3496 	// Rescale min..max to 0..255
3497 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3498 				 max - min);
3499 }
3500 
3501 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3502 					 u32 user_brightness)
3503 {
3504 	struct amdgpu_dm_backlight_caps caps;
3505 	struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3506 	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3507 	bool rc;
3508 	int i;
3509 
3510 	amdgpu_dm_update_backlight_caps(dm);
3511 	caps = dm->backlight_caps;
3512 
3513 	for (i = 0; i < dm->num_of_edps; i++) {
3514 		dm->brightness[i] = user_brightness;
3515 		brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3516 		link[i] = (struct dc_link *)dm->backlight_link[i];
3517 	}
3518 
3519 	/* Change brightness based on AUX property */
3520 	if (caps.aux_support) {
3521 		for (i = 0; i < dm->num_of_edps; i++) {
3522 			rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3523 				AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3524 			if (!rc) {
3525 				DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3526 				break;
3527 			}
3528 		}
3529 	} else {
3530 		for (i = 0; i < dm->num_of_edps; i++) {
3531 			rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3532 			if (!rc) {
3533 				DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
3534 				break;
3535 			}
3536 		}
3537 	}
3538 
3539 	return rc ? 0 : 1;
3540 }
3541 
3542 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3543 {
3544 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3545 
3546 	amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3547 
3548 	return 0;
3549 }
3550 
3551 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3552 {
3553 	struct amdgpu_dm_backlight_caps caps;
3554 
3555 	amdgpu_dm_update_backlight_caps(dm);
3556 	caps = dm->backlight_caps;
3557 
3558 	if (caps.aux_support) {
3559 		struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3560 		u32 avg, peak;
3561 		bool rc;
3562 
3563 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3564 		if (!rc)
3565 			return dm->brightness[0];
3566 		return convert_brightness_to_user(&caps, avg);
3567 	} else {
3568 		int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3569 
3570 		if (ret == DC_ERROR_UNEXPECTED)
3571 			return dm->brightness[0];
3572 		return convert_brightness_to_user(&caps, ret);
3573 	}
3574 }
3575 
3576 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3577 {
3578 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3579 
3580 	return amdgpu_dm_backlight_get_level(dm);
3581 }
3582 
3583 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3584 	.options = BL_CORE_SUSPENDRESUME,
3585 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3586 	.update_status	= amdgpu_dm_backlight_update_status,
3587 };
3588 
3589 static void
3590 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3591 {
3592 	char bl_name[16];
3593 	struct backlight_properties props = { 0 };
3594 	int i;
3595 
3596 	amdgpu_dm_update_backlight_caps(dm);
3597 	for (i = 0; i < dm->num_of_edps; i++)
3598 		dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3599 
3600 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3601 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3602 	props.type = BACKLIGHT_RAW;
3603 
3604 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3605 		 adev_to_drm(dm->adev)->primary->index);
3606 
3607 	dm->backlight_dev = backlight_device_register(bl_name,
3608 						      adev_to_drm(dm->adev)->dev,
3609 						      dm,
3610 						      &amdgpu_dm_backlight_ops,
3611 						      &props);
3612 
3613 	if (IS_ERR(dm->backlight_dev))
3614 		DRM_ERROR("DM: Backlight registration failed!\n");
3615 	else
3616 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3617 }
3618 
3619 #endif
3620 
3621 static int initialize_plane(struct amdgpu_display_manager *dm,
3622 			    struct amdgpu_mode_info *mode_info, int plane_id,
3623 			    enum drm_plane_type plane_type,
3624 			    const struct dc_plane_cap *plane_cap)
3625 {
3626 	struct drm_plane *plane;
3627 	unsigned long possible_crtcs;
3628 	int ret = 0;
3629 
3630 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3631 	if (!plane) {
3632 		DRM_ERROR("KMS: Failed to allocate plane\n");
3633 		return -ENOMEM;
3634 	}
3635 	plane->type = plane_type;
3636 
3637 	/*
3638 	 * HACK: IGT tests expect that the primary plane for a CRTC
3639 	 * can only have one possible CRTC. Only expose support for
3640 	 * any CRTC if they're not going to be used as a primary plane
3641 	 * for a CRTC - like overlay or underlay planes.
3642 	 */
3643 	possible_crtcs = 1 << plane_id;
3644 	if (plane_id >= dm->dc->caps.max_streams)
3645 		possible_crtcs = 0xff;
3646 
3647 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3648 
3649 	if (ret) {
3650 		DRM_ERROR("KMS: Failed to initialize plane\n");
3651 		kfree(plane);
3652 		return ret;
3653 	}
3654 
3655 	if (mode_info)
3656 		mode_info->planes[plane_id] = plane;
3657 
3658 	return ret;
3659 }
3660 
3661 
3662 static void register_backlight_device(struct amdgpu_display_manager *dm,
3663 				      struct dc_link *link)
3664 {
3665 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3666 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3667 
3668 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3669 	    link->type != dc_connection_none) {
3670 		/*
3671 		 * Event if registration failed, we should continue with
3672 		 * DM initialization because not having a backlight control
3673 		 * is better then a black screen.
3674 		 */
3675 		if (!dm->backlight_dev)
3676 			amdgpu_dm_register_backlight_device(dm);
3677 
3678 		if (dm->backlight_dev) {
3679 			dm->backlight_link[dm->num_of_edps] = link;
3680 			dm->num_of_edps++;
3681 		}
3682 	}
3683 #endif
3684 }
3685 
3686 
3687 /*
3688  * In this architecture, the association
3689  * connector -> encoder -> crtc
3690  * id not really requried. The crtc and connector will hold the
3691  * display_index as an abstraction to use with DAL component
3692  *
3693  * Returns 0 on success
3694  */
3695 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3696 {
3697 	struct amdgpu_display_manager *dm = &adev->dm;
3698 	int32_t i;
3699 	struct amdgpu_dm_connector *aconnector = NULL;
3700 	struct amdgpu_encoder *aencoder = NULL;
3701 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3702 	uint32_t link_cnt;
3703 	int32_t primary_planes;
3704 	enum dc_connection_type new_connection_type = dc_connection_none;
3705 	const struct dc_plane_cap *plane;
3706 
3707 	dm->display_indexes_num = dm->dc->caps.max_streams;
3708 	/* Update the actual used number of crtc */
3709 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3710 
3711 	link_cnt = dm->dc->caps.max_links;
3712 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3713 		DRM_ERROR("DM: Failed to initialize mode config\n");
3714 		return -EINVAL;
3715 	}
3716 
3717 	/* There is one primary plane per CRTC */
3718 	primary_planes = dm->dc->caps.max_streams;
3719 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3720 
3721 	/*
3722 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3723 	 * Order is reversed to match iteration order in atomic check.
3724 	 */
3725 	for (i = (primary_planes - 1); i >= 0; i--) {
3726 		plane = &dm->dc->caps.planes[i];
3727 
3728 		if (initialize_plane(dm, mode_info, i,
3729 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3730 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3731 			goto fail;
3732 		}
3733 	}
3734 
3735 	/*
3736 	 * Initialize overlay planes, index starting after primary planes.
3737 	 * These planes have a higher DRM index than the primary planes since
3738 	 * they should be considered as having a higher z-order.
3739 	 * Order is reversed to match iteration order in atomic check.
3740 	 *
3741 	 * Only support DCN for now, and only expose one so we don't encourage
3742 	 * userspace to use up all the pipes.
3743 	 */
3744 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3745 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3746 
3747 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3748 			continue;
3749 
3750 		if (!plane->blends_with_above || !plane->blends_with_below)
3751 			continue;
3752 
3753 		if (!plane->pixel_format_support.argb8888)
3754 			continue;
3755 
3756 		if (initialize_plane(dm, NULL, primary_planes + i,
3757 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3758 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3759 			goto fail;
3760 		}
3761 
3762 		/* Only create one overlay plane. */
3763 		break;
3764 	}
3765 
3766 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3767 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3768 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3769 			goto fail;
3770 		}
3771 
3772 #if defined(CONFIG_DRM_AMD_DC_DCN)
3773 	/* Use Outbox interrupt */
3774 	switch (adev->asic_type) {
3775 	case CHIP_SIENNA_CICHLID:
3776 	case CHIP_NAVY_FLOUNDER:
3777 	case CHIP_YELLOW_CARP:
3778 	case CHIP_RENOIR:
3779 		if (register_outbox_irq_handlers(dm->adev)) {
3780 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3781 			goto fail;
3782 		}
3783 		break;
3784 	default:
3785 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3786 	}
3787 #endif
3788 
3789 	/* loops over all connectors on the board */
3790 	for (i = 0; i < link_cnt; i++) {
3791 		struct dc_link *link = NULL;
3792 
3793 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3794 			DRM_ERROR(
3795 				"KMS: Cannot support more than %d display indexes\n",
3796 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3797 			continue;
3798 		}
3799 
3800 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3801 		if (!aconnector)
3802 			goto fail;
3803 
3804 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3805 		if (!aencoder)
3806 			goto fail;
3807 
3808 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3809 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3810 			goto fail;
3811 		}
3812 
3813 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3814 			DRM_ERROR("KMS: Failed to initialize connector\n");
3815 			goto fail;
3816 		}
3817 
3818 		link = dc_get_link_at_index(dm->dc, i);
3819 
3820 		if (!dc_link_detect_sink(link, &new_connection_type))
3821 			DRM_ERROR("KMS: Failed to detect connector\n");
3822 
3823 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3824 			emulated_link_detect(link);
3825 			amdgpu_dm_update_connector_after_detect(aconnector);
3826 
3827 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3828 			amdgpu_dm_update_connector_after_detect(aconnector);
3829 			register_backlight_device(dm, link);
3830 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3831 				amdgpu_dm_set_psr_caps(link);
3832 		}
3833 
3834 
3835 	}
3836 
3837 	/* Software is initialized. Now we can register interrupt handlers. */
3838 	switch (adev->asic_type) {
3839 #if defined(CONFIG_DRM_AMD_DC_SI)
3840 	case CHIP_TAHITI:
3841 	case CHIP_PITCAIRN:
3842 	case CHIP_VERDE:
3843 	case CHIP_OLAND:
3844 		if (dce60_register_irq_handlers(dm->adev)) {
3845 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3846 			goto fail;
3847 		}
3848 		break;
3849 #endif
3850 	case CHIP_BONAIRE:
3851 	case CHIP_HAWAII:
3852 	case CHIP_KAVERI:
3853 	case CHIP_KABINI:
3854 	case CHIP_MULLINS:
3855 	case CHIP_TONGA:
3856 	case CHIP_FIJI:
3857 	case CHIP_CARRIZO:
3858 	case CHIP_STONEY:
3859 	case CHIP_POLARIS11:
3860 	case CHIP_POLARIS10:
3861 	case CHIP_POLARIS12:
3862 	case CHIP_VEGAM:
3863 	case CHIP_VEGA10:
3864 	case CHIP_VEGA12:
3865 	case CHIP_VEGA20:
3866 		if (dce110_register_irq_handlers(dm->adev)) {
3867 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3868 			goto fail;
3869 		}
3870 		break;
3871 #if defined(CONFIG_DRM_AMD_DC_DCN)
3872 	case CHIP_RAVEN:
3873 	case CHIP_NAVI12:
3874 	case CHIP_NAVI10:
3875 	case CHIP_NAVI14:
3876 	case CHIP_RENOIR:
3877 	case CHIP_SIENNA_CICHLID:
3878 	case CHIP_NAVY_FLOUNDER:
3879 	case CHIP_DIMGREY_CAVEFISH:
3880 	case CHIP_BEIGE_GOBY:
3881 	case CHIP_VANGOGH:
3882 	case CHIP_YELLOW_CARP:
3883 		if (dcn10_register_irq_handlers(dm->adev)) {
3884 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3885 			goto fail;
3886 		}
3887 		break;
3888 #endif
3889 	default:
3890 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3891 		goto fail;
3892 	}
3893 
3894 	return 0;
3895 fail:
3896 	kfree(aencoder);
3897 	kfree(aconnector);
3898 
3899 	return -EINVAL;
3900 }
3901 
3902 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3903 {
3904 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3905 	return;
3906 }
3907 
3908 /******************************************************************************
3909  * amdgpu_display_funcs functions
3910  *****************************************************************************/
3911 
3912 /*
3913  * dm_bandwidth_update - program display watermarks
3914  *
3915  * @adev: amdgpu_device pointer
3916  *
3917  * Calculate and program the display watermarks and line buffer allocation.
3918  */
3919 static void dm_bandwidth_update(struct amdgpu_device *adev)
3920 {
3921 	/* TODO: implement later */
3922 }
3923 
3924 static const struct amdgpu_display_funcs dm_display_funcs = {
3925 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3926 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3927 	.backlight_set_level = NULL, /* never called for DC */
3928 	.backlight_get_level = NULL, /* never called for DC */
3929 	.hpd_sense = NULL,/* called unconditionally */
3930 	.hpd_set_polarity = NULL, /* called unconditionally */
3931 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3932 	.page_flip_get_scanoutpos =
3933 		dm_crtc_get_scanoutpos,/* called unconditionally */
3934 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3935 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3936 };
3937 
3938 #if defined(CONFIG_DEBUG_KERNEL_DC)
3939 
3940 static ssize_t s3_debug_store(struct device *device,
3941 			      struct device_attribute *attr,
3942 			      const char *buf,
3943 			      size_t count)
3944 {
3945 	int ret;
3946 	int s3_state;
3947 	struct drm_device *drm_dev = dev_get_drvdata(device);
3948 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3949 
3950 	ret = kstrtoint(buf, 0, &s3_state);
3951 
3952 	if (ret == 0) {
3953 		if (s3_state) {
3954 			dm_resume(adev);
3955 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3956 		} else
3957 			dm_suspend(adev);
3958 	}
3959 
3960 	return ret == 0 ? count : 0;
3961 }
3962 
3963 DEVICE_ATTR_WO(s3_debug);
3964 
3965 #endif
3966 
3967 static int dm_early_init(void *handle)
3968 {
3969 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3970 
3971 	switch (adev->asic_type) {
3972 #if defined(CONFIG_DRM_AMD_DC_SI)
3973 	case CHIP_TAHITI:
3974 	case CHIP_PITCAIRN:
3975 	case CHIP_VERDE:
3976 		adev->mode_info.num_crtc = 6;
3977 		adev->mode_info.num_hpd = 6;
3978 		adev->mode_info.num_dig = 6;
3979 		break;
3980 	case CHIP_OLAND:
3981 		adev->mode_info.num_crtc = 2;
3982 		adev->mode_info.num_hpd = 2;
3983 		adev->mode_info.num_dig = 2;
3984 		break;
3985 #endif
3986 	case CHIP_BONAIRE:
3987 	case CHIP_HAWAII:
3988 		adev->mode_info.num_crtc = 6;
3989 		adev->mode_info.num_hpd = 6;
3990 		adev->mode_info.num_dig = 6;
3991 		break;
3992 	case CHIP_KAVERI:
3993 		adev->mode_info.num_crtc = 4;
3994 		adev->mode_info.num_hpd = 6;
3995 		adev->mode_info.num_dig = 7;
3996 		break;
3997 	case CHIP_KABINI:
3998 	case CHIP_MULLINS:
3999 		adev->mode_info.num_crtc = 2;
4000 		adev->mode_info.num_hpd = 6;
4001 		adev->mode_info.num_dig = 6;
4002 		break;
4003 	case CHIP_FIJI:
4004 	case CHIP_TONGA:
4005 		adev->mode_info.num_crtc = 6;
4006 		adev->mode_info.num_hpd = 6;
4007 		adev->mode_info.num_dig = 7;
4008 		break;
4009 	case CHIP_CARRIZO:
4010 		adev->mode_info.num_crtc = 3;
4011 		adev->mode_info.num_hpd = 6;
4012 		adev->mode_info.num_dig = 9;
4013 		break;
4014 	case CHIP_STONEY:
4015 		adev->mode_info.num_crtc = 2;
4016 		adev->mode_info.num_hpd = 6;
4017 		adev->mode_info.num_dig = 9;
4018 		break;
4019 	case CHIP_POLARIS11:
4020 	case CHIP_POLARIS12:
4021 		adev->mode_info.num_crtc = 5;
4022 		adev->mode_info.num_hpd = 5;
4023 		adev->mode_info.num_dig = 5;
4024 		break;
4025 	case CHIP_POLARIS10:
4026 	case CHIP_VEGAM:
4027 		adev->mode_info.num_crtc = 6;
4028 		adev->mode_info.num_hpd = 6;
4029 		adev->mode_info.num_dig = 6;
4030 		break;
4031 	case CHIP_VEGA10:
4032 	case CHIP_VEGA12:
4033 	case CHIP_VEGA20:
4034 		adev->mode_info.num_crtc = 6;
4035 		adev->mode_info.num_hpd = 6;
4036 		adev->mode_info.num_dig = 6;
4037 		break;
4038 #if defined(CONFIG_DRM_AMD_DC_DCN)
4039 	case CHIP_RAVEN:
4040 	case CHIP_RENOIR:
4041 	case CHIP_VANGOGH:
4042 		adev->mode_info.num_crtc = 4;
4043 		adev->mode_info.num_hpd = 4;
4044 		adev->mode_info.num_dig = 4;
4045 		break;
4046 	case CHIP_NAVI10:
4047 	case CHIP_NAVI12:
4048 	case CHIP_SIENNA_CICHLID:
4049 	case CHIP_NAVY_FLOUNDER:
4050 		adev->mode_info.num_crtc = 6;
4051 		adev->mode_info.num_hpd = 6;
4052 		adev->mode_info.num_dig = 6;
4053 		break;
4054 	case CHIP_YELLOW_CARP:
4055 		adev->mode_info.num_crtc = 4;
4056 		adev->mode_info.num_hpd = 4;
4057 		adev->mode_info.num_dig = 4;
4058 		break;
4059 	case CHIP_NAVI14:
4060 	case CHIP_DIMGREY_CAVEFISH:
4061 		adev->mode_info.num_crtc = 5;
4062 		adev->mode_info.num_hpd = 5;
4063 		adev->mode_info.num_dig = 5;
4064 		break;
4065 	case CHIP_BEIGE_GOBY:
4066 		adev->mode_info.num_crtc = 2;
4067 		adev->mode_info.num_hpd = 2;
4068 		adev->mode_info.num_dig = 2;
4069 		break;
4070 #endif
4071 	default:
4072 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4073 		return -EINVAL;
4074 	}
4075 
4076 	amdgpu_dm_set_irq_funcs(adev);
4077 
4078 	if (adev->mode_info.funcs == NULL)
4079 		adev->mode_info.funcs = &dm_display_funcs;
4080 
4081 	/*
4082 	 * Note: Do NOT change adev->audio_endpt_rreg and
4083 	 * adev->audio_endpt_wreg because they are initialised in
4084 	 * amdgpu_device_init()
4085 	 */
4086 #if defined(CONFIG_DEBUG_KERNEL_DC)
4087 	device_create_file(
4088 		adev_to_drm(adev)->dev,
4089 		&dev_attr_s3_debug);
4090 #endif
4091 
4092 	return 0;
4093 }
4094 
4095 static bool modeset_required(struct drm_crtc_state *crtc_state,
4096 			     struct dc_stream_state *new_stream,
4097 			     struct dc_stream_state *old_stream)
4098 {
4099 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4100 }
4101 
4102 static bool modereset_required(struct drm_crtc_state *crtc_state)
4103 {
4104 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4105 }
4106 
4107 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4108 {
4109 	drm_encoder_cleanup(encoder);
4110 	kfree(encoder);
4111 }
4112 
4113 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4114 	.destroy = amdgpu_dm_encoder_destroy,
4115 };
4116 
4117 
4118 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4119 					 struct drm_framebuffer *fb,
4120 					 int *min_downscale, int *max_upscale)
4121 {
4122 	struct amdgpu_device *adev = drm_to_adev(dev);
4123 	struct dc *dc = adev->dm.dc;
4124 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4125 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4126 
4127 	switch (fb->format->format) {
4128 	case DRM_FORMAT_P010:
4129 	case DRM_FORMAT_NV12:
4130 	case DRM_FORMAT_NV21:
4131 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4132 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4133 		break;
4134 
4135 	case DRM_FORMAT_XRGB16161616F:
4136 	case DRM_FORMAT_ARGB16161616F:
4137 	case DRM_FORMAT_XBGR16161616F:
4138 	case DRM_FORMAT_ABGR16161616F:
4139 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4140 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4141 		break;
4142 
4143 	default:
4144 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4145 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4146 		break;
4147 	}
4148 
4149 	/*
4150 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4151 	 * scaling factor of 1.0 == 1000 units.
4152 	 */
4153 	if (*max_upscale == 1)
4154 		*max_upscale = 1000;
4155 
4156 	if (*min_downscale == 1)
4157 		*min_downscale = 1000;
4158 }
4159 
4160 
4161 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4162 				struct dc_scaling_info *scaling_info)
4163 {
4164 	int scale_w, scale_h, min_downscale, max_upscale;
4165 
4166 	memset(scaling_info, 0, sizeof(*scaling_info));
4167 
4168 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4169 	scaling_info->src_rect.x = state->src_x >> 16;
4170 	scaling_info->src_rect.y = state->src_y >> 16;
4171 
4172 	/*
4173 	 * For reasons we don't (yet) fully understand a non-zero
4174 	 * src_y coordinate into an NV12 buffer can cause a
4175 	 * system hang. To avoid hangs (and maybe be overly cautious)
4176 	 * let's reject both non-zero src_x and src_y.
4177 	 *
4178 	 * We currently know of only one use-case to reproduce a
4179 	 * scenario with non-zero src_x and src_y for NV12, which
4180 	 * is to gesture the YouTube Android app into full screen
4181 	 * on ChromeOS.
4182 	 */
4183 	if (state->fb &&
4184 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4185 	    (scaling_info->src_rect.x != 0 ||
4186 	     scaling_info->src_rect.y != 0))
4187 		return -EINVAL;
4188 
4189 	scaling_info->src_rect.width = state->src_w >> 16;
4190 	if (scaling_info->src_rect.width == 0)
4191 		return -EINVAL;
4192 
4193 	scaling_info->src_rect.height = state->src_h >> 16;
4194 	if (scaling_info->src_rect.height == 0)
4195 		return -EINVAL;
4196 
4197 	scaling_info->dst_rect.x = state->crtc_x;
4198 	scaling_info->dst_rect.y = state->crtc_y;
4199 
4200 	if (state->crtc_w == 0)
4201 		return -EINVAL;
4202 
4203 	scaling_info->dst_rect.width = state->crtc_w;
4204 
4205 	if (state->crtc_h == 0)
4206 		return -EINVAL;
4207 
4208 	scaling_info->dst_rect.height = state->crtc_h;
4209 
4210 	/* DRM doesn't specify clipping on destination output. */
4211 	scaling_info->clip_rect = scaling_info->dst_rect;
4212 
4213 	/* Validate scaling per-format with DC plane caps */
4214 	if (state->plane && state->plane->dev && state->fb) {
4215 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4216 					     &min_downscale, &max_upscale);
4217 	} else {
4218 		min_downscale = 250;
4219 		max_upscale = 16000;
4220 	}
4221 
4222 	scale_w = scaling_info->dst_rect.width * 1000 /
4223 		  scaling_info->src_rect.width;
4224 
4225 	if (scale_w < min_downscale || scale_w > max_upscale)
4226 		return -EINVAL;
4227 
4228 	scale_h = scaling_info->dst_rect.height * 1000 /
4229 		  scaling_info->src_rect.height;
4230 
4231 	if (scale_h < min_downscale || scale_h > max_upscale)
4232 		return -EINVAL;
4233 
4234 	/*
4235 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4236 	 * assume reasonable defaults based on the format.
4237 	 */
4238 
4239 	return 0;
4240 }
4241 
4242 static void
4243 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4244 				 uint64_t tiling_flags)
4245 {
4246 	/* Fill GFX8 params */
4247 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4248 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4249 
4250 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4251 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4252 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4253 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4254 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4255 
4256 		/* XXX fix me for VI */
4257 		tiling_info->gfx8.num_banks = num_banks;
4258 		tiling_info->gfx8.array_mode =
4259 				DC_ARRAY_2D_TILED_THIN1;
4260 		tiling_info->gfx8.tile_split = tile_split;
4261 		tiling_info->gfx8.bank_width = bankw;
4262 		tiling_info->gfx8.bank_height = bankh;
4263 		tiling_info->gfx8.tile_aspect = mtaspect;
4264 		tiling_info->gfx8.tile_mode =
4265 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4266 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4267 			== DC_ARRAY_1D_TILED_THIN1) {
4268 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4269 	}
4270 
4271 	tiling_info->gfx8.pipe_config =
4272 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4273 }
4274 
4275 static void
4276 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4277 				  union dc_tiling_info *tiling_info)
4278 {
4279 	tiling_info->gfx9.num_pipes =
4280 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4281 	tiling_info->gfx9.num_banks =
4282 		adev->gfx.config.gb_addr_config_fields.num_banks;
4283 	tiling_info->gfx9.pipe_interleave =
4284 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4285 	tiling_info->gfx9.num_shader_engines =
4286 		adev->gfx.config.gb_addr_config_fields.num_se;
4287 	tiling_info->gfx9.max_compressed_frags =
4288 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4289 	tiling_info->gfx9.num_rb_per_se =
4290 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4291 	tiling_info->gfx9.shaderEnable = 1;
4292 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4293 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4294 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4295 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4296 	    adev->asic_type == CHIP_YELLOW_CARP ||
4297 	    adev->asic_type == CHIP_VANGOGH)
4298 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4299 }
4300 
4301 static int
4302 validate_dcc(struct amdgpu_device *adev,
4303 	     const enum surface_pixel_format format,
4304 	     const enum dc_rotation_angle rotation,
4305 	     const union dc_tiling_info *tiling_info,
4306 	     const struct dc_plane_dcc_param *dcc,
4307 	     const struct dc_plane_address *address,
4308 	     const struct plane_size *plane_size)
4309 {
4310 	struct dc *dc = adev->dm.dc;
4311 	struct dc_dcc_surface_param input;
4312 	struct dc_surface_dcc_cap output;
4313 
4314 	memset(&input, 0, sizeof(input));
4315 	memset(&output, 0, sizeof(output));
4316 
4317 	if (!dcc->enable)
4318 		return 0;
4319 
4320 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4321 	    !dc->cap_funcs.get_dcc_compression_cap)
4322 		return -EINVAL;
4323 
4324 	input.format = format;
4325 	input.surface_size.width = plane_size->surface_size.width;
4326 	input.surface_size.height = plane_size->surface_size.height;
4327 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4328 
4329 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4330 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4331 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4332 		input.scan = SCAN_DIRECTION_VERTICAL;
4333 
4334 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4335 		return -EINVAL;
4336 
4337 	if (!output.capable)
4338 		return -EINVAL;
4339 
4340 	if (dcc->independent_64b_blks == 0 &&
4341 	    output.grph.rgb.independent_64b_blks != 0)
4342 		return -EINVAL;
4343 
4344 	return 0;
4345 }
4346 
4347 static bool
4348 modifier_has_dcc(uint64_t modifier)
4349 {
4350 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4351 }
4352 
4353 static unsigned
4354 modifier_gfx9_swizzle_mode(uint64_t modifier)
4355 {
4356 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4357 		return 0;
4358 
4359 	return AMD_FMT_MOD_GET(TILE, modifier);
4360 }
4361 
4362 static const struct drm_format_info *
4363 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4364 {
4365 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4366 }
4367 
4368 static void
4369 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4370 				    union dc_tiling_info *tiling_info,
4371 				    uint64_t modifier)
4372 {
4373 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4374 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4375 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4376 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4377 
4378 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4379 
4380 	if (!IS_AMD_FMT_MOD(modifier))
4381 		return;
4382 
4383 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4384 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4385 
4386 	if (adev->family >= AMDGPU_FAMILY_NV) {
4387 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4388 	} else {
4389 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4390 
4391 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4392 	}
4393 }
4394 
4395 enum dm_micro_swizzle {
4396 	MICRO_SWIZZLE_Z = 0,
4397 	MICRO_SWIZZLE_S = 1,
4398 	MICRO_SWIZZLE_D = 2,
4399 	MICRO_SWIZZLE_R = 3
4400 };
4401 
4402 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4403 					  uint32_t format,
4404 					  uint64_t modifier)
4405 {
4406 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4407 	const struct drm_format_info *info = drm_format_info(format);
4408 	int i;
4409 
4410 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4411 
4412 	if (!info)
4413 		return false;
4414 
4415 	/*
4416 	 * We always have to allow these modifiers:
4417 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4418 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4419 	 */
4420 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4421 	    modifier == DRM_FORMAT_MOD_INVALID) {
4422 		return true;
4423 	}
4424 
4425 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4426 	for (i = 0; i < plane->modifier_count; i++) {
4427 		if (modifier == plane->modifiers[i])
4428 			break;
4429 	}
4430 	if (i == plane->modifier_count)
4431 		return false;
4432 
4433 	/*
4434 	 * For D swizzle the canonical modifier depends on the bpp, so check
4435 	 * it here.
4436 	 */
4437 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4438 	    adev->family >= AMDGPU_FAMILY_NV) {
4439 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4440 			return false;
4441 	}
4442 
4443 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4444 	    info->cpp[0] < 8)
4445 		return false;
4446 
4447 	if (modifier_has_dcc(modifier)) {
4448 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4449 		if (info->cpp[0] != 4)
4450 			return false;
4451 		/* We support multi-planar formats, but not when combined with
4452 		 * additional DCC metadata planes. */
4453 		if (info->num_planes > 1)
4454 			return false;
4455 	}
4456 
4457 	return true;
4458 }
4459 
4460 static void
4461 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4462 {
4463 	if (!*mods)
4464 		return;
4465 
4466 	if (*cap - *size < 1) {
4467 		uint64_t new_cap = *cap * 2;
4468 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4469 
4470 		if (!new_mods) {
4471 			kfree(*mods);
4472 			*mods = NULL;
4473 			return;
4474 		}
4475 
4476 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4477 		kfree(*mods);
4478 		*mods = new_mods;
4479 		*cap = new_cap;
4480 	}
4481 
4482 	(*mods)[*size] = mod;
4483 	*size += 1;
4484 }
4485 
4486 static void
4487 add_gfx9_modifiers(const struct amdgpu_device *adev,
4488 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4489 {
4490 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4491 	int pipe_xor_bits = min(8, pipes +
4492 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4493 	int bank_xor_bits = min(8 - pipe_xor_bits,
4494 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4495 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4496 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4497 
4498 
4499 	if (adev->family == AMDGPU_FAMILY_RV) {
4500 		/* Raven2 and later */
4501 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4502 
4503 		/*
4504 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4505 		 * doesn't support _D on DCN
4506 		 */
4507 
4508 		if (has_constant_encode) {
4509 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4510 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4511 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4512 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4513 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4514 				    AMD_FMT_MOD_SET(DCC, 1) |
4515 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4516 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4517 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4518 		}
4519 
4520 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4521 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4522 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4523 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4524 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4525 			    AMD_FMT_MOD_SET(DCC, 1) |
4526 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4527 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4528 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4529 
4530 		if (has_constant_encode) {
4531 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4532 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4533 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4534 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4535 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4536 				    AMD_FMT_MOD_SET(DCC, 1) |
4537 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4538 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4539 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4540 
4541 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4542 				    AMD_FMT_MOD_SET(RB, rb) |
4543 				    AMD_FMT_MOD_SET(PIPE, pipes));
4544 		}
4545 
4546 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4547 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4548 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4549 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4550 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4551 			    AMD_FMT_MOD_SET(DCC, 1) |
4552 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4553 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4554 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4555 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4556 			    AMD_FMT_MOD_SET(RB, rb) |
4557 			    AMD_FMT_MOD_SET(PIPE, pipes));
4558 	}
4559 
4560 	/*
4561 	 * Only supported for 64bpp on Raven, will be filtered on format in
4562 	 * dm_plane_format_mod_supported.
4563 	 */
4564 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4565 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4566 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4567 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4568 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4569 
4570 	if (adev->family == AMDGPU_FAMILY_RV) {
4571 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4572 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4573 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4574 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4575 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4576 	}
4577 
4578 	/*
4579 	 * Only supported for 64bpp on Raven, will be filtered on format in
4580 	 * dm_plane_format_mod_supported.
4581 	 */
4582 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4583 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4584 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4585 
4586 	if (adev->family == AMDGPU_FAMILY_RV) {
4587 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4588 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4589 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4590 	}
4591 }
4592 
4593 static void
4594 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4595 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4596 {
4597 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4598 
4599 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4600 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4601 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4602 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4603 		    AMD_FMT_MOD_SET(DCC, 1) |
4604 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4605 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4606 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4607 
4608 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4609 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4610 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4611 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4612 		    AMD_FMT_MOD_SET(DCC, 1) |
4613 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4614 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4615 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4616 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4617 
4618 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4619 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4620 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4621 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4622 
4623 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4624 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4625 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4626 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4627 
4628 
4629 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4630 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4631 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4632 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4633 
4634 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4635 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4636 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4637 }
4638 
4639 static void
4640 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4641 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4642 {
4643 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4644 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4645 
4646 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4647 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4648 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4649 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4650 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4651 		    AMD_FMT_MOD_SET(DCC, 1) |
4652 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4653 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4654 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4655 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4656 
4657 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4658 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4659 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4660 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4661 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4662 		    AMD_FMT_MOD_SET(DCC, 1) |
4663 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4664 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4665 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4666 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4667 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4668 
4669 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4670 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4671 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4672 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4673 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4674 
4675 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4676 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4677 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4678 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4679 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4680 
4681 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4682 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4683 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4684 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4685 
4686 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4687 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4688 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4689 }
4690 
4691 static int
4692 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4693 {
4694 	uint64_t size = 0, capacity = 128;
4695 	*mods = NULL;
4696 
4697 	/* We have not hooked up any pre-GFX9 modifiers. */
4698 	if (adev->family < AMDGPU_FAMILY_AI)
4699 		return 0;
4700 
4701 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4702 
4703 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4704 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4705 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4706 		return *mods ? 0 : -ENOMEM;
4707 	}
4708 
4709 	switch (adev->family) {
4710 	case AMDGPU_FAMILY_AI:
4711 	case AMDGPU_FAMILY_RV:
4712 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4713 		break;
4714 	case AMDGPU_FAMILY_NV:
4715 	case AMDGPU_FAMILY_VGH:
4716 	case AMDGPU_FAMILY_YC:
4717 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4718 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4719 		else
4720 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4721 		break;
4722 	}
4723 
4724 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4725 
4726 	/* INVALID marks the end of the list. */
4727 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4728 
4729 	if (!*mods)
4730 		return -ENOMEM;
4731 
4732 	return 0;
4733 }
4734 
4735 static int
4736 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4737 					  const struct amdgpu_framebuffer *afb,
4738 					  const enum surface_pixel_format format,
4739 					  const enum dc_rotation_angle rotation,
4740 					  const struct plane_size *plane_size,
4741 					  union dc_tiling_info *tiling_info,
4742 					  struct dc_plane_dcc_param *dcc,
4743 					  struct dc_plane_address *address,
4744 					  const bool force_disable_dcc)
4745 {
4746 	const uint64_t modifier = afb->base.modifier;
4747 	int ret;
4748 
4749 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4750 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4751 
4752 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4753 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4754 
4755 		dcc->enable = 1;
4756 		dcc->meta_pitch = afb->base.pitches[1];
4757 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4758 
4759 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4760 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4761 	}
4762 
4763 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4764 	if (ret)
4765 		return ret;
4766 
4767 	return 0;
4768 }
4769 
4770 static int
4771 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4772 			     const struct amdgpu_framebuffer *afb,
4773 			     const enum surface_pixel_format format,
4774 			     const enum dc_rotation_angle rotation,
4775 			     const uint64_t tiling_flags,
4776 			     union dc_tiling_info *tiling_info,
4777 			     struct plane_size *plane_size,
4778 			     struct dc_plane_dcc_param *dcc,
4779 			     struct dc_plane_address *address,
4780 			     bool tmz_surface,
4781 			     bool force_disable_dcc)
4782 {
4783 	const struct drm_framebuffer *fb = &afb->base;
4784 	int ret;
4785 
4786 	memset(tiling_info, 0, sizeof(*tiling_info));
4787 	memset(plane_size, 0, sizeof(*plane_size));
4788 	memset(dcc, 0, sizeof(*dcc));
4789 	memset(address, 0, sizeof(*address));
4790 
4791 	address->tmz_surface = tmz_surface;
4792 
4793 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4794 		uint64_t addr = afb->address + fb->offsets[0];
4795 
4796 		plane_size->surface_size.x = 0;
4797 		plane_size->surface_size.y = 0;
4798 		plane_size->surface_size.width = fb->width;
4799 		plane_size->surface_size.height = fb->height;
4800 		plane_size->surface_pitch =
4801 			fb->pitches[0] / fb->format->cpp[0];
4802 
4803 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4804 		address->grph.addr.low_part = lower_32_bits(addr);
4805 		address->grph.addr.high_part = upper_32_bits(addr);
4806 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4807 		uint64_t luma_addr = afb->address + fb->offsets[0];
4808 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4809 
4810 		plane_size->surface_size.x = 0;
4811 		plane_size->surface_size.y = 0;
4812 		plane_size->surface_size.width = fb->width;
4813 		plane_size->surface_size.height = fb->height;
4814 		plane_size->surface_pitch =
4815 			fb->pitches[0] / fb->format->cpp[0];
4816 
4817 		plane_size->chroma_size.x = 0;
4818 		plane_size->chroma_size.y = 0;
4819 		/* TODO: set these based on surface format */
4820 		plane_size->chroma_size.width = fb->width / 2;
4821 		plane_size->chroma_size.height = fb->height / 2;
4822 
4823 		plane_size->chroma_pitch =
4824 			fb->pitches[1] / fb->format->cpp[1];
4825 
4826 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4827 		address->video_progressive.luma_addr.low_part =
4828 			lower_32_bits(luma_addr);
4829 		address->video_progressive.luma_addr.high_part =
4830 			upper_32_bits(luma_addr);
4831 		address->video_progressive.chroma_addr.low_part =
4832 			lower_32_bits(chroma_addr);
4833 		address->video_progressive.chroma_addr.high_part =
4834 			upper_32_bits(chroma_addr);
4835 	}
4836 
4837 	if (adev->family >= AMDGPU_FAMILY_AI) {
4838 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4839 								rotation, plane_size,
4840 								tiling_info, dcc,
4841 								address,
4842 								force_disable_dcc);
4843 		if (ret)
4844 			return ret;
4845 	} else {
4846 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4847 	}
4848 
4849 	return 0;
4850 }
4851 
4852 static void
4853 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4854 			       bool *per_pixel_alpha, bool *global_alpha,
4855 			       int *global_alpha_value)
4856 {
4857 	*per_pixel_alpha = false;
4858 	*global_alpha = false;
4859 	*global_alpha_value = 0xff;
4860 
4861 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4862 		return;
4863 
4864 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4865 		static const uint32_t alpha_formats[] = {
4866 			DRM_FORMAT_ARGB8888,
4867 			DRM_FORMAT_RGBA8888,
4868 			DRM_FORMAT_ABGR8888,
4869 		};
4870 		uint32_t format = plane_state->fb->format->format;
4871 		unsigned int i;
4872 
4873 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4874 			if (format == alpha_formats[i]) {
4875 				*per_pixel_alpha = true;
4876 				break;
4877 			}
4878 		}
4879 	}
4880 
4881 	if (plane_state->alpha < 0xffff) {
4882 		*global_alpha = true;
4883 		*global_alpha_value = plane_state->alpha >> 8;
4884 	}
4885 }
4886 
4887 static int
4888 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4889 			    const enum surface_pixel_format format,
4890 			    enum dc_color_space *color_space)
4891 {
4892 	bool full_range;
4893 
4894 	*color_space = COLOR_SPACE_SRGB;
4895 
4896 	/* DRM color properties only affect non-RGB formats. */
4897 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4898 		return 0;
4899 
4900 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4901 
4902 	switch (plane_state->color_encoding) {
4903 	case DRM_COLOR_YCBCR_BT601:
4904 		if (full_range)
4905 			*color_space = COLOR_SPACE_YCBCR601;
4906 		else
4907 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4908 		break;
4909 
4910 	case DRM_COLOR_YCBCR_BT709:
4911 		if (full_range)
4912 			*color_space = COLOR_SPACE_YCBCR709;
4913 		else
4914 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4915 		break;
4916 
4917 	case DRM_COLOR_YCBCR_BT2020:
4918 		if (full_range)
4919 			*color_space = COLOR_SPACE_2020_YCBCR;
4920 		else
4921 			return -EINVAL;
4922 		break;
4923 
4924 	default:
4925 		return -EINVAL;
4926 	}
4927 
4928 	return 0;
4929 }
4930 
4931 static int
4932 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4933 			    const struct drm_plane_state *plane_state,
4934 			    const uint64_t tiling_flags,
4935 			    struct dc_plane_info *plane_info,
4936 			    struct dc_plane_address *address,
4937 			    bool tmz_surface,
4938 			    bool force_disable_dcc)
4939 {
4940 	const struct drm_framebuffer *fb = plane_state->fb;
4941 	const struct amdgpu_framebuffer *afb =
4942 		to_amdgpu_framebuffer(plane_state->fb);
4943 	int ret;
4944 
4945 	memset(plane_info, 0, sizeof(*plane_info));
4946 
4947 	switch (fb->format->format) {
4948 	case DRM_FORMAT_C8:
4949 		plane_info->format =
4950 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4951 		break;
4952 	case DRM_FORMAT_RGB565:
4953 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4954 		break;
4955 	case DRM_FORMAT_XRGB8888:
4956 	case DRM_FORMAT_ARGB8888:
4957 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4958 		break;
4959 	case DRM_FORMAT_XRGB2101010:
4960 	case DRM_FORMAT_ARGB2101010:
4961 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4962 		break;
4963 	case DRM_FORMAT_XBGR2101010:
4964 	case DRM_FORMAT_ABGR2101010:
4965 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4966 		break;
4967 	case DRM_FORMAT_XBGR8888:
4968 	case DRM_FORMAT_ABGR8888:
4969 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4970 		break;
4971 	case DRM_FORMAT_NV21:
4972 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4973 		break;
4974 	case DRM_FORMAT_NV12:
4975 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4976 		break;
4977 	case DRM_FORMAT_P010:
4978 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4979 		break;
4980 	case DRM_FORMAT_XRGB16161616F:
4981 	case DRM_FORMAT_ARGB16161616F:
4982 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4983 		break;
4984 	case DRM_FORMAT_XBGR16161616F:
4985 	case DRM_FORMAT_ABGR16161616F:
4986 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4987 		break;
4988 	case DRM_FORMAT_XRGB16161616:
4989 	case DRM_FORMAT_ARGB16161616:
4990 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4991 		break;
4992 	case DRM_FORMAT_XBGR16161616:
4993 	case DRM_FORMAT_ABGR16161616:
4994 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4995 		break;
4996 	default:
4997 		DRM_ERROR(
4998 			"Unsupported screen format %p4cc\n",
4999 			&fb->format->format);
5000 		return -EINVAL;
5001 	}
5002 
5003 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5004 	case DRM_MODE_ROTATE_0:
5005 		plane_info->rotation = ROTATION_ANGLE_0;
5006 		break;
5007 	case DRM_MODE_ROTATE_90:
5008 		plane_info->rotation = ROTATION_ANGLE_90;
5009 		break;
5010 	case DRM_MODE_ROTATE_180:
5011 		plane_info->rotation = ROTATION_ANGLE_180;
5012 		break;
5013 	case DRM_MODE_ROTATE_270:
5014 		plane_info->rotation = ROTATION_ANGLE_270;
5015 		break;
5016 	default:
5017 		plane_info->rotation = ROTATION_ANGLE_0;
5018 		break;
5019 	}
5020 
5021 	plane_info->visible = true;
5022 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5023 
5024 	plane_info->layer_index = 0;
5025 
5026 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5027 					  &plane_info->color_space);
5028 	if (ret)
5029 		return ret;
5030 
5031 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5032 					   plane_info->rotation, tiling_flags,
5033 					   &plane_info->tiling_info,
5034 					   &plane_info->plane_size,
5035 					   &plane_info->dcc, address, tmz_surface,
5036 					   force_disable_dcc);
5037 	if (ret)
5038 		return ret;
5039 
5040 	fill_blending_from_plane_state(
5041 		plane_state, &plane_info->per_pixel_alpha,
5042 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5043 
5044 	return 0;
5045 }
5046 
5047 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5048 				    struct dc_plane_state *dc_plane_state,
5049 				    struct drm_plane_state *plane_state,
5050 				    struct drm_crtc_state *crtc_state)
5051 {
5052 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5053 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5054 	struct dc_scaling_info scaling_info;
5055 	struct dc_plane_info plane_info;
5056 	int ret;
5057 	bool force_disable_dcc = false;
5058 
5059 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5060 	if (ret)
5061 		return ret;
5062 
5063 	dc_plane_state->src_rect = scaling_info.src_rect;
5064 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5065 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5066 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5067 
5068 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5069 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5070 					  afb->tiling_flags,
5071 					  &plane_info,
5072 					  &dc_plane_state->address,
5073 					  afb->tmz_surface,
5074 					  force_disable_dcc);
5075 	if (ret)
5076 		return ret;
5077 
5078 	dc_plane_state->format = plane_info.format;
5079 	dc_plane_state->color_space = plane_info.color_space;
5080 	dc_plane_state->format = plane_info.format;
5081 	dc_plane_state->plane_size = plane_info.plane_size;
5082 	dc_plane_state->rotation = plane_info.rotation;
5083 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5084 	dc_plane_state->stereo_format = plane_info.stereo_format;
5085 	dc_plane_state->tiling_info = plane_info.tiling_info;
5086 	dc_plane_state->visible = plane_info.visible;
5087 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5088 	dc_plane_state->global_alpha = plane_info.global_alpha;
5089 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5090 	dc_plane_state->dcc = plane_info.dcc;
5091 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5092 	dc_plane_state->flip_int_enabled = true;
5093 
5094 	/*
5095 	 * Always set input transfer function, since plane state is refreshed
5096 	 * every time.
5097 	 */
5098 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5099 	if (ret)
5100 		return ret;
5101 
5102 	return 0;
5103 }
5104 
5105 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5106 					   const struct dm_connector_state *dm_state,
5107 					   struct dc_stream_state *stream)
5108 {
5109 	enum amdgpu_rmx_type rmx_type;
5110 
5111 	struct rect src = { 0 }; /* viewport in composition space*/
5112 	struct rect dst = { 0 }; /* stream addressable area */
5113 
5114 	/* no mode. nothing to be done */
5115 	if (!mode)
5116 		return;
5117 
5118 	/* Full screen scaling by default */
5119 	src.width = mode->hdisplay;
5120 	src.height = mode->vdisplay;
5121 	dst.width = stream->timing.h_addressable;
5122 	dst.height = stream->timing.v_addressable;
5123 
5124 	if (dm_state) {
5125 		rmx_type = dm_state->scaling;
5126 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5127 			if (src.width * dst.height <
5128 					src.height * dst.width) {
5129 				/* height needs less upscaling/more downscaling */
5130 				dst.width = src.width *
5131 						dst.height / src.height;
5132 			} else {
5133 				/* width needs less upscaling/more downscaling */
5134 				dst.height = src.height *
5135 						dst.width / src.width;
5136 			}
5137 		} else if (rmx_type == RMX_CENTER) {
5138 			dst = src;
5139 		}
5140 
5141 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5142 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5143 
5144 		if (dm_state->underscan_enable) {
5145 			dst.x += dm_state->underscan_hborder / 2;
5146 			dst.y += dm_state->underscan_vborder / 2;
5147 			dst.width -= dm_state->underscan_hborder;
5148 			dst.height -= dm_state->underscan_vborder;
5149 		}
5150 	}
5151 
5152 	stream->src = src;
5153 	stream->dst = dst;
5154 
5155 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5156 		      dst.x, dst.y, dst.width, dst.height);
5157 
5158 }
5159 
5160 static enum dc_color_depth
5161 convert_color_depth_from_display_info(const struct drm_connector *connector,
5162 				      bool is_y420, int requested_bpc)
5163 {
5164 	uint8_t bpc;
5165 
5166 	if (is_y420) {
5167 		bpc = 8;
5168 
5169 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5170 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5171 			bpc = 16;
5172 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5173 			bpc = 12;
5174 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5175 			bpc = 10;
5176 	} else {
5177 		bpc = (uint8_t)connector->display_info.bpc;
5178 		/* Assume 8 bpc by default if no bpc is specified. */
5179 		bpc = bpc ? bpc : 8;
5180 	}
5181 
5182 	if (requested_bpc > 0) {
5183 		/*
5184 		 * Cap display bpc based on the user requested value.
5185 		 *
5186 		 * The value for state->max_bpc may not correctly updated
5187 		 * depending on when the connector gets added to the state
5188 		 * or if this was called outside of atomic check, so it
5189 		 * can't be used directly.
5190 		 */
5191 		bpc = min_t(u8, bpc, requested_bpc);
5192 
5193 		/* Round down to the nearest even number. */
5194 		bpc = bpc - (bpc & 1);
5195 	}
5196 
5197 	switch (bpc) {
5198 	case 0:
5199 		/*
5200 		 * Temporary Work around, DRM doesn't parse color depth for
5201 		 * EDID revision before 1.4
5202 		 * TODO: Fix edid parsing
5203 		 */
5204 		return COLOR_DEPTH_888;
5205 	case 6:
5206 		return COLOR_DEPTH_666;
5207 	case 8:
5208 		return COLOR_DEPTH_888;
5209 	case 10:
5210 		return COLOR_DEPTH_101010;
5211 	case 12:
5212 		return COLOR_DEPTH_121212;
5213 	case 14:
5214 		return COLOR_DEPTH_141414;
5215 	case 16:
5216 		return COLOR_DEPTH_161616;
5217 	default:
5218 		return COLOR_DEPTH_UNDEFINED;
5219 	}
5220 }
5221 
5222 static enum dc_aspect_ratio
5223 get_aspect_ratio(const struct drm_display_mode *mode_in)
5224 {
5225 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5226 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5227 }
5228 
5229 static enum dc_color_space
5230 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5231 {
5232 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5233 
5234 	switch (dc_crtc_timing->pixel_encoding)	{
5235 	case PIXEL_ENCODING_YCBCR422:
5236 	case PIXEL_ENCODING_YCBCR444:
5237 	case PIXEL_ENCODING_YCBCR420:
5238 	{
5239 		/*
5240 		 * 27030khz is the separation point between HDTV and SDTV
5241 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5242 		 * respectively
5243 		 */
5244 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5245 			if (dc_crtc_timing->flags.Y_ONLY)
5246 				color_space =
5247 					COLOR_SPACE_YCBCR709_LIMITED;
5248 			else
5249 				color_space = COLOR_SPACE_YCBCR709;
5250 		} else {
5251 			if (dc_crtc_timing->flags.Y_ONLY)
5252 				color_space =
5253 					COLOR_SPACE_YCBCR601_LIMITED;
5254 			else
5255 				color_space = COLOR_SPACE_YCBCR601;
5256 		}
5257 
5258 	}
5259 	break;
5260 	case PIXEL_ENCODING_RGB:
5261 		color_space = COLOR_SPACE_SRGB;
5262 		break;
5263 
5264 	default:
5265 		WARN_ON(1);
5266 		break;
5267 	}
5268 
5269 	return color_space;
5270 }
5271 
5272 static bool adjust_colour_depth_from_display_info(
5273 	struct dc_crtc_timing *timing_out,
5274 	const struct drm_display_info *info)
5275 {
5276 	enum dc_color_depth depth = timing_out->display_color_depth;
5277 	int normalized_clk;
5278 	do {
5279 		normalized_clk = timing_out->pix_clk_100hz / 10;
5280 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5281 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5282 			normalized_clk /= 2;
5283 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5284 		switch (depth) {
5285 		case COLOR_DEPTH_888:
5286 			break;
5287 		case COLOR_DEPTH_101010:
5288 			normalized_clk = (normalized_clk * 30) / 24;
5289 			break;
5290 		case COLOR_DEPTH_121212:
5291 			normalized_clk = (normalized_clk * 36) / 24;
5292 			break;
5293 		case COLOR_DEPTH_161616:
5294 			normalized_clk = (normalized_clk * 48) / 24;
5295 			break;
5296 		default:
5297 			/* The above depths are the only ones valid for HDMI. */
5298 			return false;
5299 		}
5300 		if (normalized_clk <= info->max_tmds_clock) {
5301 			timing_out->display_color_depth = depth;
5302 			return true;
5303 		}
5304 	} while (--depth > COLOR_DEPTH_666);
5305 	return false;
5306 }
5307 
5308 static void fill_stream_properties_from_drm_display_mode(
5309 	struct dc_stream_state *stream,
5310 	const struct drm_display_mode *mode_in,
5311 	const struct drm_connector *connector,
5312 	const struct drm_connector_state *connector_state,
5313 	const struct dc_stream_state *old_stream,
5314 	int requested_bpc)
5315 {
5316 	struct dc_crtc_timing *timing_out = &stream->timing;
5317 	const struct drm_display_info *info = &connector->display_info;
5318 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5319 	struct hdmi_vendor_infoframe hv_frame;
5320 	struct hdmi_avi_infoframe avi_frame;
5321 
5322 	memset(&hv_frame, 0, sizeof(hv_frame));
5323 	memset(&avi_frame, 0, sizeof(avi_frame));
5324 
5325 	timing_out->h_border_left = 0;
5326 	timing_out->h_border_right = 0;
5327 	timing_out->v_border_top = 0;
5328 	timing_out->v_border_bottom = 0;
5329 	/* TODO: un-hardcode */
5330 	if (drm_mode_is_420_only(info, mode_in)
5331 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5332 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5333 	else if (drm_mode_is_420_also(info, mode_in)
5334 			&& aconnector->force_yuv420_output)
5335 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5336 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5337 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5338 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5339 	else
5340 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5341 
5342 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5343 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5344 		connector,
5345 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5346 		requested_bpc);
5347 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5348 	timing_out->hdmi_vic = 0;
5349 
5350 	if(old_stream) {
5351 		timing_out->vic = old_stream->timing.vic;
5352 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5353 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5354 	} else {
5355 		timing_out->vic = drm_match_cea_mode(mode_in);
5356 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5357 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5358 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5359 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5360 	}
5361 
5362 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5363 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5364 		timing_out->vic = avi_frame.video_code;
5365 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5366 		timing_out->hdmi_vic = hv_frame.vic;
5367 	}
5368 
5369 	if (is_freesync_video_mode(mode_in, aconnector)) {
5370 		timing_out->h_addressable = mode_in->hdisplay;
5371 		timing_out->h_total = mode_in->htotal;
5372 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5373 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5374 		timing_out->v_total = mode_in->vtotal;
5375 		timing_out->v_addressable = mode_in->vdisplay;
5376 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5377 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5378 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5379 	} else {
5380 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5381 		timing_out->h_total = mode_in->crtc_htotal;
5382 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5383 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5384 		timing_out->v_total = mode_in->crtc_vtotal;
5385 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5386 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5387 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5388 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5389 	}
5390 
5391 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5392 
5393 	stream->output_color_space = get_output_color_space(timing_out);
5394 
5395 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5396 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5397 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5398 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5399 		    drm_mode_is_420_also(info, mode_in) &&
5400 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5401 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5402 			adjust_colour_depth_from_display_info(timing_out, info);
5403 		}
5404 	}
5405 }
5406 
5407 static void fill_audio_info(struct audio_info *audio_info,
5408 			    const struct drm_connector *drm_connector,
5409 			    const struct dc_sink *dc_sink)
5410 {
5411 	int i = 0;
5412 	int cea_revision = 0;
5413 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5414 
5415 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5416 	audio_info->product_id = edid_caps->product_id;
5417 
5418 	cea_revision = drm_connector->display_info.cea_rev;
5419 
5420 	strscpy(audio_info->display_name,
5421 		edid_caps->display_name,
5422 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5423 
5424 	if (cea_revision >= 3) {
5425 		audio_info->mode_count = edid_caps->audio_mode_count;
5426 
5427 		for (i = 0; i < audio_info->mode_count; ++i) {
5428 			audio_info->modes[i].format_code =
5429 					(enum audio_format_code)
5430 					(edid_caps->audio_modes[i].format_code);
5431 			audio_info->modes[i].channel_count =
5432 					edid_caps->audio_modes[i].channel_count;
5433 			audio_info->modes[i].sample_rates.all =
5434 					edid_caps->audio_modes[i].sample_rate;
5435 			audio_info->modes[i].sample_size =
5436 					edid_caps->audio_modes[i].sample_size;
5437 		}
5438 	}
5439 
5440 	audio_info->flags.all = edid_caps->speaker_flags;
5441 
5442 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5443 	if (drm_connector->latency_present[0]) {
5444 		audio_info->video_latency = drm_connector->video_latency[0];
5445 		audio_info->audio_latency = drm_connector->audio_latency[0];
5446 	}
5447 
5448 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5449 
5450 }
5451 
5452 static void
5453 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5454 				      struct drm_display_mode *dst_mode)
5455 {
5456 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5457 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5458 	dst_mode->crtc_clock = src_mode->crtc_clock;
5459 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5460 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5461 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5462 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5463 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5464 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5465 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5466 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5467 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5468 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5469 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5470 }
5471 
5472 static void
5473 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5474 					const struct drm_display_mode *native_mode,
5475 					bool scale_enabled)
5476 {
5477 	if (scale_enabled) {
5478 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5479 	} else if (native_mode->clock == drm_mode->clock &&
5480 			native_mode->htotal == drm_mode->htotal &&
5481 			native_mode->vtotal == drm_mode->vtotal) {
5482 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5483 	} else {
5484 		/* no scaling nor amdgpu inserted, no need to patch */
5485 	}
5486 }
5487 
5488 static struct dc_sink *
5489 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5490 {
5491 	struct dc_sink_init_data sink_init_data = { 0 };
5492 	struct dc_sink *sink = NULL;
5493 	sink_init_data.link = aconnector->dc_link;
5494 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5495 
5496 	sink = dc_sink_create(&sink_init_data);
5497 	if (!sink) {
5498 		DRM_ERROR("Failed to create sink!\n");
5499 		return NULL;
5500 	}
5501 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5502 
5503 	return sink;
5504 }
5505 
5506 static void set_multisync_trigger_params(
5507 		struct dc_stream_state *stream)
5508 {
5509 	struct dc_stream_state *master = NULL;
5510 
5511 	if (stream->triggered_crtc_reset.enabled) {
5512 		master = stream->triggered_crtc_reset.event_source;
5513 		stream->triggered_crtc_reset.event =
5514 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5515 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5516 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5517 	}
5518 }
5519 
5520 static void set_master_stream(struct dc_stream_state *stream_set[],
5521 			      int stream_count)
5522 {
5523 	int j, highest_rfr = 0, master_stream = 0;
5524 
5525 	for (j = 0;  j < stream_count; j++) {
5526 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5527 			int refresh_rate = 0;
5528 
5529 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5530 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5531 			if (refresh_rate > highest_rfr) {
5532 				highest_rfr = refresh_rate;
5533 				master_stream = j;
5534 			}
5535 		}
5536 	}
5537 	for (j = 0;  j < stream_count; j++) {
5538 		if (stream_set[j])
5539 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5540 	}
5541 }
5542 
5543 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5544 {
5545 	int i = 0;
5546 	struct dc_stream_state *stream;
5547 
5548 	if (context->stream_count < 2)
5549 		return;
5550 	for (i = 0; i < context->stream_count ; i++) {
5551 		if (!context->streams[i])
5552 			continue;
5553 		/*
5554 		 * TODO: add a function to read AMD VSDB bits and set
5555 		 * crtc_sync_master.multi_sync_enabled flag
5556 		 * For now it's set to false
5557 		 */
5558 	}
5559 
5560 	set_master_stream(context->streams, context->stream_count);
5561 
5562 	for (i = 0; i < context->stream_count ; i++) {
5563 		stream = context->streams[i];
5564 
5565 		if (!stream)
5566 			continue;
5567 
5568 		set_multisync_trigger_params(stream);
5569 	}
5570 }
5571 
5572 #if defined(CONFIG_DRM_AMD_DC_DCN)
5573 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5574 							struct dc_sink *sink, struct dc_stream_state *stream,
5575 							struct dsc_dec_dpcd_caps *dsc_caps)
5576 {
5577 	stream->timing.flags.DSC = 0;
5578 
5579 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5580 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5581 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5582 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5583 				      dsc_caps);
5584 	}
5585 }
5586 
5587 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5588 										struct dc_sink *sink, struct dc_stream_state *stream,
5589 										struct dsc_dec_dpcd_caps *dsc_caps)
5590 {
5591 	struct drm_connector *drm_connector = &aconnector->base;
5592 	uint32_t link_bandwidth_kbps;
5593 
5594 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5595 							dc_link_get_link_cap(aconnector->dc_link));
5596 	/* Set DSC policy according to dsc_clock_en */
5597 	dc_dsc_policy_set_enable_dsc_when_not_needed(
5598 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5599 
5600 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5601 
5602 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5603 						dsc_caps,
5604 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5605 						0,
5606 						link_bandwidth_kbps,
5607 						&stream->timing,
5608 						&stream->timing.dsc_cfg)) {
5609 			stream->timing.flags.DSC = 1;
5610 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5611 		}
5612 	}
5613 
5614 	/* Overwrite the stream flag if DSC is enabled through debugfs */
5615 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5616 		stream->timing.flags.DSC = 1;
5617 
5618 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5619 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5620 
5621 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5622 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5623 
5624 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5625 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5626 }
5627 #endif
5628 
5629 /**
5630  * DOC: FreeSync Video
5631  *
5632  * When a userspace application wants to play a video, the content follows a
5633  * standard format definition that usually specifies the FPS for that format.
5634  * The below list illustrates some video format and the expected FPS,
5635  * respectively:
5636  *
5637  * - TV/NTSC (23.976 FPS)
5638  * - Cinema (24 FPS)
5639  * - TV/PAL (25 FPS)
5640  * - TV/NTSC (29.97 FPS)
5641  * - TV/NTSC (30 FPS)
5642  * - Cinema HFR (48 FPS)
5643  * - TV/PAL (50 FPS)
5644  * - Commonly used (60 FPS)
5645  * - Multiples of 24 (48,72,96 FPS)
5646  *
5647  * The list of standards video format is not huge and can be added to the
5648  * connector modeset list beforehand. With that, userspace can leverage
5649  * FreeSync to extends the front porch in order to attain the target refresh
5650  * rate. Such a switch will happen seamlessly, without screen blanking or
5651  * reprogramming of the output in any other way. If the userspace requests a
5652  * modesetting change compatible with FreeSync modes that only differ in the
5653  * refresh rate, DC will skip the full update and avoid blink during the
5654  * transition. For example, the video player can change the modesetting from
5655  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5656  * causing any display blink. This same concept can be applied to a mode
5657  * setting change.
5658  */
5659 static struct drm_display_mode *
5660 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5661 			  bool use_probed_modes)
5662 {
5663 	struct drm_display_mode *m, *m_pref = NULL;
5664 	u16 current_refresh, highest_refresh;
5665 	struct list_head *list_head = use_probed_modes ?
5666 						    &aconnector->base.probed_modes :
5667 						    &aconnector->base.modes;
5668 
5669 	if (aconnector->freesync_vid_base.clock != 0)
5670 		return &aconnector->freesync_vid_base;
5671 
5672 	/* Find the preferred mode */
5673 	list_for_each_entry (m, list_head, head) {
5674 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5675 			m_pref = m;
5676 			break;
5677 		}
5678 	}
5679 
5680 	if (!m_pref) {
5681 		/* Probably an EDID with no preferred mode. Fallback to first entry */
5682 		m_pref = list_first_entry_or_null(
5683 			&aconnector->base.modes, struct drm_display_mode, head);
5684 		if (!m_pref) {
5685 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5686 			return NULL;
5687 		}
5688 	}
5689 
5690 	highest_refresh = drm_mode_vrefresh(m_pref);
5691 
5692 	/*
5693 	 * Find the mode with highest refresh rate with same resolution.
5694 	 * For some monitors, preferred mode is not the mode with highest
5695 	 * supported refresh rate.
5696 	 */
5697 	list_for_each_entry (m, list_head, head) {
5698 		current_refresh  = drm_mode_vrefresh(m);
5699 
5700 		if (m->hdisplay == m_pref->hdisplay &&
5701 		    m->vdisplay == m_pref->vdisplay &&
5702 		    highest_refresh < current_refresh) {
5703 			highest_refresh = current_refresh;
5704 			m_pref = m;
5705 		}
5706 	}
5707 
5708 	aconnector->freesync_vid_base = *m_pref;
5709 	return m_pref;
5710 }
5711 
5712 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5713 				   struct amdgpu_dm_connector *aconnector)
5714 {
5715 	struct drm_display_mode *high_mode;
5716 	int timing_diff;
5717 
5718 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5719 	if (!high_mode || !mode)
5720 		return false;
5721 
5722 	timing_diff = high_mode->vtotal - mode->vtotal;
5723 
5724 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5725 	    high_mode->hdisplay != mode->hdisplay ||
5726 	    high_mode->vdisplay != mode->vdisplay ||
5727 	    high_mode->hsync_start != mode->hsync_start ||
5728 	    high_mode->hsync_end != mode->hsync_end ||
5729 	    high_mode->htotal != mode->htotal ||
5730 	    high_mode->hskew != mode->hskew ||
5731 	    high_mode->vscan != mode->vscan ||
5732 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5733 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5734 		return false;
5735 	else
5736 		return true;
5737 }
5738 
5739 static struct dc_stream_state *
5740 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5741 		       const struct drm_display_mode *drm_mode,
5742 		       const struct dm_connector_state *dm_state,
5743 		       const struct dc_stream_state *old_stream,
5744 		       int requested_bpc)
5745 {
5746 	struct drm_display_mode *preferred_mode = NULL;
5747 	struct drm_connector *drm_connector;
5748 	const struct drm_connector_state *con_state =
5749 		dm_state ? &dm_state->base : NULL;
5750 	struct dc_stream_state *stream = NULL;
5751 	struct drm_display_mode mode = *drm_mode;
5752 	struct drm_display_mode saved_mode;
5753 	struct drm_display_mode *freesync_mode = NULL;
5754 	bool native_mode_found = false;
5755 	bool recalculate_timing = false;
5756 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5757 	int mode_refresh;
5758 	int preferred_refresh = 0;
5759 #if defined(CONFIG_DRM_AMD_DC_DCN)
5760 	struct dsc_dec_dpcd_caps dsc_caps;
5761 #endif
5762 	struct dc_sink *sink = NULL;
5763 
5764 	memset(&saved_mode, 0, sizeof(saved_mode));
5765 
5766 	if (aconnector == NULL) {
5767 		DRM_ERROR("aconnector is NULL!\n");
5768 		return stream;
5769 	}
5770 
5771 	drm_connector = &aconnector->base;
5772 
5773 	if (!aconnector->dc_sink) {
5774 		sink = create_fake_sink(aconnector);
5775 		if (!sink)
5776 			return stream;
5777 	} else {
5778 		sink = aconnector->dc_sink;
5779 		dc_sink_retain(sink);
5780 	}
5781 
5782 	stream = dc_create_stream_for_sink(sink);
5783 
5784 	if (stream == NULL) {
5785 		DRM_ERROR("Failed to create stream for sink!\n");
5786 		goto finish;
5787 	}
5788 
5789 	stream->dm_stream_context = aconnector;
5790 
5791 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5792 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5793 
5794 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5795 		/* Search for preferred mode */
5796 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5797 			native_mode_found = true;
5798 			break;
5799 		}
5800 	}
5801 	if (!native_mode_found)
5802 		preferred_mode = list_first_entry_or_null(
5803 				&aconnector->base.modes,
5804 				struct drm_display_mode,
5805 				head);
5806 
5807 	mode_refresh = drm_mode_vrefresh(&mode);
5808 
5809 	if (preferred_mode == NULL) {
5810 		/*
5811 		 * This may not be an error, the use case is when we have no
5812 		 * usermode calls to reset and set mode upon hotplug. In this
5813 		 * case, we call set mode ourselves to restore the previous mode
5814 		 * and the modelist may not be filled in in time.
5815 		 */
5816 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5817 	} else {
5818 		recalculate_timing = amdgpu_freesync_vid_mode &&
5819 				 is_freesync_video_mode(&mode, aconnector);
5820 		if (recalculate_timing) {
5821 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5822 			saved_mode = mode;
5823 			mode = *freesync_mode;
5824 		} else {
5825 			decide_crtc_timing_for_drm_display_mode(
5826 				&mode, preferred_mode, scale);
5827 
5828 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
5829 		}
5830 	}
5831 
5832 	if (recalculate_timing)
5833 		drm_mode_set_crtcinfo(&saved_mode, 0);
5834 	else if (!dm_state)
5835 		drm_mode_set_crtcinfo(&mode, 0);
5836 
5837        /*
5838 	* If scaling is enabled and refresh rate didn't change
5839 	* we copy the vic and polarities of the old timings
5840 	*/
5841 	if (!scale || mode_refresh != preferred_refresh)
5842 		fill_stream_properties_from_drm_display_mode(
5843 			stream, &mode, &aconnector->base, con_state, NULL,
5844 			requested_bpc);
5845 	else
5846 		fill_stream_properties_from_drm_display_mode(
5847 			stream, &mode, &aconnector->base, con_state, old_stream,
5848 			requested_bpc);
5849 
5850 #if defined(CONFIG_DRM_AMD_DC_DCN)
5851 	/* SST DSC determination policy */
5852 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5853 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5854 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5855 #endif
5856 
5857 	update_stream_scaling_settings(&mode, dm_state, stream);
5858 
5859 	fill_audio_info(
5860 		&stream->audio_info,
5861 		drm_connector,
5862 		sink);
5863 
5864 	update_stream_signal(stream, sink);
5865 
5866 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5867 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5868 
5869 	if (stream->link->psr_settings.psr_feature_enabled) {
5870 		//
5871 		// should decide stream support vsc sdp colorimetry capability
5872 		// before building vsc info packet
5873 		//
5874 		stream->use_vsc_sdp_for_colorimetry = false;
5875 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5876 			stream->use_vsc_sdp_for_colorimetry =
5877 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5878 		} else {
5879 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5880 				stream->use_vsc_sdp_for_colorimetry = true;
5881 		}
5882 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5883 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
5884 
5885 	}
5886 finish:
5887 	dc_sink_release(sink);
5888 
5889 	return stream;
5890 }
5891 
5892 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5893 {
5894 	drm_crtc_cleanup(crtc);
5895 	kfree(crtc);
5896 }
5897 
5898 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5899 				  struct drm_crtc_state *state)
5900 {
5901 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5902 
5903 	/* TODO Destroy dc_stream objects are stream object is flattened */
5904 	if (cur->stream)
5905 		dc_stream_release(cur->stream);
5906 
5907 
5908 	__drm_atomic_helper_crtc_destroy_state(state);
5909 
5910 
5911 	kfree(state);
5912 }
5913 
5914 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5915 {
5916 	struct dm_crtc_state *state;
5917 
5918 	if (crtc->state)
5919 		dm_crtc_destroy_state(crtc, crtc->state);
5920 
5921 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5922 	if (WARN_ON(!state))
5923 		return;
5924 
5925 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5926 }
5927 
5928 static struct drm_crtc_state *
5929 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5930 {
5931 	struct dm_crtc_state *state, *cur;
5932 
5933 	cur = to_dm_crtc_state(crtc->state);
5934 
5935 	if (WARN_ON(!crtc->state))
5936 		return NULL;
5937 
5938 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5939 	if (!state)
5940 		return NULL;
5941 
5942 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5943 
5944 	if (cur->stream) {
5945 		state->stream = cur->stream;
5946 		dc_stream_retain(state->stream);
5947 	}
5948 
5949 	state->active_planes = cur->active_planes;
5950 	state->vrr_infopacket = cur->vrr_infopacket;
5951 	state->abm_level = cur->abm_level;
5952 	state->vrr_supported = cur->vrr_supported;
5953 	state->freesync_config = cur->freesync_config;
5954 	state->cm_has_degamma = cur->cm_has_degamma;
5955 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5956 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
5957 
5958 	return &state->base;
5959 }
5960 
5961 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5962 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5963 {
5964 	crtc_debugfs_init(crtc);
5965 
5966 	return 0;
5967 }
5968 #endif
5969 
5970 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5971 {
5972 	enum dc_irq_source irq_source;
5973 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5974 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5975 	int rc;
5976 
5977 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5978 
5979 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5980 
5981 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5982 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
5983 	return rc;
5984 }
5985 
5986 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5987 {
5988 	enum dc_irq_source irq_source;
5989 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5990 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5991 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5992 #if defined(CONFIG_DRM_AMD_DC_DCN)
5993 	struct amdgpu_display_manager *dm = &adev->dm;
5994 	unsigned long flags;
5995 #endif
5996 	int rc = 0;
5997 
5998 	if (enable) {
5999 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6000 		if (amdgpu_dm_vrr_active(acrtc_state))
6001 			rc = dm_set_vupdate_irq(crtc, true);
6002 	} else {
6003 		/* vblank irq off -> vupdate irq off */
6004 		rc = dm_set_vupdate_irq(crtc, false);
6005 	}
6006 
6007 	if (rc)
6008 		return rc;
6009 
6010 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6011 
6012 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6013 		return -EBUSY;
6014 
6015 	if (amdgpu_in_reset(adev))
6016 		return 0;
6017 
6018 #if defined(CONFIG_DRM_AMD_DC_DCN)
6019 	spin_lock_irqsave(&dm->vblank_lock, flags);
6020 	dm->vblank_workqueue->dm = dm;
6021 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
6022 	dm->vblank_workqueue->enable = enable;
6023 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
6024 	schedule_work(&dm->vblank_workqueue->mall_work);
6025 #endif
6026 
6027 	return 0;
6028 }
6029 
6030 static int dm_enable_vblank(struct drm_crtc *crtc)
6031 {
6032 	return dm_set_vblank(crtc, true);
6033 }
6034 
6035 static void dm_disable_vblank(struct drm_crtc *crtc)
6036 {
6037 	dm_set_vblank(crtc, false);
6038 }
6039 
6040 /* Implemented only the options currently availible for the driver */
6041 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6042 	.reset = dm_crtc_reset_state,
6043 	.destroy = amdgpu_dm_crtc_destroy,
6044 	.set_config = drm_atomic_helper_set_config,
6045 	.page_flip = drm_atomic_helper_page_flip,
6046 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6047 	.atomic_destroy_state = dm_crtc_destroy_state,
6048 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6049 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6050 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6051 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6052 	.enable_vblank = dm_enable_vblank,
6053 	.disable_vblank = dm_disable_vblank,
6054 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6055 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6056 	.late_register = amdgpu_dm_crtc_late_register,
6057 #endif
6058 };
6059 
6060 static enum drm_connector_status
6061 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6062 {
6063 	bool connected;
6064 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6065 
6066 	/*
6067 	 * Notes:
6068 	 * 1. This interface is NOT called in context of HPD irq.
6069 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6070 	 * makes it a bad place for *any* MST-related activity.
6071 	 */
6072 
6073 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6074 	    !aconnector->fake_enable)
6075 		connected = (aconnector->dc_sink != NULL);
6076 	else
6077 		connected = (aconnector->base.force == DRM_FORCE_ON);
6078 
6079 	update_subconnector_property(aconnector);
6080 
6081 	return (connected ? connector_status_connected :
6082 			connector_status_disconnected);
6083 }
6084 
6085 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6086 					    struct drm_connector_state *connector_state,
6087 					    struct drm_property *property,
6088 					    uint64_t val)
6089 {
6090 	struct drm_device *dev = connector->dev;
6091 	struct amdgpu_device *adev = drm_to_adev(dev);
6092 	struct dm_connector_state *dm_old_state =
6093 		to_dm_connector_state(connector->state);
6094 	struct dm_connector_state *dm_new_state =
6095 		to_dm_connector_state(connector_state);
6096 
6097 	int ret = -EINVAL;
6098 
6099 	if (property == dev->mode_config.scaling_mode_property) {
6100 		enum amdgpu_rmx_type rmx_type;
6101 
6102 		switch (val) {
6103 		case DRM_MODE_SCALE_CENTER:
6104 			rmx_type = RMX_CENTER;
6105 			break;
6106 		case DRM_MODE_SCALE_ASPECT:
6107 			rmx_type = RMX_ASPECT;
6108 			break;
6109 		case DRM_MODE_SCALE_FULLSCREEN:
6110 			rmx_type = RMX_FULL;
6111 			break;
6112 		case DRM_MODE_SCALE_NONE:
6113 		default:
6114 			rmx_type = RMX_OFF;
6115 			break;
6116 		}
6117 
6118 		if (dm_old_state->scaling == rmx_type)
6119 			return 0;
6120 
6121 		dm_new_state->scaling = rmx_type;
6122 		ret = 0;
6123 	} else if (property == adev->mode_info.underscan_hborder_property) {
6124 		dm_new_state->underscan_hborder = val;
6125 		ret = 0;
6126 	} else if (property == adev->mode_info.underscan_vborder_property) {
6127 		dm_new_state->underscan_vborder = val;
6128 		ret = 0;
6129 	} else if (property == adev->mode_info.underscan_property) {
6130 		dm_new_state->underscan_enable = val;
6131 		ret = 0;
6132 	} else if (property == adev->mode_info.abm_level_property) {
6133 		dm_new_state->abm_level = val;
6134 		ret = 0;
6135 	}
6136 
6137 	return ret;
6138 }
6139 
6140 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6141 					    const struct drm_connector_state *state,
6142 					    struct drm_property *property,
6143 					    uint64_t *val)
6144 {
6145 	struct drm_device *dev = connector->dev;
6146 	struct amdgpu_device *adev = drm_to_adev(dev);
6147 	struct dm_connector_state *dm_state =
6148 		to_dm_connector_state(state);
6149 	int ret = -EINVAL;
6150 
6151 	if (property == dev->mode_config.scaling_mode_property) {
6152 		switch (dm_state->scaling) {
6153 		case RMX_CENTER:
6154 			*val = DRM_MODE_SCALE_CENTER;
6155 			break;
6156 		case RMX_ASPECT:
6157 			*val = DRM_MODE_SCALE_ASPECT;
6158 			break;
6159 		case RMX_FULL:
6160 			*val = DRM_MODE_SCALE_FULLSCREEN;
6161 			break;
6162 		case RMX_OFF:
6163 		default:
6164 			*val = DRM_MODE_SCALE_NONE;
6165 			break;
6166 		}
6167 		ret = 0;
6168 	} else if (property == adev->mode_info.underscan_hborder_property) {
6169 		*val = dm_state->underscan_hborder;
6170 		ret = 0;
6171 	} else if (property == adev->mode_info.underscan_vborder_property) {
6172 		*val = dm_state->underscan_vborder;
6173 		ret = 0;
6174 	} else if (property == adev->mode_info.underscan_property) {
6175 		*val = dm_state->underscan_enable;
6176 		ret = 0;
6177 	} else if (property == adev->mode_info.abm_level_property) {
6178 		*val = dm_state->abm_level;
6179 		ret = 0;
6180 	}
6181 
6182 	return ret;
6183 }
6184 
6185 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6186 {
6187 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6188 
6189 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6190 }
6191 
6192 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6193 {
6194 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6195 	const struct dc_link *link = aconnector->dc_link;
6196 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6197 	struct amdgpu_display_manager *dm = &adev->dm;
6198 
6199 	/*
6200 	 * Call only if mst_mgr was iniitalized before since it's not done
6201 	 * for all connector types.
6202 	 */
6203 	if (aconnector->mst_mgr.dev)
6204 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6205 
6206 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6207 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6208 
6209 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6210 	    link->type != dc_connection_none &&
6211 	    dm->backlight_dev) {
6212 		backlight_device_unregister(dm->backlight_dev);
6213 		dm->backlight_dev = NULL;
6214 	}
6215 #endif
6216 
6217 	if (aconnector->dc_em_sink)
6218 		dc_sink_release(aconnector->dc_em_sink);
6219 	aconnector->dc_em_sink = NULL;
6220 	if (aconnector->dc_sink)
6221 		dc_sink_release(aconnector->dc_sink);
6222 	aconnector->dc_sink = NULL;
6223 
6224 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6225 	drm_connector_unregister(connector);
6226 	drm_connector_cleanup(connector);
6227 	if (aconnector->i2c) {
6228 		i2c_del_adapter(&aconnector->i2c->base);
6229 		kfree(aconnector->i2c);
6230 	}
6231 	kfree(aconnector->dm_dp_aux.aux.name);
6232 
6233 	kfree(connector);
6234 }
6235 
6236 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6237 {
6238 	struct dm_connector_state *state =
6239 		to_dm_connector_state(connector->state);
6240 
6241 	if (connector->state)
6242 		__drm_atomic_helper_connector_destroy_state(connector->state);
6243 
6244 	kfree(state);
6245 
6246 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6247 
6248 	if (state) {
6249 		state->scaling = RMX_OFF;
6250 		state->underscan_enable = false;
6251 		state->underscan_hborder = 0;
6252 		state->underscan_vborder = 0;
6253 		state->base.max_requested_bpc = 8;
6254 		state->vcpi_slots = 0;
6255 		state->pbn = 0;
6256 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6257 			state->abm_level = amdgpu_dm_abm_level;
6258 
6259 		__drm_atomic_helper_connector_reset(connector, &state->base);
6260 	}
6261 }
6262 
6263 struct drm_connector_state *
6264 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6265 {
6266 	struct dm_connector_state *state =
6267 		to_dm_connector_state(connector->state);
6268 
6269 	struct dm_connector_state *new_state =
6270 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6271 
6272 	if (!new_state)
6273 		return NULL;
6274 
6275 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6276 
6277 	new_state->freesync_capable = state->freesync_capable;
6278 	new_state->abm_level = state->abm_level;
6279 	new_state->scaling = state->scaling;
6280 	new_state->underscan_enable = state->underscan_enable;
6281 	new_state->underscan_hborder = state->underscan_hborder;
6282 	new_state->underscan_vborder = state->underscan_vborder;
6283 	new_state->vcpi_slots = state->vcpi_slots;
6284 	new_state->pbn = state->pbn;
6285 	return &new_state->base;
6286 }
6287 
6288 static int
6289 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6290 {
6291 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6292 		to_amdgpu_dm_connector(connector);
6293 	int r;
6294 
6295 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6296 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6297 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6298 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6299 		if (r)
6300 			return r;
6301 	}
6302 
6303 #if defined(CONFIG_DEBUG_FS)
6304 	connector_debugfs_init(amdgpu_dm_connector);
6305 #endif
6306 
6307 	return 0;
6308 }
6309 
6310 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6311 	.reset = amdgpu_dm_connector_funcs_reset,
6312 	.detect = amdgpu_dm_connector_detect,
6313 	.fill_modes = drm_helper_probe_single_connector_modes,
6314 	.destroy = amdgpu_dm_connector_destroy,
6315 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6316 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6317 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6318 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6319 	.late_register = amdgpu_dm_connector_late_register,
6320 	.early_unregister = amdgpu_dm_connector_unregister
6321 };
6322 
6323 static int get_modes(struct drm_connector *connector)
6324 {
6325 	return amdgpu_dm_connector_get_modes(connector);
6326 }
6327 
6328 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6329 {
6330 	struct dc_sink_init_data init_params = {
6331 			.link = aconnector->dc_link,
6332 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6333 	};
6334 	struct edid *edid;
6335 
6336 	if (!aconnector->base.edid_blob_ptr) {
6337 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6338 				aconnector->base.name);
6339 
6340 		aconnector->base.force = DRM_FORCE_OFF;
6341 		aconnector->base.override_edid = false;
6342 		return;
6343 	}
6344 
6345 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6346 
6347 	aconnector->edid = edid;
6348 
6349 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6350 		aconnector->dc_link,
6351 		(uint8_t *)edid,
6352 		(edid->extensions + 1) * EDID_LENGTH,
6353 		&init_params);
6354 
6355 	if (aconnector->base.force == DRM_FORCE_ON) {
6356 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6357 		aconnector->dc_link->local_sink :
6358 		aconnector->dc_em_sink;
6359 		dc_sink_retain(aconnector->dc_sink);
6360 	}
6361 }
6362 
6363 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6364 {
6365 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6366 
6367 	/*
6368 	 * In case of headless boot with force on for DP managed connector
6369 	 * Those settings have to be != 0 to get initial modeset
6370 	 */
6371 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6372 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6373 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6374 	}
6375 
6376 
6377 	aconnector->base.override_edid = true;
6378 	create_eml_sink(aconnector);
6379 }
6380 
6381 static struct dc_stream_state *
6382 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6383 				const struct drm_display_mode *drm_mode,
6384 				const struct dm_connector_state *dm_state,
6385 				const struct dc_stream_state *old_stream)
6386 {
6387 	struct drm_connector *connector = &aconnector->base;
6388 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6389 	struct dc_stream_state *stream;
6390 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6391 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6392 	enum dc_status dc_result = DC_OK;
6393 
6394 	do {
6395 		stream = create_stream_for_sink(aconnector, drm_mode,
6396 						dm_state, old_stream,
6397 						requested_bpc);
6398 		if (stream == NULL) {
6399 			DRM_ERROR("Failed to create stream for sink!\n");
6400 			break;
6401 		}
6402 
6403 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6404 
6405 		if (dc_result != DC_OK) {
6406 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6407 				      drm_mode->hdisplay,
6408 				      drm_mode->vdisplay,
6409 				      drm_mode->clock,
6410 				      dc_result,
6411 				      dc_status_to_str(dc_result));
6412 
6413 			dc_stream_release(stream);
6414 			stream = NULL;
6415 			requested_bpc -= 2; /* lower bpc to retry validation */
6416 		}
6417 
6418 	} while (stream == NULL && requested_bpc >= 6);
6419 
6420 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6421 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6422 
6423 		aconnector->force_yuv420_output = true;
6424 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6425 						dm_state, old_stream);
6426 		aconnector->force_yuv420_output = false;
6427 	}
6428 
6429 	return stream;
6430 }
6431 
6432 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6433 				   struct drm_display_mode *mode)
6434 {
6435 	int result = MODE_ERROR;
6436 	struct dc_sink *dc_sink;
6437 	/* TODO: Unhardcode stream count */
6438 	struct dc_stream_state *stream;
6439 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6440 
6441 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6442 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6443 		return result;
6444 
6445 	/*
6446 	 * Only run this the first time mode_valid is called to initilialize
6447 	 * EDID mgmt
6448 	 */
6449 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6450 		!aconnector->dc_em_sink)
6451 		handle_edid_mgmt(aconnector);
6452 
6453 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6454 
6455 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6456 				aconnector->base.force != DRM_FORCE_ON) {
6457 		DRM_ERROR("dc_sink is NULL!\n");
6458 		goto fail;
6459 	}
6460 
6461 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6462 	if (stream) {
6463 		dc_stream_release(stream);
6464 		result = MODE_OK;
6465 	}
6466 
6467 fail:
6468 	/* TODO: error handling*/
6469 	return result;
6470 }
6471 
6472 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6473 				struct dc_info_packet *out)
6474 {
6475 	struct hdmi_drm_infoframe frame;
6476 	unsigned char buf[30]; /* 26 + 4 */
6477 	ssize_t len;
6478 	int ret, i;
6479 
6480 	memset(out, 0, sizeof(*out));
6481 
6482 	if (!state->hdr_output_metadata)
6483 		return 0;
6484 
6485 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6486 	if (ret)
6487 		return ret;
6488 
6489 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6490 	if (len < 0)
6491 		return (int)len;
6492 
6493 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6494 	if (len != 30)
6495 		return -EINVAL;
6496 
6497 	/* Prepare the infopacket for DC. */
6498 	switch (state->connector->connector_type) {
6499 	case DRM_MODE_CONNECTOR_HDMIA:
6500 		out->hb0 = 0x87; /* type */
6501 		out->hb1 = 0x01; /* version */
6502 		out->hb2 = 0x1A; /* length */
6503 		out->sb[0] = buf[3]; /* checksum */
6504 		i = 1;
6505 		break;
6506 
6507 	case DRM_MODE_CONNECTOR_DisplayPort:
6508 	case DRM_MODE_CONNECTOR_eDP:
6509 		out->hb0 = 0x00; /* sdp id, zero */
6510 		out->hb1 = 0x87; /* type */
6511 		out->hb2 = 0x1D; /* payload len - 1 */
6512 		out->hb3 = (0x13 << 2); /* sdp version */
6513 		out->sb[0] = 0x01; /* version */
6514 		out->sb[1] = 0x1A; /* length */
6515 		i = 2;
6516 		break;
6517 
6518 	default:
6519 		return -EINVAL;
6520 	}
6521 
6522 	memcpy(&out->sb[i], &buf[4], 26);
6523 	out->valid = true;
6524 
6525 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6526 		       sizeof(out->sb), false);
6527 
6528 	return 0;
6529 }
6530 
6531 static int
6532 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6533 				 struct drm_atomic_state *state)
6534 {
6535 	struct drm_connector_state *new_con_state =
6536 		drm_atomic_get_new_connector_state(state, conn);
6537 	struct drm_connector_state *old_con_state =
6538 		drm_atomic_get_old_connector_state(state, conn);
6539 	struct drm_crtc *crtc = new_con_state->crtc;
6540 	struct drm_crtc_state *new_crtc_state;
6541 	int ret;
6542 
6543 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6544 
6545 	if (!crtc)
6546 		return 0;
6547 
6548 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6549 		struct dc_info_packet hdr_infopacket;
6550 
6551 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6552 		if (ret)
6553 			return ret;
6554 
6555 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6556 		if (IS_ERR(new_crtc_state))
6557 			return PTR_ERR(new_crtc_state);
6558 
6559 		/*
6560 		 * DC considers the stream backends changed if the
6561 		 * static metadata changes. Forcing the modeset also
6562 		 * gives a simple way for userspace to switch from
6563 		 * 8bpc to 10bpc when setting the metadata to enter
6564 		 * or exit HDR.
6565 		 *
6566 		 * Changing the static metadata after it's been
6567 		 * set is permissible, however. So only force a
6568 		 * modeset if we're entering or exiting HDR.
6569 		 */
6570 		new_crtc_state->mode_changed =
6571 			!old_con_state->hdr_output_metadata ||
6572 			!new_con_state->hdr_output_metadata;
6573 	}
6574 
6575 	return 0;
6576 }
6577 
6578 static const struct drm_connector_helper_funcs
6579 amdgpu_dm_connector_helper_funcs = {
6580 	/*
6581 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6582 	 * modes will be filtered by drm_mode_validate_size(), and those modes
6583 	 * are missing after user start lightdm. So we need to renew modes list.
6584 	 * in get_modes call back, not just return the modes count
6585 	 */
6586 	.get_modes = get_modes,
6587 	.mode_valid = amdgpu_dm_connector_mode_valid,
6588 	.atomic_check = amdgpu_dm_connector_atomic_check,
6589 };
6590 
6591 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6592 {
6593 }
6594 
6595 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6596 {
6597 	struct drm_atomic_state *state = new_crtc_state->state;
6598 	struct drm_plane *plane;
6599 	int num_active = 0;
6600 
6601 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6602 		struct drm_plane_state *new_plane_state;
6603 
6604 		/* Cursor planes are "fake". */
6605 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6606 			continue;
6607 
6608 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6609 
6610 		if (!new_plane_state) {
6611 			/*
6612 			 * The plane is enable on the CRTC and hasn't changed
6613 			 * state. This means that it previously passed
6614 			 * validation and is therefore enabled.
6615 			 */
6616 			num_active += 1;
6617 			continue;
6618 		}
6619 
6620 		/* We need a framebuffer to be considered enabled. */
6621 		num_active += (new_plane_state->fb != NULL);
6622 	}
6623 
6624 	return num_active;
6625 }
6626 
6627 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6628 					 struct drm_crtc_state *new_crtc_state)
6629 {
6630 	struct dm_crtc_state *dm_new_crtc_state =
6631 		to_dm_crtc_state(new_crtc_state);
6632 
6633 	dm_new_crtc_state->active_planes = 0;
6634 
6635 	if (!dm_new_crtc_state->stream)
6636 		return;
6637 
6638 	dm_new_crtc_state->active_planes =
6639 		count_crtc_active_planes(new_crtc_state);
6640 }
6641 
6642 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6643 				       struct drm_atomic_state *state)
6644 {
6645 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6646 									  crtc);
6647 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6648 	struct dc *dc = adev->dm.dc;
6649 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6650 	int ret = -EINVAL;
6651 
6652 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6653 
6654 	dm_update_crtc_active_planes(crtc, crtc_state);
6655 
6656 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6657 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
6658 		return ret;
6659 	}
6660 
6661 	/*
6662 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6663 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6664 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6665 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6666 	 */
6667 	if (crtc_state->enable &&
6668 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6669 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6670 		return -EINVAL;
6671 	}
6672 
6673 	/* In some use cases, like reset, no stream is attached */
6674 	if (!dm_crtc_state->stream)
6675 		return 0;
6676 
6677 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6678 		return 0;
6679 
6680 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6681 	return ret;
6682 }
6683 
6684 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6685 				      const struct drm_display_mode *mode,
6686 				      struct drm_display_mode *adjusted_mode)
6687 {
6688 	return true;
6689 }
6690 
6691 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6692 	.disable = dm_crtc_helper_disable,
6693 	.atomic_check = dm_crtc_helper_atomic_check,
6694 	.mode_fixup = dm_crtc_helper_mode_fixup,
6695 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6696 };
6697 
6698 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6699 {
6700 
6701 }
6702 
6703 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6704 {
6705 	switch (display_color_depth) {
6706 		case COLOR_DEPTH_666:
6707 			return 6;
6708 		case COLOR_DEPTH_888:
6709 			return 8;
6710 		case COLOR_DEPTH_101010:
6711 			return 10;
6712 		case COLOR_DEPTH_121212:
6713 			return 12;
6714 		case COLOR_DEPTH_141414:
6715 			return 14;
6716 		case COLOR_DEPTH_161616:
6717 			return 16;
6718 		default:
6719 			break;
6720 		}
6721 	return 0;
6722 }
6723 
6724 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6725 					  struct drm_crtc_state *crtc_state,
6726 					  struct drm_connector_state *conn_state)
6727 {
6728 	struct drm_atomic_state *state = crtc_state->state;
6729 	struct drm_connector *connector = conn_state->connector;
6730 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6731 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6732 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6733 	struct drm_dp_mst_topology_mgr *mst_mgr;
6734 	struct drm_dp_mst_port *mst_port;
6735 	enum dc_color_depth color_depth;
6736 	int clock, bpp = 0;
6737 	bool is_y420 = false;
6738 
6739 	if (!aconnector->port || !aconnector->dc_sink)
6740 		return 0;
6741 
6742 	mst_port = aconnector->port;
6743 	mst_mgr = &aconnector->mst_port->mst_mgr;
6744 
6745 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6746 		return 0;
6747 
6748 	if (!state->duplicated) {
6749 		int max_bpc = conn_state->max_requested_bpc;
6750 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6751 				aconnector->force_yuv420_output;
6752 		color_depth = convert_color_depth_from_display_info(connector,
6753 								    is_y420,
6754 								    max_bpc);
6755 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6756 		clock = adjusted_mode->clock;
6757 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6758 	}
6759 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6760 									   mst_mgr,
6761 									   mst_port,
6762 									   dm_new_connector_state->pbn,
6763 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6764 	if (dm_new_connector_state->vcpi_slots < 0) {
6765 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6766 		return dm_new_connector_state->vcpi_slots;
6767 	}
6768 	return 0;
6769 }
6770 
6771 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6772 	.disable = dm_encoder_helper_disable,
6773 	.atomic_check = dm_encoder_helper_atomic_check
6774 };
6775 
6776 #if defined(CONFIG_DRM_AMD_DC_DCN)
6777 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6778 					    struct dc_state *dc_state)
6779 {
6780 	struct dc_stream_state *stream = NULL;
6781 	struct drm_connector *connector;
6782 	struct drm_connector_state *new_con_state;
6783 	struct amdgpu_dm_connector *aconnector;
6784 	struct dm_connector_state *dm_conn_state;
6785 	int i, j, clock, bpp;
6786 	int vcpi, pbn_div, pbn = 0;
6787 
6788 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6789 
6790 		aconnector = to_amdgpu_dm_connector(connector);
6791 
6792 		if (!aconnector->port)
6793 			continue;
6794 
6795 		if (!new_con_state || !new_con_state->crtc)
6796 			continue;
6797 
6798 		dm_conn_state = to_dm_connector_state(new_con_state);
6799 
6800 		for (j = 0; j < dc_state->stream_count; j++) {
6801 			stream = dc_state->streams[j];
6802 			if (!stream)
6803 				continue;
6804 
6805 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6806 				break;
6807 
6808 			stream = NULL;
6809 		}
6810 
6811 		if (!stream)
6812 			continue;
6813 
6814 		if (stream->timing.flags.DSC != 1) {
6815 			drm_dp_mst_atomic_enable_dsc(state,
6816 						     aconnector->port,
6817 						     dm_conn_state->pbn,
6818 						     0,
6819 						     false);
6820 			continue;
6821 		}
6822 
6823 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6824 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6825 		clock = stream->timing.pix_clk_100hz / 10;
6826 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6827 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6828 						    aconnector->port,
6829 						    pbn, pbn_div,
6830 						    true);
6831 		if (vcpi < 0)
6832 			return vcpi;
6833 
6834 		dm_conn_state->pbn = pbn;
6835 		dm_conn_state->vcpi_slots = vcpi;
6836 	}
6837 	return 0;
6838 }
6839 #endif
6840 
6841 static void dm_drm_plane_reset(struct drm_plane *plane)
6842 {
6843 	struct dm_plane_state *amdgpu_state = NULL;
6844 
6845 	if (plane->state)
6846 		plane->funcs->atomic_destroy_state(plane, plane->state);
6847 
6848 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6849 	WARN_ON(amdgpu_state == NULL);
6850 
6851 	if (amdgpu_state)
6852 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6853 }
6854 
6855 static struct drm_plane_state *
6856 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6857 {
6858 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6859 
6860 	old_dm_plane_state = to_dm_plane_state(plane->state);
6861 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6862 	if (!dm_plane_state)
6863 		return NULL;
6864 
6865 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6866 
6867 	if (old_dm_plane_state->dc_state) {
6868 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6869 		dc_plane_state_retain(dm_plane_state->dc_state);
6870 	}
6871 
6872 	return &dm_plane_state->base;
6873 }
6874 
6875 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6876 				struct drm_plane_state *state)
6877 {
6878 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6879 
6880 	if (dm_plane_state->dc_state)
6881 		dc_plane_state_release(dm_plane_state->dc_state);
6882 
6883 	drm_atomic_helper_plane_destroy_state(plane, state);
6884 }
6885 
6886 static const struct drm_plane_funcs dm_plane_funcs = {
6887 	.update_plane	= drm_atomic_helper_update_plane,
6888 	.disable_plane	= drm_atomic_helper_disable_plane,
6889 	.destroy	= drm_primary_helper_destroy,
6890 	.reset = dm_drm_plane_reset,
6891 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6892 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6893 	.format_mod_supported = dm_plane_format_mod_supported,
6894 };
6895 
6896 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6897 				      struct drm_plane_state *new_state)
6898 {
6899 	struct amdgpu_framebuffer *afb;
6900 	struct drm_gem_object *obj;
6901 	struct amdgpu_device *adev;
6902 	struct amdgpu_bo *rbo;
6903 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6904 	struct list_head list;
6905 	struct ttm_validate_buffer tv;
6906 	struct ww_acquire_ctx ticket;
6907 	uint32_t domain;
6908 	int r;
6909 
6910 	if (!new_state->fb) {
6911 		DRM_DEBUG_KMS("No FB bound\n");
6912 		return 0;
6913 	}
6914 
6915 	afb = to_amdgpu_framebuffer(new_state->fb);
6916 	obj = new_state->fb->obj[0];
6917 	rbo = gem_to_amdgpu_bo(obj);
6918 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6919 	INIT_LIST_HEAD(&list);
6920 
6921 	tv.bo = &rbo->tbo;
6922 	tv.num_shared = 1;
6923 	list_add(&tv.head, &list);
6924 
6925 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6926 	if (r) {
6927 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6928 		return r;
6929 	}
6930 
6931 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6932 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6933 	else
6934 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6935 
6936 	r = amdgpu_bo_pin(rbo, domain);
6937 	if (unlikely(r != 0)) {
6938 		if (r != -ERESTARTSYS)
6939 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6940 		ttm_eu_backoff_reservation(&ticket, &list);
6941 		return r;
6942 	}
6943 
6944 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6945 	if (unlikely(r != 0)) {
6946 		amdgpu_bo_unpin(rbo);
6947 		ttm_eu_backoff_reservation(&ticket, &list);
6948 		DRM_ERROR("%p bind failed\n", rbo);
6949 		return r;
6950 	}
6951 
6952 	ttm_eu_backoff_reservation(&ticket, &list);
6953 
6954 	afb->address = amdgpu_bo_gpu_offset(rbo);
6955 
6956 	amdgpu_bo_ref(rbo);
6957 
6958 	/**
6959 	 * We don't do surface updates on planes that have been newly created,
6960 	 * but we also don't have the afb->address during atomic check.
6961 	 *
6962 	 * Fill in buffer attributes depending on the address here, but only on
6963 	 * newly created planes since they're not being used by DC yet and this
6964 	 * won't modify global state.
6965 	 */
6966 	dm_plane_state_old = to_dm_plane_state(plane->state);
6967 	dm_plane_state_new = to_dm_plane_state(new_state);
6968 
6969 	if (dm_plane_state_new->dc_state &&
6970 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6971 		struct dc_plane_state *plane_state =
6972 			dm_plane_state_new->dc_state;
6973 		bool force_disable_dcc = !plane_state->dcc.enable;
6974 
6975 		fill_plane_buffer_attributes(
6976 			adev, afb, plane_state->format, plane_state->rotation,
6977 			afb->tiling_flags,
6978 			&plane_state->tiling_info, &plane_state->plane_size,
6979 			&plane_state->dcc, &plane_state->address,
6980 			afb->tmz_surface, force_disable_dcc);
6981 	}
6982 
6983 	return 0;
6984 }
6985 
6986 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6987 				       struct drm_plane_state *old_state)
6988 {
6989 	struct amdgpu_bo *rbo;
6990 	int r;
6991 
6992 	if (!old_state->fb)
6993 		return;
6994 
6995 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6996 	r = amdgpu_bo_reserve(rbo, false);
6997 	if (unlikely(r)) {
6998 		DRM_ERROR("failed to reserve rbo before unpin\n");
6999 		return;
7000 	}
7001 
7002 	amdgpu_bo_unpin(rbo);
7003 	amdgpu_bo_unreserve(rbo);
7004 	amdgpu_bo_unref(&rbo);
7005 }
7006 
7007 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7008 				       struct drm_crtc_state *new_crtc_state)
7009 {
7010 	struct drm_framebuffer *fb = state->fb;
7011 	int min_downscale, max_upscale;
7012 	int min_scale = 0;
7013 	int max_scale = INT_MAX;
7014 
7015 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7016 	if (fb && state->crtc) {
7017 		/* Validate viewport to cover the case when only the position changes */
7018 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7019 			int viewport_width = state->crtc_w;
7020 			int viewport_height = state->crtc_h;
7021 
7022 			if (state->crtc_x < 0)
7023 				viewport_width += state->crtc_x;
7024 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7025 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7026 
7027 			if (state->crtc_y < 0)
7028 				viewport_height += state->crtc_y;
7029 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7030 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7031 
7032 			if (viewport_width < 0 || viewport_height < 0) {
7033 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7034 				return -EINVAL;
7035 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7036 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7037 				return -EINVAL;
7038 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7039 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7040 				return -EINVAL;
7041 			}
7042 
7043 		}
7044 
7045 		/* Get min/max allowed scaling factors from plane caps. */
7046 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7047 					     &min_downscale, &max_upscale);
7048 		/*
7049 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7050 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7051 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7052 		 */
7053 		min_scale = (1000 << 16) / max_upscale;
7054 		max_scale = (1000 << 16) / min_downscale;
7055 	}
7056 
7057 	return drm_atomic_helper_check_plane_state(
7058 		state, new_crtc_state, min_scale, max_scale, true, true);
7059 }
7060 
7061 static int dm_plane_atomic_check(struct drm_plane *plane,
7062 				 struct drm_atomic_state *state)
7063 {
7064 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7065 										 plane);
7066 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7067 	struct dc *dc = adev->dm.dc;
7068 	struct dm_plane_state *dm_plane_state;
7069 	struct dc_scaling_info scaling_info;
7070 	struct drm_crtc_state *new_crtc_state;
7071 	int ret;
7072 
7073 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7074 
7075 	dm_plane_state = to_dm_plane_state(new_plane_state);
7076 
7077 	if (!dm_plane_state->dc_state)
7078 		return 0;
7079 
7080 	new_crtc_state =
7081 		drm_atomic_get_new_crtc_state(state,
7082 					      new_plane_state->crtc);
7083 	if (!new_crtc_state)
7084 		return -EINVAL;
7085 
7086 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7087 	if (ret)
7088 		return ret;
7089 
7090 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7091 	if (ret)
7092 		return ret;
7093 
7094 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7095 		return 0;
7096 
7097 	return -EINVAL;
7098 }
7099 
7100 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7101 				       struct drm_atomic_state *state)
7102 {
7103 	/* Only support async updates on cursor planes. */
7104 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7105 		return -EINVAL;
7106 
7107 	return 0;
7108 }
7109 
7110 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7111 					 struct drm_atomic_state *state)
7112 {
7113 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7114 									   plane);
7115 	struct drm_plane_state *old_state =
7116 		drm_atomic_get_old_plane_state(state, plane);
7117 
7118 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7119 
7120 	swap(plane->state->fb, new_state->fb);
7121 
7122 	plane->state->src_x = new_state->src_x;
7123 	plane->state->src_y = new_state->src_y;
7124 	plane->state->src_w = new_state->src_w;
7125 	plane->state->src_h = new_state->src_h;
7126 	plane->state->crtc_x = new_state->crtc_x;
7127 	plane->state->crtc_y = new_state->crtc_y;
7128 	plane->state->crtc_w = new_state->crtc_w;
7129 	plane->state->crtc_h = new_state->crtc_h;
7130 
7131 	handle_cursor_update(plane, old_state);
7132 }
7133 
7134 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7135 	.prepare_fb = dm_plane_helper_prepare_fb,
7136 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7137 	.atomic_check = dm_plane_atomic_check,
7138 	.atomic_async_check = dm_plane_atomic_async_check,
7139 	.atomic_async_update = dm_plane_atomic_async_update
7140 };
7141 
7142 /*
7143  * TODO: these are currently initialized to rgb formats only.
7144  * For future use cases we should either initialize them dynamically based on
7145  * plane capabilities, or initialize this array to all formats, so internal drm
7146  * check will succeed, and let DC implement proper check
7147  */
7148 static const uint32_t rgb_formats[] = {
7149 	DRM_FORMAT_XRGB8888,
7150 	DRM_FORMAT_ARGB8888,
7151 	DRM_FORMAT_RGBA8888,
7152 	DRM_FORMAT_XRGB2101010,
7153 	DRM_FORMAT_XBGR2101010,
7154 	DRM_FORMAT_ARGB2101010,
7155 	DRM_FORMAT_ABGR2101010,
7156 	DRM_FORMAT_XRGB16161616,
7157 	DRM_FORMAT_XBGR16161616,
7158 	DRM_FORMAT_ARGB16161616,
7159 	DRM_FORMAT_ABGR16161616,
7160 	DRM_FORMAT_XBGR8888,
7161 	DRM_FORMAT_ABGR8888,
7162 	DRM_FORMAT_RGB565,
7163 };
7164 
7165 static const uint32_t overlay_formats[] = {
7166 	DRM_FORMAT_XRGB8888,
7167 	DRM_FORMAT_ARGB8888,
7168 	DRM_FORMAT_RGBA8888,
7169 	DRM_FORMAT_XBGR8888,
7170 	DRM_FORMAT_ABGR8888,
7171 	DRM_FORMAT_RGB565
7172 };
7173 
7174 static const u32 cursor_formats[] = {
7175 	DRM_FORMAT_ARGB8888
7176 };
7177 
7178 static int get_plane_formats(const struct drm_plane *plane,
7179 			     const struct dc_plane_cap *plane_cap,
7180 			     uint32_t *formats, int max_formats)
7181 {
7182 	int i, num_formats = 0;
7183 
7184 	/*
7185 	 * TODO: Query support for each group of formats directly from
7186 	 * DC plane caps. This will require adding more formats to the
7187 	 * caps list.
7188 	 */
7189 
7190 	switch (plane->type) {
7191 	case DRM_PLANE_TYPE_PRIMARY:
7192 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7193 			if (num_formats >= max_formats)
7194 				break;
7195 
7196 			formats[num_formats++] = rgb_formats[i];
7197 		}
7198 
7199 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7200 			formats[num_formats++] = DRM_FORMAT_NV12;
7201 		if (plane_cap && plane_cap->pixel_format_support.p010)
7202 			formats[num_formats++] = DRM_FORMAT_P010;
7203 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7204 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7205 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7206 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7207 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7208 		}
7209 		break;
7210 
7211 	case DRM_PLANE_TYPE_OVERLAY:
7212 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7213 			if (num_formats >= max_formats)
7214 				break;
7215 
7216 			formats[num_formats++] = overlay_formats[i];
7217 		}
7218 		break;
7219 
7220 	case DRM_PLANE_TYPE_CURSOR:
7221 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7222 			if (num_formats >= max_formats)
7223 				break;
7224 
7225 			formats[num_formats++] = cursor_formats[i];
7226 		}
7227 		break;
7228 	}
7229 
7230 	return num_formats;
7231 }
7232 
7233 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7234 				struct drm_plane *plane,
7235 				unsigned long possible_crtcs,
7236 				const struct dc_plane_cap *plane_cap)
7237 {
7238 	uint32_t formats[32];
7239 	int num_formats;
7240 	int res = -EPERM;
7241 	unsigned int supported_rotations;
7242 	uint64_t *modifiers = NULL;
7243 
7244 	num_formats = get_plane_formats(plane, plane_cap, formats,
7245 					ARRAY_SIZE(formats));
7246 
7247 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7248 	if (res)
7249 		return res;
7250 
7251 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7252 				       &dm_plane_funcs, formats, num_formats,
7253 				       modifiers, plane->type, NULL);
7254 	kfree(modifiers);
7255 	if (res)
7256 		return res;
7257 
7258 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7259 	    plane_cap && plane_cap->per_pixel_alpha) {
7260 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7261 					  BIT(DRM_MODE_BLEND_PREMULTI);
7262 
7263 		drm_plane_create_alpha_property(plane);
7264 		drm_plane_create_blend_mode_property(plane, blend_caps);
7265 	}
7266 
7267 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7268 	    plane_cap &&
7269 	    (plane_cap->pixel_format_support.nv12 ||
7270 	     plane_cap->pixel_format_support.p010)) {
7271 		/* This only affects YUV formats. */
7272 		drm_plane_create_color_properties(
7273 			plane,
7274 			BIT(DRM_COLOR_YCBCR_BT601) |
7275 			BIT(DRM_COLOR_YCBCR_BT709) |
7276 			BIT(DRM_COLOR_YCBCR_BT2020),
7277 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7278 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7279 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7280 	}
7281 
7282 	supported_rotations =
7283 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7284 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7285 
7286 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7287 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7288 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7289 						   supported_rotations);
7290 
7291 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7292 
7293 	/* Create (reset) the plane state */
7294 	if (plane->funcs->reset)
7295 		plane->funcs->reset(plane);
7296 
7297 	return 0;
7298 }
7299 
7300 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7301 			       struct drm_plane *plane,
7302 			       uint32_t crtc_index)
7303 {
7304 	struct amdgpu_crtc *acrtc = NULL;
7305 	struct drm_plane *cursor_plane;
7306 
7307 	int res = -ENOMEM;
7308 
7309 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7310 	if (!cursor_plane)
7311 		goto fail;
7312 
7313 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7314 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7315 
7316 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7317 	if (!acrtc)
7318 		goto fail;
7319 
7320 	res = drm_crtc_init_with_planes(
7321 			dm->ddev,
7322 			&acrtc->base,
7323 			plane,
7324 			cursor_plane,
7325 			&amdgpu_dm_crtc_funcs, NULL);
7326 
7327 	if (res)
7328 		goto fail;
7329 
7330 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7331 
7332 	/* Create (reset) the plane state */
7333 	if (acrtc->base.funcs->reset)
7334 		acrtc->base.funcs->reset(&acrtc->base);
7335 
7336 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7337 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7338 
7339 	acrtc->crtc_id = crtc_index;
7340 	acrtc->base.enabled = false;
7341 	acrtc->otg_inst = -1;
7342 
7343 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7344 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7345 				   true, MAX_COLOR_LUT_ENTRIES);
7346 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7347 
7348 	return 0;
7349 
7350 fail:
7351 	kfree(acrtc);
7352 	kfree(cursor_plane);
7353 	return res;
7354 }
7355 
7356 
7357 static int to_drm_connector_type(enum signal_type st)
7358 {
7359 	switch (st) {
7360 	case SIGNAL_TYPE_HDMI_TYPE_A:
7361 		return DRM_MODE_CONNECTOR_HDMIA;
7362 	case SIGNAL_TYPE_EDP:
7363 		return DRM_MODE_CONNECTOR_eDP;
7364 	case SIGNAL_TYPE_LVDS:
7365 		return DRM_MODE_CONNECTOR_LVDS;
7366 	case SIGNAL_TYPE_RGB:
7367 		return DRM_MODE_CONNECTOR_VGA;
7368 	case SIGNAL_TYPE_DISPLAY_PORT:
7369 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7370 		return DRM_MODE_CONNECTOR_DisplayPort;
7371 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7372 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7373 		return DRM_MODE_CONNECTOR_DVID;
7374 	case SIGNAL_TYPE_VIRTUAL:
7375 		return DRM_MODE_CONNECTOR_VIRTUAL;
7376 
7377 	default:
7378 		return DRM_MODE_CONNECTOR_Unknown;
7379 	}
7380 }
7381 
7382 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7383 {
7384 	struct drm_encoder *encoder;
7385 
7386 	/* There is only one encoder per connector */
7387 	drm_connector_for_each_possible_encoder(connector, encoder)
7388 		return encoder;
7389 
7390 	return NULL;
7391 }
7392 
7393 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7394 {
7395 	struct drm_encoder *encoder;
7396 	struct amdgpu_encoder *amdgpu_encoder;
7397 
7398 	encoder = amdgpu_dm_connector_to_encoder(connector);
7399 
7400 	if (encoder == NULL)
7401 		return;
7402 
7403 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7404 
7405 	amdgpu_encoder->native_mode.clock = 0;
7406 
7407 	if (!list_empty(&connector->probed_modes)) {
7408 		struct drm_display_mode *preferred_mode = NULL;
7409 
7410 		list_for_each_entry(preferred_mode,
7411 				    &connector->probed_modes,
7412 				    head) {
7413 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7414 				amdgpu_encoder->native_mode = *preferred_mode;
7415 
7416 			break;
7417 		}
7418 
7419 	}
7420 }
7421 
7422 static struct drm_display_mode *
7423 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7424 			     char *name,
7425 			     int hdisplay, int vdisplay)
7426 {
7427 	struct drm_device *dev = encoder->dev;
7428 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7429 	struct drm_display_mode *mode = NULL;
7430 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7431 
7432 	mode = drm_mode_duplicate(dev, native_mode);
7433 
7434 	if (mode == NULL)
7435 		return NULL;
7436 
7437 	mode->hdisplay = hdisplay;
7438 	mode->vdisplay = vdisplay;
7439 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7440 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7441 
7442 	return mode;
7443 
7444 }
7445 
7446 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7447 						 struct drm_connector *connector)
7448 {
7449 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7450 	struct drm_display_mode *mode = NULL;
7451 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7452 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7453 				to_amdgpu_dm_connector(connector);
7454 	int i;
7455 	int n;
7456 	struct mode_size {
7457 		char name[DRM_DISPLAY_MODE_LEN];
7458 		int w;
7459 		int h;
7460 	} common_modes[] = {
7461 		{  "640x480",  640,  480},
7462 		{  "800x600",  800,  600},
7463 		{ "1024x768", 1024,  768},
7464 		{ "1280x720", 1280,  720},
7465 		{ "1280x800", 1280,  800},
7466 		{"1280x1024", 1280, 1024},
7467 		{ "1440x900", 1440,  900},
7468 		{"1680x1050", 1680, 1050},
7469 		{"1600x1200", 1600, 1200},
7470 		{"1920x1080", 1920, 1080},
7471 		{"1920x1200", 1920, 1200}
7472 	};
7473 
7474 	n = ARRAY_SIZE(common_modes);
7475 
7476 	for (i = 0; i < n; i++) {
7477 		struct drm_display_mode *curmode = NULL;
7478 		bool mode_existed = false;
7479 
7480 		if (common_modes[i].w > native_mode->hdisplay ||
7481 		    common_modes[i].h > native_mode->vdisplay ||
7482 		   (common_modes[i].w == native_mode->hdisplay &&
7483 		    common_modes[i].h == native_mode->vdisplay))
7484 			continue;
7485 
7486 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7487 			if (common_modes[i].w == curmode->hdisplay &&
7488 			    common_modes[i].h == curmode->vdisplay) {
7489 				mode_existed = true;
7490 				break;
7491 			}
7492 		}
7493 
7494 		if (mode_existed)
7495 			continue;
7496 
7497 		mode = amdgpu_dm_create_common_mode(encoder,
7498 				common_modes[i].name, common_modes[i].w,
7499 				common_modes[i].h);
7500 		drm_mode_probed_add(connector, mode);
7501 		amdgpu_dm_connector->num_modes++;
7502 	}
7503 }
7504 
7505 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7506 					      struct edid *edid)
7507 {
7508 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7509 			to_amdgpu_dm_connector(connector);
7510 
7511 	if (edid) {
7512 		/* empty probed_modes */
7513 		INIT_LIST_HEAD(&connector->probed_modes);
7514 		amdgpu_dm_connector->num_modes =
7515 				drm_add_edid_modes(connector, edid);
7516 
7517 		/* sorting the probed modes before calling function
7518 		 * amdgpu_dm_get_native_mode() since EDID can have
7519 		 * more than one preferred mode. The modes that are
7520 		 * later in the probed mode list could be of higher
7521 		 * and preferred resolution. For example, 3840x2160
7522 		 * resolution in base EDID preferred timing and 4096x2160
7523 		 * preferred resolution in DID extension block later.
7524 		 */
7525 		drm_mode_sort(&connector->probed_modes);
7526 		amdgpu_dm_get_native_mode(connector);
7527 
7528 		/* Freesync capabilities are reset by calling
7529 		 * drm_add_edid_modes() and need to be
7530 		 * restored here.
7531 		 */
7532 		amdgpu_dm_update_freesync_caps(connector, edid);
7533 	} else {
7534 		amdgpu_dm_connector->num_modes = 0;
7535 	}
7536 }
7537 
7538 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7539 			      struct drm_display_mode *mode)
7540 {
7541 	struct drm_display_mode *m;
7542 
7543 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7544 		if (drm_mode_equal(m, mode))
7545 			return true;
7546 	}
7547 
7548 	return false;
7549 }
7550 
7551 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7552 {
7553 	const struct drm_display_mode *m;
7554 	struct drm_display_mode *new_mode;
7555 	uint i;
7556 	uint32_t new_modes_count = 0;
7557 
7558 	/* Standard FPS values
7559 	 *
7560 	 * 23.976   - TV/NTSC
7561 	 * 24 	    - Cinema
7562 	 * 25 	    - TV/PAL
7563 	 * 29.97    - TV/NTSC
7564 	 * 30 	    - TV/NTSC
7565 	 * 48 	    - Cinema HFR
7566 	 * 50 	    - TV/PAL
7567 	 * 60 	    - Commonly used
7568 	 * 48,72,96 - Multiples of 24
7569 	 */
7570 	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7571 					 48000, 50000, 60000, 72000, 96000 };
7572 
7573 	/*
7574 	 * Find mode with highest refresh rate with the same resolution
7575 	 * as the preferred mode. Some monitors report a preferred mode
7576 	 * with lower resolution than the highest refresh rate supported.
7577 	 */
7578 
7579 	m = get_highest_refresh_rate_mode(aconnector, true);
7580 	if (!m)
7581 		return 0;
7582 
7583 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7584 		uint64_t target_vtotal, target_vtotal_diff;
7585 		uint64_t num, den;
7586 
7587 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7588 			continue;
7589 
7590 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7591 		    common_rates[i] > aconnector->max_vfreq * 1000)
7592 			continue;
7593 
7594 		num = (unsigned long long)m->clock * 1000 * 1000;
7595 		den = common_rates[i] * (unsigned long long)m->htotal;
7596 		target_vtotal = div_u64(num, den);
7597 		target_vtotal_diff = target_vtotal - m->vtotal;
7598 
7599 		/* Check for illegal modes */
7600 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7601 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7602 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7603 			continue;
7604 
7605 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7606 		if (!new_mode)
7607 			goto out;
7608 
7609 		new_mode->vtotal += (u16)target_vtotal_diff;
7610 		new_mode->vsync_start += (u16)target_vtotal_diff;
7611 		new_mode->vsync_end += (u16)target_vtotal_diff;
7612 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7613 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7614 
7615 		if (!is_duplicate_mode(aconnector, new_mode)) {
7616 			drm_mode_probed_add(&aconnector->base, new_mode);
7617 			new_modes_count += 1;
7618 		} else
7619 			drm_mode_destroy(aconnector->base.dev, new_mode);
7620 	}
7621  out:
7622 	return new_modes_count;
7623 }
7624 
7625 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7626 						   struct edid *edid)
7627 {
7628 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7629 		to_amdgpu_dm_connector(connector);
7630 
7631 	if (!(amdgpu_freesync_vid_mode && edid))
7632 		return;
7633 
7634 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7635 		amdgpu_dm_connector->num_modes +=
7636 			add_fs_modes(amdgpu_dm_connector);
7637 }
7638 
7639 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7640 {
7641 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7642 			to_amdgpu_dm_connector(connector);
7643 	struct drm_encoder *encoder;
7644 	struct edid *edid = amdgpu_dm_connector->edid;
7645 
7646 	encoder = amdgpu_dm_connector_to_encoder(connector);
7647 
7648 	if (!drm_edid_is_valid(edid)) {
7649 		amdgpu_dm_connector->num_modes =
7650 				drm_add_modes_noedid(connector, 640, 480);
7651 	} else {
7652 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7653 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7654 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7655 	}
7656 	amdgpu_dm_fbc_init(connector);
7657 
7658 	return amdgpu_dm_connector->num_modes;
7659 }
7660 
7661 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7662 				     struct amdgpu_dm_connector *aconnector,
7663 				     int connector_type,
7664 				     struct dc_link *link,
7665 				     int link_index)
7666 {
7667 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7668 
7669 	/*
7670 	 * Some of the properties below require access to state, like bpc.
7671 	 * Allocate some default initial connector state with our reset helper.
7672 	 */
7673 	if (aconnector->base.funcs->reset)
7674 		aconnector->base.funcs->reset(&aconnector->base);
7675 
7676 	aconnector->connector_id = link_index;
7677 	aconnector->dc_link = link;
7678 	aconnector->base.interlace_allowed = false;
7679 	aconnector->base.doublescan_allowed = false;
7680 	aconnector->base.stereo_allowed = false;
7681 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7682 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7683 	aconnector->audio_inst = -1;
7684 	mutex_init(&aconnector->hpd_lock);
7685 
7686 	/*
7687 	 * configure support HPD hot plug connector_>polled default value is 0
7688 	 * which means HPD hot plug not supported
7689 	 */
7690 	switch (connector_type) {
7691 	case DRM_MODE_CONNECTOR_HDMIA:
7692 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7693 		aconnector->base.ycbcr_420_allowed =
7694 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7695 		break;
7696 	case DRM_MODE_CONNECTOR_DisplayPort:
7697 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7698 		aconnector->base.ycbcr_420_allowed =
7699 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
7700 		break;
7701 	case DRM_MODE_CONNECTOR_DVID:
7702 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7703 		break;
7704 	default:
7705 		break;
7706 	}
7707 
7708 	drm_object_attach_property(&aconnector->base.base,
7709 				dm->ddev->mode_config.scaling_mode_property,
7710 				DRM_MODE_SCALE_NONE);
7711 
7712 	drm_object_attach_property(&aconnector->base.base,
7713 				adev->mode_info.underscan_property,
7714 				UNDERSCAN_OFF);
7715 	drm_object_attach_property(&aconnector->base.base,
7716 				adev->mode_info.underscan_hborder_property,
7717 				0);
7718 	drm_object_attach_property(&aconnector->base.base,
7719 				adev->mode_info.underscan_vborder_property,
7720 				0);
7721 
7722 	if (!aconnector->mst_port)
7723 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7724 
7725 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7726 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7727 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7728 
7729 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7730 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7731 		drm_object_attach_property(&aconnector->base.base,
7732 				adev->mode_info.abm_level_property, 0);
7733 	}
7734 
7735 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7736 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7737 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7738 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7739 
7740 		if (!aconnector->mst_port)
7741 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7742 
7743 #ifdef CONFIG_DRM_AMD_DC_HDCP
7744 		if (adev->dm.hdcp_workqueue)
7745 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7746 #endif
7747 	}
7748 }
7749 
7750 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7751 			      struct i2c_msg *msgs, int num)
7752 {
7753 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7754 	struct ddc_service *ddc_service = i2c->ddc_service;
7755 	struct i2c_command cmd;
7756 	int i;
7757 	int result = -EIO;
7758 
7759 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7760 
7761 	if (!cmd.payloads)
7762 		return result;
7763 
7764 	cmd.number_of_payloads = num;
7765 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7766 	cmd.speed = 100;
7767 
7768 	for (i = 0; i < num; i++) {
7769 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7770 		cmd.payloads[i].address = msgs[i].addr;
7771 		cmd.payloads[i].length = msgs[i].len;
7772 		cmd.payloads[i].data = msgs[i].buf;
7773 	}
7774 
7775 	if (dc_submit_i2c(
7776 			ddc_service->ctx->dc,
7777 			ddc_service->ddc_pin->hw_info.ddc_channel,
7778 			&cmd))
7779 		result = num;
7780 
7781 	kfree(cmd.payloads);
7782 	return result;
7783 }
7784 
7785 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7786 {
7787 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7788 }
7789 
7790 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7791 	.master_xfer = amdgpu_dm_i2c_xfer,
7792 	.functionality = amdgpu_dm_i2c_func,
7793 };
7794 
7795 static struct amdgpu_i2c_adapter *
7796 create_i2c(struct ddc_service *ddc_service,
7797 	   int link_index,
7798 	   int *res)
7799 {
7800 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7801 	struct amdgpu_i2c_adapter *i2c;
7802 
7803 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7804 	if (!i2c)
7805 		return NULL;
7806 	i2c->base.owner = THIS_MODULE;
7807 	i2c->base.class = I2C_CLASS_DDC;
7808 	i2c->base.dev.parent = &adev->pdev->dev;
7809 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7810 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7811 	i2c_set_adapdata(&i2c->base, i2c);
7812 	i2c->ddc_service = ddc_service;
7813 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7814 
7815 	return i2c;
7816 }
7817 
7818 
7819 /*
7820  * Note: this function assumes that dc_link_detect() was called for the
7821  * dc_link which will be represented by this aconnector.
7822  */
7823 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7824 				    struct amdgpu_dm_connector *aconnector,
7825 				    uint32_t link_index,
7826 				    struct amdgpu_encoder *aencoder)
7827 {
7828 	int res = 0;
7829 	int connector_type;
7830 	struct dc *dc = dm->dc;
7831 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7832 	struct amdgpu_i2c_adapter *i2c;
7833 
7834 	link->priv = aconnector;
7835 
7836 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7837 
7838 	i2c = create_i2c(link->ddc, link->link_index, &res);
7839 	if (!i2c) {
7840 		DRM_ERROR("Failed to create i2c adapter data\n");
7841 		return -ENOMEM;
7842 	}
7843 
7844 	aconnector->i2c = i2c;
7845 	res = i2c_add_adapter(&i2c->base);
7846 
7847 	if (res) {
7848 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7849 		goto out_free;
7850 	}
7851 
7852 	connector_type = to_drm_connector_type(link->connector_signal);
7853 
7854 	res = drm_connector_init_with_ddc(
7855 			dm->ddev,
7856 			&aconnector->base,
7857 			&amdgpu_dm_connector_funcs,
7858 			connector_type,
7859 			&i2c->base);
7860 
7861 	if (res) {
7862 		DRM_ERROR("connector_init failed\n");
7863 		aconnector->connector_id = -1;
7864 		goto out_free;
7865 	}
7866 
7867 	drm_connector_helper_add(
7868 			&aconnector->base,
7869 			&amdgpu_dm_connector_helper_funcs);
7870 
7871 	amdgpu_dm_connector_init_helper(
7872 		dm,
7873 		aconnector,
7874 		connector_type,
7875 		link,
7876 		link_index);
7877 
7878 	drm_connector_attach_encoder(
7879 		&aconnector->base, &aencoder->base);
7880 
7881 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7882 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7883 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7884 
7885 out_free:
7886 	if (res) {
7887 		kfree(i2c);
7888 		aconnector->i2c = NULL;
7889 	}
7890 	return res;
7891 }
7892 
7893 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7894 {
7895 	switch (adev->mode_info.num_crtc) {
7896 	case 1:
7897 		return 0x1;
7898 	case 2:
7899 		return 0x3;
7900 	case 3:
7901 		return 0x7;
7902 	case 4:
7903 		return 0xf;
7904 	case 5:
7905 		return 0x1f;
7906 	case 6:
7907 	default:
7908 		return 0x3f;
7909 	}
7910 }
7911 
7912 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7913 				  struct amdgpu_encoder *aencoder,
7914 				  uint32_t link_index)
7915 {
7916 	struct amdgpu_device *adev = drm_to_adev(dev);
7917 
7918 	int res = drm_encoder_init(dev,
7919 				   &aencoder->base,
7920 				   &amdgpu_dm_encoder_funcs,
7921 				   DRM_MODE_ENCODER_TMDS,
7922 				   NULL);
7923 
7924 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7925 
7926 	if (!res)
7927 		aencoder->encoder_id = link_index;
7928 	else
7929 		aencoder->encoder_id = -1;
7930 
7931 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7932 
7933 	return res;
7934 }
7935 
7936 static void manage_dm_interrupts(struct amdgpu_device *adev,
7937 				 struct amdgpu_crtc *acrtc,
7938 				 bool enable)
7939 {
7940 	/*
7941 	 * We have no guarantee that the frontend index maps to the same
7942 	 * backend index - some even map to more than one.
7943 	 *
7944 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7945 	 */
7946 	int irq_type =
7947 		amdgpu_display_crtc_idx_to_irq_type(
7948 			adev,
7949 			acrtc->crtc_id);
7950 
7951 	if (enable) {
7952 		drm_crtc_vblank_on(&acrtc->base);
7953 		amdgpu_irq_get(
7954 			adev,
7955 			&adev->pageflip_irq,
7956 			irq_type);
7957 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7958 		amdgpu_irq_get(
7959 			adev,
7960 			&adev->vline0_irq,
7961 			irq_type);
7962 #endif
7963 	} else {
7964 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7965 		amdgpu_irq_put(
7966 			adev,
7967 			&adev->vline0_irq,
7968 			irq_type);
7969 #endif
7970 		amdgpu_irq_put(
7971 			adev,
7972 			&adev->pageflip_irq,
7973 			irq_type);
7974 		drm_crtc_vblank_off(&acrtc->base);
7975 	}
7976 }
7977 
7978 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7979 				      struct amdgpu_crtc *acrtc)
7980 {
7981 	int irq_type =
7982 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7983 
7984 	/**
7985 	 * This reads the current state for the IRQ and force reapplies
7986 	 * the setting to hardware.
7987 	 */
7988 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7989 }
7990 
7991 static bool
7992 is_scaling_state_different(const struct dm_connector_state *dm_state,
7993 			   const struct dm_connector_state *old_dm_state)
7994 {
7995 	if (dm_state->scaling != old_dm_state->scaling)
7996 		return true;
7997 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7998 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7999 			return true;
8000 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8001 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8002 			return true;
8003 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8004 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8005 		return true;
8006 	return false;
8007 }
8008 
8009 #ifdef CONFIG_DRM_AMD_DC_HDCP
8010 static bool is_content_protection_different(struct drm_connector_state *state,
8011 					    const struct drm_connector_state *old_state,
8012 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8013 {
8014 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8015 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8016 
8017 	/* Handle: Type0/1 change */
8018 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8019 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8020 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8021 		return true;
8022 	}
8023 
8024 	/* CP is being re enabled, ignore this
8025 	 *
8026 	 * Handles:	ENABLED -> DESIRED
8027 	 */
8028 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8029 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8030 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8031 		return false;
8032 	}
8033 
8034 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8035 	 *
8036 	 * Handles:	UNDESIRED -> ENABLED
8037 	 */
8038 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8039 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8040 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8041 
8042 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
8043 	 * hot-plug, headless s3, dpms
8044 	 *
8045 	 * Handles:	DESIRED -> DESIRED (Special case)
8046 	 */
8047 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8048 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8049 		dm_con_state->update_hdcp = false;
8050 		return true;
8051 	}
8052 
8053 	/*
8054 	 * Handles:	UNDESIRED -> UNDESIRED
8055 	 *		DESIRED -> DESIRED
8056 	 *		ENABLED -> ENABLED
8057 	 */
8058 	if (old_state->content_protection == state->content_protection)
8059 		return false;
8060 
8061 	/*
8062 	 * Handles:	UNDESIRED -> DESIRED
8063 	 *		DESIRED -> UNDESIRED
8064 	 *		ENABLED -> UNDESIRED
8065 	 */
8066 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8067 		return true;
8068 
8069 	/*
8070 	 * Handles:	DESIRED -> ENABLED
8071 	 */
8072 	return false;
8073 }
8074 
8075 #endif
8076 static void remove_stream(struct amdgpu_device *adev,
8077 			  struct amdgpu_crtc *acrtc,
8078 			  struct dc_stream_state *stream)
8079 {
8080 	/* this is the update mode case */
8081 
8082 	acrtc->otg_inst = -1;
8083 	acrtc->enabled = false;
8084 }
8085 
8086 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8087 			       struct dc_cursor_position *position)
8088 {
8089 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8090 	int x, y;
8091 	int xorigin = 0, yorigin = 0;
8092 
8093 	if (!crtc || !plane->state->fb)
8094 		return 0;
8095 
8096 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8097 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8098 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8099 			  __func__,
8100 			  plane->state->crtc_w,
8101 			  plane->state->crtc_h);
8102 		return -EINVAL;
8103 	}
8104 
8105 	x = plane->state->crtc_x;
8106 	y = plane->state->crtc_y;
8107 
8108 	if (x <= -amdgpu_crtc->max_cursor_width ||
8109 	    y <= -amdgpu_crtc->max_cursor_height)
8110 		return 0;
8111 
8112 	if (x < 0) {
8113 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8114 		x = 0;
8115 	}
8116 	if (y < 0) {
8117 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8118 		y = 0;
8119 	}
8120 	position->enable = true;
8121 	position->translate_by_source = true;
8122 	position->x = x;
8123 	position->y = y;
8124 	position->x_hotspot = xorigin;
8125 	position->y_hotspot = yorigin;
8126 
8127 	return 0;
8128 }
8129 
8130 static void handle_cursor_update(struct drm_plane *plane,
8131 				 struct drm_plane_state *old_plane_state)
8132 {
8133 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8134 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8135 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8136 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8137 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8138 	uint64_t address = afb ? afb->address : 0;
8139 	struct dc_cursor_position position = {0};
8140 	struct dc_cursor_attributes attributes;
8141 	int ret;
8142 
8143 	if (!plane->state->fb && !old_plane_state->fb)
8144 		return;
8145 
8146 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8147 		      __func__,
8148 		      amdgpu_crtc->crtc_id,
8149 		      plane->state->crtc_w,
8150 		      plane->state->crtc_h);
8151 
8152 	ret = get_cursor_position(plane, crtc, &position);
8153 	if (ret)
8154 		return;
8155 
8156 	if (!position.enable) {
8157 		/* turn off cursor */
8158 		if (crtc_state && crtc_state->stream) {
8159 			mutex_lock(&adev->dm.dc_lock);
8160 			dc_stream_set_cursor_position(crtc_state->stream,
8161 						      &position);
8162 			mutex_unlock(&adev->dm.dc_lock);
8163 		}
8164 		return;
8165 	}
8166 
8167 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8168 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8169 
8170 	memset(&attributes, 0, sizeof(attributes));
8171 	attributes.address.high_part = upper_32_bits(address);
8172 	attributes.address.low_part  = lower_32_bits(address);
8173 	attributes.width             = plane->state->crtc_w;
8174 	attributes.height            = plane->state->crtc_h;
8175 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8176 	attributes.rotation_angle    = 0;
8177 	attributes.attribute_flags.value = 0;
8178 
8179 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8180 
8181 	if (crtc_state->stream) {
8182 		mutex_lock(&adev->dm.dc_lock);
8183 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8184 							 &attributes))
8185 			DRM_ERROR("DC failed to set cursor attributes\n");
8186 
8187 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8188 						   &position))
8189 			DRM_ERROR("DC failed to set cursor position\n");
8190 		mutex_unlock(&adev->dm.dc_lock);
8191 	}
8192 }
8193 
8194 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8195 {
8196 
8197 	assert_spin_locked(&acrtc->base.dev->event_lock);
8198 	WARN_ON(acrtc->event);
8199 
8200 	acrtc->event = acrtc->base.state->event;
8201 
8202 	/* Set the flip status */
8203 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8204 
8205 	/* Mark this event as consumed */
8206 	acrtc->base.state->event = NULL;
8207 
8208 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8209 		     acrtc->crtc_id);
8210 }
8211 
8212 static void update_freesync_state_on_stream(
8213 	struct amdgpu_display_manager *dm,
8214 	struct dm_crtc_state *new_crtc_state,
8215 	struct dc_stream_state *new_stream,
8216 	struct dc_plane_state *surface,
8217 	u32 flip_timestamp_in_us)
8218 {
8219 	struct mod_vrr_params vrr_params;
8220 	struct dc_info_packet vrr_infopacket = {0};
8221 	struct amdgpu_device *adev = dm->adev;
8222 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8223 	unsigned long flags;
8224 	bool pack_sdp_v1_3 = false;
8225 
8226 	if (!new_stream)
8227 		return;
8228 
8229 	/*
8230 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8231 	 * For now it's sufficient to just guard against these conditions.
8232 	 */
8233 
8234 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8235 		return;
8236 
8237 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8238         vrr_params = acrtc->dm_irq_params.vrr_params;
8239 
8240 	if (surface) {
8241 		mod_freesync_handle_preflip(
8242 			dm->freesync_module,
8243 			surface,
8244 			new_stream,
8245 			flip_timestamp_in_us,
8246 			&vrr_params);
8247 
8248 		if (adev->family < AMDGPU_FAMILY_AI &&
8249 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8250 			mod_freesync_handle_v_update(dm->freesync_module,
8251 						     new_stream, &vrr_params);
8252 
8253 			/* Need to call this before the frame ends. */
8254 			dc_stream_adjust_vmin_vmax(dm->dc,
8255 						   new_crtc_state->stream,
8256 						   &vrr_params.adjust);
8257 		}
8258 	}
8259 
8260 	mod_freesync_build_vrr_infopacket(
8261 		dm->freesync_module,
8262 		new_stream,
8263 		&vrr_params,
8264 		PACKET_TYPE_VRR,
8265 		TRANSFER_FUNC_UNKNOWN,
8266 		&vrr_infopacket,
8267 		pack_sdp_v1_3);
8268 
8269 	new_crtc_state->freesync_timing_changed |=
8270 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8271 			&vrr_params.adjust,
8272 			sizeof(vrr_params.adjust)) != 0);
8273 
8274 	new_crtc_state->freesync_vrr_info_changed |=
8275 		(memcmp(&new_crtc_state->vrr_infopacket,
8276 			&vrr_infopacket,
8277 			sizeof(vrr_infopacket)) != 0);
8278 
8279 	acrtc->dm_irq_params.vrr_params = vrr_params;
8280 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8281 
8282 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8283 	new_stream->vrr_infopacket = vrr_infopacket;
8284 
8285 	if (new_crtc_state->freesync_vrr_info_changed)
8286 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8287 			      new_crtc_state->base.crtc->base.id,
8288 			      (int)new_crtc_state->base.vrr_enabled,
8289 			      (int)vrr_params.state);
8290 
8291 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8292 }
8293 
8294 static void update_stream_irq_parameters(
8295 	struct amdgpu_display_manager *dm,
8296 	struct dm_crtc_state *new_crtc_state)
8297 {
8298 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8299 	struct mod_vrr_params vrr_params;
8300 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8301 	struct amdgpu_device *adev = dm->adev;
8302 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8303 	unsigned long flags;
8304 
8305 	if (!new_stream)
8306 		return;
8307 
8308 	/*
8309 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8310 	 * For now it's sufficient to just guard against these conditions.
8311 	 */
8312 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8313 		return;
8314 
8315 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8316 	vrr_params = acrtc->dm_irq_params.vrr_params;
8317 
8318 	if (new_crtc_state->vrr_supported &&
8319 	    config.min_refresh_in_uhz &&
8320 	    config.max_refresh_in_uhz) {
8321 		/*
8322 		 * if freesync compatible mode was set, config.state will be set
8323 		 * in atomic check
8324 		 */
8325 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8326 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8327 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8328 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8329 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8330 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8331 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8332 		} else {
8333 			config.state = new_crtc_state->base.vrr_enabled ?
8334 						     VRR_STATE_ACTIVE_VARIABLE :
8335 						     VRR_STATE_INACTIVE;
8336 		}
8337 	} else {
8338 		config.state = VRR_STATE_UNSUPPORTED;
8339 	}
8340 
8341 	mod_freesync_build_vrr_params(dm->freesync_module,
8342 				      new_stream,
8343 				      &config, &vrr_params);
8344 
8345 	new_crtc_state->freesync_timing_changed |=
8346 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8347 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8348 
8349 	new_crtc_state->freesync_config = config;
8350 	/* Copy state for access from DM IRQ handler */
8351 	acrtc->dm_irq_params.freesync_config = config;
8352 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8353 	acrtc->dm_irq_params.vrr_params = vrr_params;
8354 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8355 }
8356 
8357 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8358 					    struct dm_crtc_state *new_state)
8359 {
8360 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8361 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8362 
8363 	if (!old_vrr_active && new_vrr_active) {
8364 		/* Transition VRR inactive -> active:
8365 		 * While VRR is active, we must not disable vblank irq, as a
8366 		 * reenable after disable would compute bogus vblank/pflip
8367 		 * timestamps if it likely happened inside display front-porch.
8368 		 *
8369 		 * We also need vupdate irq for the actual core vblank handling
8370 		 * at end of vblank.
8371 		 */
8372 		dm_set_vupdate_irq(new_state->base.crtc, true);
8373 		drm_crtc_vblank_get(new_state->base.crtc);
8374 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8375 				 __func__, new_state->base.crtc->base.id);
8376 	} else if (old_vrr_active && !new_vrr_active) {
8377 		/* Transition VRR active -> inactive:
8378 		 * Allow vblank irq disable again for fixed refresh rate.
8379 		 */
8380 		dm_set_vupdate_irq(new_state->base.crtc, false);
8381 		drm_crtc_vblank_put(new_state->base.crtc);
8382 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8383 				 __func__, new_state->base.crtc->base.id);
8384 	}
8385 }
8386 
8387 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8388 {
8389 	struct drm_plane *plane;
8390 	struct drm_plane_state *old_plane_state;
8391 	int i;
8392 
8393 	/*
8394 	 * TODO: Make this per-stream so we don't issue redundant updates for
8395 	 * commits with multiple streams.
8396 	 */
8397 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8398 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8399 			handle_cursor_update(plane, old_plane_state);
8400 }
8401 
8402 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8403 				    struct dc_state *dc_state,
8404 				    struct drm_device *dev,
8405 				    struct amdgpu_display_manager *dm,
8406 				    struct drm_crtc *pcrtc,
8407 				    bool wait_for_vblank)
8408 {
8409 	uint32_t i;
8410 	uint64_t timestamp_ns;
8411 	struct drm_plane *plane;
8412 	struct drm_plane_state *old_plane_state, *new_plane_state;
8413 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8414 	struct drm_crtc_state *new_pcrtc_state =
8415 			drm_atomic_get_new_crtc_state(state, pcrtc);
8416 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8417 	struct dm_crtc_state *dm_old_crtc_state =
8418 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8419 	int planes_count = 0, vpos, hpos;
8420 	long r;
8421 	unsigned long flags;
8422 	struct amdgpu_bo *abo;
8423 	uint32_t target_vblank, last_flip_vblank;
8424 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8425 	bool pflip_present = false;
8426 	struct {
8427 		struct dc_surface_update surface_updates[MAX_SURFACES];
8428 		struct dc_plane_info plane_infos[MAX_SURFACES];
8429 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8430 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8431 		struct dc_stream_update stream_update;
8432 	} *bundle;
8433 
8434 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8435 
8436 	if (!bundle) {
8437 		dm_error("Failed to allocate update bundle\n");
8438 		goto cleanup;
8439 	}
8440 
8441 	/*
8442 	 * Disable the cursor first if we're disabling all the planes.
8443 	 * It'll remain on the screen after the planes are re-enabled
8444 	 * if we don't.
8445 	 */
8446 	if (acrtc_state->active_planes == 0)
8447 		amdgpu_dm_commit_cursors(state);
8448 
8449 	/* update planes when needed */
8450 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8451 		struct drm_crtc *crtc = new_plane_state->crtc;
8452 		struct drm_crtc_state *new_crtc_state;
8453 		struct drm_framebuffer *fb = new_plane_state->fb;
8454 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8455 		bool plane_needs_flip;
8456 		struct dc_plane_state *dc_plane;
8457 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8458 
8459 		/* Cursor plane is handled after stream updates */
8460 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8461 			continue;
8462 
8463 		if (!fb || !crtc || pcrtc != crtc)
8464 			continue;
8465 
8466 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8467 		if (!new_crtc_state->active)
8468 			continue;
8469 
8470 		dc_plane = dm_new_plane_state->dc_state;
8471 
8472 		bundle->surface_updates[planes_count].surface = dc_plane;
8473 		if (new_pcrtc_state->color_mgmt_changed) {
8474 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8475 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8476 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8477 		}
8478 
8479 		fill_dc_scaling_info(new_plane_state,
8480 				     &bundle->scaling_infos[planes_count]);
8481 
8482 		bundle->surface_updates[planes_count].scaling_info =
8483 			&bundle->scaling_infos[planes_count];
8484 
8485 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8486 
8487 		pflip_present = pflip_present || plane_needs_flip;
8488 
8489 		if (!plane_needs_flip) {
8490 			planes_count += 1;
8491 			continue;
8492 		}
8493 
8494 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8495 
8496 		/*
8497 		 * Wait for all fences on this FB. Do limited wait to avoid
8498 		 * deadlock during GPU reset when this fence will not signal
8499 		 * but we hold reservation lock for the BO.
8500 		 */
8501 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8502 					  msecs_to_jiffies(5000));
8503 		if (unlikely(r <= 0))
8504 			DRM_ERROR("Waiting for fences timed out!");
8505 
8506 		fill_dc_plane_info_and_addr(
8507 			dm->adev, new_plane_state,
8508 			afb->tiling_flags,
8509 			&bundle->plane_infos[planes_count],
8510 			&bundle->flip_addrs[planes_count].address,
8511 			afb->tmz_surface, false);
8512 
8513 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8514 				 new_plane_state->plane->index,
8515 				 bundle->plane_infos[planes_count].dcc.enable);
8516 
8517 		bundle->surface_updates[planes_count].plane_info =
8518 			&bundle->plane_infos[planes_count];
8519 
8520 		/*
8521 		 * Only allow immediate flips for fast updates that don't
8522 		 * change FB pitch, DCC state, rotation or mirroing.
8523 		 */
8524 		bundle->flip_addrs[planes_count].flip_immediate =
8525 			crtc->state->async_flip &&
8526 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8527 
8528 		timestamp_ns = ktime_get_ns();
8529 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8530 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8531 		bundle->surface_updates[planes_count].surface = dc_plane;
8532 
8533 		if (!bundle->surface_updates[planes_count].surface) {
8534 			DRM_ERROR("No surface for CRTC: id=%d\n",
8535 					acrtc_attach->crtc_id);
8536 			continue;
8537 		}
8538 
8539 		if (plane == pcrtc->primary)
8540 			update_freesync_state_on_stream(
8541 				dm,
8542 				acrtc_state,
8543 				acrtc_state->stream,
8544 				dc_plane,
8545 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8546 
8547 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8548 				 __func__,
8549 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8550 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8551 
8552 		planes_count += 1;
8553 
8554 	}
8555 
8556 	if (pflip_present) {
8557 		if (!vrr_active) {
8558 			/* Use old throttling in non-vrr fixed refresh rate mode
8559 			 * to keep flip scheduling based on target vblank counts
8560 			 * working in a backwards compatible way, e.g., for
8561 			 * clients using the GLX_OML_sync_control extension or
8562 			 * DRI3/Present extension with defined target_msc.
8563 			 */
8564 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8565 		}
8566 		else {
8567 			/* For variable refresh rate mode only:
8568 			 * Get vblank of last completed flip to avoid > 1 vrr
8569 			 * flips per video frame by use of throttling, but allow
8570 			 * flip programming anywhere in the possibly large
8571 			 * variable vrr vblank interval for fine-grained flip
8572 			 * timing control and more opportunity to avoid stutter
8573 			 * on late submission of flips.
8574 			 */
8575 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8576 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8577 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8578 		}
8579 
8580 		target_vblank = last_flip_vblank + wait_for_vblank;
8581 
8582 		/*
8583 		 * Wait until we're out of the vertical blank period before the one
8584 		 * targeted by the flip
8585 		 */
8586 		while ((acrtc_attach->enabled &&
8587 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8588 							    0, &vpos, &hpos, NULL,
8589 							    NULL, &pcrtc->hwmode)
8590 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8591 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8592 			(int)(target_vblank -
8593 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8594 			usleep_range(1000, 1100);
8595 		}
8596 
8597 		/**
8598 		 * Prepare the flip event for the pageflip interrupt to handle.
8599 		 *
8600 		 * This only works in the case where we've already turned on the
8601 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
8602 		 * from 0 -> n planes we have to skip a hardware generated event
8603 		 * and rely on sending it from software.
8604 		 */
8605 		if (acrtc_attach->base.state->event &&
8606 		    acrtc_state->active_planes > 0) {
8607 			drm_crtc_vblank_get(pcrtc);
8608 
8609 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8610 
8611 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8612 			prepare_flip_isr(acrtc_attach);
8613 
8614 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8615 		}
8616 
8617 		if (acrtc_state->stream) {
8618 			if (acrtc_state->freesync_vrr_info_changed)
8619 				bundle->stream_update.vrr_infopacket =
8620 					&acrtc_state->stream->vrr_infopacket;
8621 		}
8622 	}
8623 
8624 	/* Update the planes if changed or disable if we don't have any. */
8625 	if ((planes_count || acrtc_state->active_planes == 0) &&
8626 		acrtc_state->stream) {
8627 		bundle->stream_update.stream = acrtc_state->stream;
8628 		if (new_pcrtc_state->mode_changed) {
8629 			bundle->stream_update.src = acrtc_state->stream->src;
8630 			bundle->stream_update.dst = acrtc_state->stream->dst;
8631 		}
8632 
8633 		if (new_pcrtc_state->color_mgmt_changed) {
8634 			/*
8635 			 * TODO: This isn't fully correct since we've actually
8636 			 * already modified the stream in place.
8637 			 */
8638 			bundle->stream_update.gamut_remap =
8639 				&acrtc_state->stream->gamut_remap_matrix;
8640 			bundle->stream_update.output_csc_transform =
8641 				&acrtc_state->stream->csc_color_matrix;
8642 			bundle->stream_update.out_transfer_func =
8643 				acrtc_state->stream->out_transfer_func;
8644 		}
8645 
8646 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8647 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8648 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8649 
8650 		/*
8651 		 * If FreeSync state on the stream has changed then we need to
8652 		 * re-adjust the min/max bounds now that DC doesn't handle this
8653 		 * as part of commit.
8654 		 */
8655 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8656 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8657 			dc_stream_adjust_vmin_vmax(
8658 				dm->dc, acrtc_state->stream,
8659 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8660 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8661 		}
8662 		mutex_lock(&dm->dc_lock);
8663 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8664 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8665 			amdgpu_dm_psr_disable(acrtc_state->stream);
8666 
8667 		dc_commit_updates_for_stream(dm->dc,
8668 						     bundle->surface_updates,
8669 						     planes_count,
8670 						     acrtc_state->stream,
8671 						     &bundle->stream_update,
8672 						     dc_state);
8673 
8674 		/**
8675 		 * Enable or disable the interrupts on the backend.
8676 		 *
8677 		 * Most pipes are put into power gating when unused.
8678 		 *
8679 		 * When power gating is enabled on a pipe we lose the
8680 		 * interrupt enablement state when power gating is disabled.
8681 		 *
8682 		 * So we need to update the IRQ control state in hardware
8683 		 * whenever the pipe turns on (since it could be previously
8684 		 * power gated) or off (since some pipes can't be power gated
8685 		 * on some ASICs).
8686 		 */
8687 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8688 			dm_update_pflip_irq_state(drm_to_adev(dev),
8689 						  acrtc_attach);
8690 
8691 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8692 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8693 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8694 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8695 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8696 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8697 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8698 			struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
8699 					acrtc_state->stream->dm_stream_context;
8700 
8701 			if (aconn->psr_skip_count > 0)
8702 				aconn->psr_skip_count--;
8703 			else
8704 				amdgpu_dm_psr_enable(acrtc_state->stream);
8705 		}
8706 
8707 		mutex_unlock(&dm->dc_lock);
8708 	}
8709 
8710 	/*
8711 	 * Update cursor state *after* programming all the planes.
8712 	 * This avoids redundant programming in the case where we're going
8713 	 * to be disabling a single plane - those pipes are being disabled.
8714 	 */
8715 	if (acrtc_state->active_planes)
8716 		amdgpu_dm_commit_cursors(state);
8717 
8718 cleanup:
8719 	kfree(bundle);
8720 }
8721 
8722 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8723 				   struct drm_atomic_state *state)
8724 {
8725 	struct amdgpu_device *adev = drm_to_adev(dev);
8726 	struct amdgpu_dm_connector *aconnector;
8727 	struct drm_connector *connector;
8728 	struct drm_connector_state *old_con_state, *new_con_state;
8729 	struct drm_crtc_state *new_crtc_state;
8730 	struct dm_crtc_state *new_dm_crtc_state;
8731 	const struct dc_stream_status *status;
8732 	int i, inst;
8733 
8734 	/* Notify device removals. */
8735 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8736 		if (old_con_state->crtc != new_con_state->crtc) {
8737 			/* CRTC changes require notification. */
8738 			goto notify;
8739 		}
8740 
8741 		if (!new_con_state->crtc)
8742 			continue;
8743 
8744 		new_crtc_state = drm_atomic_get_new_crtc_state(
8745 			state, new_con_state->crtc);
8746 
8747 		if (!new_crtc_state)
8748 			continue;
8749 
8750 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8751 			continue;
8752 
8753 	notify:
8754 		aconnector = to_amdgpu_dm_connector(connector);
8755 
8756 		mutex_lock(&adev->dm.audio_lock);
8757 		inst = aconnector->audio_inst;
8758 		aconnector->audio_inst = -1;
8759 		mutex_unlock(&adev->dm.audio_lock);
8760 
8761 		amdgpu_dm_audio_eld_notify(adev, inst);
8762 	}
8763 
8764 	/* Notify audio device additions. */
8765 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8766 		if (!new_con_state->crtc)
8767 			continue;
8768 
8769 		new_crtc_state = drm_atomic_get_new_crtc_state(
8770 			state, new_con_state->crtc);
8771 
8772 		if (!new_crtc_state)
8773 			continue;
8774 
8775 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8776 			continue;
8777 
8778 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8779 		if (!new_dm_crtc_state->stream)
8780 			continue;
8781 
8782 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8783 		if (!status)
8784 			continue;
8785 
8786 		aconnector = to_amdgpu_dm_connector(connector);
8787 
8788 		mutex_lock(&adev->dm.audio_lock);
8789 		inst = status->audio_inst;
8790 		aconnector->audio_inst = inst;
8791 		mutex_unlock(&adev->dm.audio_lock);
8792 
8793 		amdgpu_dm_audio_eld_notify(adev, inst);
8794 	}
8795 }
8796 
8797 /*
8798  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8799  * @crtc_state: the DRM CRTC state
8800  * @stream_state: the DC stream state.
8801  *
8802  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8803  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8804  */
8805 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8806 						struct dc_stream_state *stream_state)
8807 {
8808 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8809 }
8810 
8811 /**
8812  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8813  * @state: The atomic state to commit
8814  *
8815  * This will tell DC to commit the constructed DC state from atomic_check,
8816  * programming the hardware. Any failures here implies a hardware failure, since
8817  * atomic check should have filtered anything non-kosher.
8818  */
8819 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8820 {
8821 	struct drm_device *dev = state->dev;
8822 	struct amdgpu_device *adev = drm_to_adev(dev);
8823 	struct amdgpu_display_manager *dm = &adev->dm;
8824 	struct dm_atomic_state *dm_state;
8825 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8826 	uint32_t i, j;
8827 	struct drm_crtc *crtc;
8828 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8829 	unsigned long flags;
8830 	bool wait_for_vblank = true;
8831 	struct drm_connector *connector;
8832 	struct drm_connector_state *old_con_state, *new_con_state;
8833 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8834 	int crtc_disable_count = 0;
8835 	bool mode_set_reset_required = false;
8836 
8837 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8838 
8839 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8840 
8841 	dm_state = dm_atomic_get_new_state(state);
8842 	if (dm_state && dm_state->context) {
8843 		dc_state = dm_state->context;
8844 	} else {
8845 		/* No state changes, retain current state. */
8846 		dc_state_temp = dc_create_state(dm->dc);
8847 		ASSERT(dc_state_temp);
8848 		dc_state = dc_state_temp;
8849 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8850 	}
8851 
8852 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8853 				       new_crtc_state, i) {
8854 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8855 
8856 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8857 
8858 		if (old_crtc_state->active &&
8859 		    (!new_crtc_state->active ||
8860 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8861 			manage_dm_interrupts(adev, acrtc, false);
8862 			dc_stream_release(dm_old_crtc_state->stream);
8863 		}
8864 	}
8865 
8866 	drm_atomic_helper_calc_timestamping_constants(state);
8867 
8868 	/* update changed items */
8869 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8870 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8871 
8872 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8873 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8874 
8875 		DRM_DEBUG_ATOMIC(
8876 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8877 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8878 			"connectors_changed:%d\n",
8879 			acrtc->crtc_id,
8880 			new_crtc_state->enable,
8881 			new_crtc_state->active,
8882 			new_crtc_state->planes_changed,
8883 			new_crtc_state->mode_changed,
8884 			new_crtc_state->active_changed,
8885 			new_crtc_state->connectors_changed);
8886 
8887 		/* Disable cursor if disabling crtc */
8888 		if (old_crtc_state->active && !new_crtc_state->active) {
8889 			struct dc_cursor_position position;
8890 
8891 			memset(&position, 0, sizeof(position));
8892 			mutex_lock(&dm->dc_lock);
8893 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8894 			mutex_unlock(&dm->dc_lock);
8895 		}
8896 
8897 		/* Copy all transient state flags into dc state */
8898 		if (dm_new_crtc_state->stream) {
8899 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8900 							    dm_new_crtc_state->stream);
8901 		}
8902 
8903 		/* handles headless hotplug case, updating new_state and
8904 		 * aconnector as needed
8905 		 */
8906 
8907 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8908 
8909 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8910 
8911 			if (!dm_new_crtc_state->stream) {
8912 				/*
8913 				 * this could happen because of issues with
8914 				 * userspace notifications delivery.
8915 				 * In this case userspace tries to set mode on
8916 				 * display which is disconnected in fact.
8917 				 * dc_sink is NULL in this case on aconnector.
8918 				 * We expect reset mode will come soon.
8919 				 *
8920 				 * This can also happen when unplug is done
8921 				 * during resume sequence ended
8922 				 *
8923 				 * In this case, we want to pretend we still
8924 				 * have a sink to keep the pipe running so that
8925 				 * hw state is consistent with the sw state
8926 				 */
8927 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8928 						__func__, acrtc->base.base.id);
8929 				continue;
8930 			}
8931 
8932 			if (dm_old_crtc_state->stream)
8933 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8934 
8935 			pm_runtime_get_noresume(dev->dev);
8936 
8937 			acrtc->enabled = true;
8938 			acrtc->hw_mode = new_crtc_state->mode;
8939 			crtc->hwmode = new_crtc_state->mode;
8940 			mode_set_reset_required = true;
8941 		} else if (modereset_required(new_crtc_state)) {
8942 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8943 			/* i.e. reset mode */
8944 			if (dm_old_crtc_state->stream)
8945 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8946 
8947 			mode_set_reset_required = true;
8948 		}
8949 	} /* for_each_crtc_in_state() */
8950 
8951 	if (dc_state) {
8952 		/* if there mode set or reset, disable eDP PSR */
8953 		if (mode_set_reset_required)
8954 			amdgpu_dm_psr_disable_all(dm);
8955 
8956 		dm_enable_per_frame_crtc_master_sync(dc_state);
8957 		mutex_lock(&dm->dc_lock);
8958 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8959 #if defined(CONFIG_DRM_AMD_DC_DCN)
8960                /* Allow idle optimization when vblank count is 0 for display off */
8961                if (dm->active_vblank_irq_count == 0)
8962                    dc_allow_idle_optimizations(dm->dc,true);
8963 #endif
8964 		mutex_unlock(&dm->dc_lock);
8965 	}
8966 
8967 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8968 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8969 
8970 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8971 
8972 		if (dm_new_crtc_state->stream != NULL) {
8973 			const struct dc_stream_status *status =
8974 					dc_stream_get_status(dm_new_crtc_state->stream);
8975 
8976 			if (!status)
8977 				status = dc_stream_get_status_from_state(dc_state,
8978 									 dm_new_crtc_state->stream);
8979 			if (!status)
8980 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8981 			else
8982 				acrtc->otg_inst = status->primary_otg_inst;
8983 		}
8984 	}
8985 #ifdef CONFIG_DRM_AMD_DC_HDCP
8986 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8987 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8988 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8989 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8990 
8991 		new_crtc_state = NULL;
8992 
8993 		if (acrtc)
8994 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8995 
8996 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8997 
8998 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8999 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9000 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9001 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9002 			dm_new_con_state->update_hdcp = true;
9003 			continue;
9004 		}
9005 
9006 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9007 			hdcp_update_display(
9008 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9009 				new_con_state->hdcp_content_type,
9010 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9011 	}
9012 #endif
9013 
9014 	/* Handle connector state changes */
9015 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9016 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9017 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9018 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9019 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9020 		struct dc_stream_update stream_update;
9021 		struct dc_info_packet hdr_packet;
9022 		struct dc_stream_status *status = NULL;
9023 		bool abm_changed, hdr_changed, scaling_changed;
9024 
9025 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9026 		memset(&stream_update, 0, sizeof(stream_update));
9027 
9028 		if (acrtc) {
9029 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9030 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9031 		}
9032 
9033 		/* Skip any modesets/resets */
9034 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9035 			continue;
9036 
9037 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9038 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9039 
9040 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9041 							     dm_old_con_state);
9042 
9043 		abm_changed = dm_new_crtc_state->abm_level !=
9044 			      dm_old_crtc_state->abm_level;
9045 
9046 		hdr_changed =
9047 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9048 
9049 		if (!scaling_changed && !abm_changed && !hdr_changed)
9050 			continue;
9051 
9052 		stream_update.stream = dm_new_crtc_state->stream;
9053 		if (scaling_changed) {
9054 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9055 					dm_new_con_state, dm_new_crtc_state->stream);
9056 
9057 			stream_update.src = dm_new_crtc_state->stream->src;
9058 			stream_update.dst = dm_new_crtc_state->stream->dst;
9059 		}
9060 
9061 		if (abm_changed) {
9062 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9063 
9064 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9065 		}
9066 
9067 		if (hdr_changed) {
9068 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9069 			stream_update.hdr_static_metadata = &hdr_packet;
9070 		}
9071 
9072 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9073 
9074 		if (WARN_ON(!status))
9075 			continue;
9076 
9077 		WARN_ON(!status->plane_count);
9078 
9079 		/*
9080 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9081 		 * Here we create an empty update on each plane.
9082 		 * To fix this, DC should permit updating only stream properties.
9083 		 */
9084 		for (j = 0; j < status->plane_count; j++)
9085 			dummy_updates[j].surface = status->plane_states[0];
9086 
9087 
9088 		mutex_lock(&dm->dc_lock);
9089 		dc_commit_updates_for_stream(dm->dc,
9090 						     dummy_updates,
9091 						     status->plane_count,
9092 						     dm_new_crtc_state->stream,
9093 						     &stream_update,
9094 						     dc_state);
9095 		mutex_unlock(&dm->dc_lock);
9096 	}
9097 
9098 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9099 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9100 				      new_crtc_state, i) {
9101 		if (old_crtc_state->active && !new_crtc_state->active)
9102 			crtc_disable_count++;
9103 
9104 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9105 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9106 
9107 		/* For freesync config update on crtc state and params for irq */
9108 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9109 
9110 		/* Handle vrr on->off / off->on transitions */
9111 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9112 						dm_new_crtc_state);
9113 	}
9114 
9115 	/**
9116 	 * Enable interrupts for CRTCs that are newly enabled or went through
9117 	 * a modeset. It was intentionally deferred until after the front end
9118 	 * state was modified to wait until the OTG was on and so the IRQ
9119 	 * handlers didn't access stale or invalid state.
9120 	 */
9121 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9122 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9123 #ifdef CONFIG_DEBUG_FS
9124 		bool configure_crc = false;
9125 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9126 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9127 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9128 #endif
9129 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9130 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9131 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9132 #endif
9133 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9134 
9135 		if (new_crtc_state->active &&
9136 		    (!old_crtc_state->active ||
9137 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9138 			dc_stream_retain(dm_new_crtc_state->stream);
9139 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9140 			manage_dm_interrupts(adev, acrtc, true);
9141 
9142 #ifdef CONFIG_DEBUG_FS
9143 			/**
9144 			 * Frontend may have changed so reapply the CRC capture
9145 			 * settings for the stream.
9146 			 */
9147 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9148 
9149 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9150 				configure_crc = true;
9151 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9152 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9153 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9154 					acrtc->dm_irq_params.crc_window.update_win = true;
9155 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9156 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9157 					crc_rd_wrk->crtc = crtc;
9158 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9159 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9160 				}
9161 #endif
9162 			}
9163 
9164 			if (configure_crc)
9165 				if (amdgpu_dm_crtc_configure_crc_source(
9166 					crtc, dm_new_crtc_state, cur_crc_src))
9167 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9168 #endif
9169 		}
9170 	}
9171 
9172 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9173 		if (new_crtc_state->async_flip)
9174 			wait_for_vblank = false;
9175 
9176 	/* update planes when needed per crtc*/
9177 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9178 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9179 
9180 		if (dm_new_crtc_state->stream)
9181 			amdgpu_dm_commit_planes(state, dc_state, dev,
9182 						dm, crtc, wait_for_vblank);
9183 	}
9184 
9185 	/* Update audio instances for each connector. */
9186 	amdgpu_dm_commit_audio(dev, state);
9187 
9188 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9189 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9190 	/* restore the backlight level */
9191 	if (dm->backlight_dev)
9192 		amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9193 #endif
9194 	/*
9195 	 * send vblank event on all events not handled in flip and
9196 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9197 	 */
9198 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9199 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9200 
9201 		if (new_crtc_state->event)
9202 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9203 
9204 		new_crtc_state->event = NULL;
9205 	}
9206 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9207 
9208 	/* Signal HW programming completion */
9209 	drm_atomic_helper_commit_hw_done(state);
9210 
9211 	if (wait_for_vblank)
9212 		drm_atomic_helper_wait_for_flip_done(dev, state);
9213 
9214 	drm_atomic_helper_cleanup_planes(dev, state);
9215 
9216 	/* return the stolen vga memory back to VRAM */
9217 	if (!adev->mman.keep_stolen_vga_memory)
9218 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9219 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9220 
9221 	/*
9222 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9223 	 * so we can put the GPU into runtime suspend if we're not driving any
9224 	 * displays anymore
9225 	 */
9226 	for (i = 0; i < crtc_disable_count; i++)
9227 		pm_runtime_put_autosuspend(dev->dev);
9228 	pm_runtime_mark_last_busy(dev->dev);
9229 
9230 	if (dc_state_temp)
9231 		dc_release_state(dc_state_temp);
9232 }
9233 
9234 
9235 static int dm_force_atomic_commit(struct drm_connector *connector)
9236 {
9237 	int ret = 0;
9238 	struct drm_device *ddev = connector->dev;
9239 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9240 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9241 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9242 	struct drm_connector_state *conn_state;
9243 	struct drm_crtc_state *crtc_state;
9244 	struct drm_plane_state *plane_state;
9245 
9246 	if (!state)
9247 		return -ENOMEM;
9248 
9249 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9250 
9251 	/* Construct an atomic state to restore previous display setting */
9252 
9253 	/*
9254 	 * Attach connectors to drm_atomic_state
9255 	 */
9256 	conn_state = drm_atomic_get_connector_state(state, connector);
9257 
9258 	ret = PTR_ERR_OR_ZERO(conn_state);
9259 	if (ret)
9260 		goto out;
9261 
9262 	/* Attach crtc to drm_atomic_state*/
9263 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9264 
9265 	ret = PTR_ERR_OR_ZERO(crtc_state);
9266 	if (ret)
9267 		goto out;
9268 
9269 	/* force a restore */
9270 	crtc_state->mode_changed = true;
9271 
9272 	/* Attach plane to drm_atomic_state */
9273 	plane_state = drm_atomic_get_plane_state(state, plane);
9274 
9275 	ret = PTR_ERR_OR_ZERO(plane_state);
9276 	if (ret)
9277 		goto out;
9278 
9279 	/* Call commit internally with the state we just constructed */
9280 	ret = drm_atomic_commit(state);
9281 
9282 out:
9283 	drm_atomic_state_put(state);
9284 	if (ret)
9285 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9286 
9287 	return ret;
9288 }
9289 
9290 /*
9291  * This function handles all cases when set mode does not come upon hotplug.
9292  * This includes when a display is unplugged then plugged back into the
9293  * same port and when running without usermode desktop manager supprot
9294  */
9295 void dm_restore_drm_connector_state(struct drm_device *dev,
9296 				    struct drm_connector *connector)
9297 {
9298 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9299 	struct amdgpu_crtc *disconnected_acrtc;
9300 	struct dm_crtc_state *acrtc_state;
9301 
9302 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9303 		return;
9304 
9305 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9306 	if (!disconnected_acrtc)
9307 		return;
9308 
9309 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9310 	if (!acrtc_state->stream)
9311 		return;
9312 
9313 	/*
9314 	 * If the previous sink is not released and different from the current,
9315 	 * we deduce we are in a state where we can not rely on usermode call
9316 	 * to turn on the display, so we do it here
9317 	 */
9318 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9319 		dm_force_atomic_commit(&aconnector->base);
9320 }
9321 
9322 /*
9323  * Grabs all modesetting locks to serialize against any blocking commits,
9324  * Waits for completion of all non blocking commits.
9325  */
9326 static int do_aquire_global_lock(struct drm_device *dev,
9327 				 struct drm_atomic_state *state)
9328 {
9329 	struct drm_crtc *crtc;
9330 	struct drm_crtc_commit *commit;
9331 	long ret;
9332 
9333 	/*
9334 	 * Adding all modeset locks to aquire_ctx will
9335 	 * ensure that when the framework release it the
9336 	 * extra locks we are locking here will get released to
9337 	 */
9338 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9339 	if (ret)
9340 		return ret;
9341 
9342 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9343 		spin_lock(&crtc->commit_lock);
9344 		commit = list_first_entry_or_null(&crtc->commit_list,
9345 				struct drm_crtc_commit, commit_entry);
9346 		if (commit)
9347 			drm_crtc_commit_get(commit);
9348 		spin_unlock(&crtc->commit_lock);
9349 
9350 		if (!commit)
9351 			continue;
9352 
9353 		/*
9354 		 * Make sure all pending HW programming completed and
9355 		 * page flips done
9356 		 */
9357 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9358 
9359 		if (ret > 0)
9360 			ret = wait_for_completion_interruptible_timeout(
9361 					&commit->flip_done, 10*HZ);
9362 
9363 		if (ret == 0)
9364 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9365 				  "timed out\n", crtc->base.id, crtc->name);
9366 
9367 		drm_crtc_commit_put(commit);
9368 	}
9369 
9370 	return ret < 0 ? ret : 0;
9371 }
9372 
9373 static void get_freesync_config_for_crtc(
9374 	struct dm_crtc_state *new_crtc_state,
9375 	struct dm_connector_state *new_con_state)
9376 {
9377 	struct mod_freesync_config config = {0};
9378 	struct amdgpu_dm_connector *aconnector =
9379 			to_amdgpu_dm_connector(new_con_state->base.connector);
9380 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9381 	int vrefresh = drm_mode_vrefresh(mode);
9382 	bool fs_vid_mode = false;
9383 
9384 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9385 					vrefresh >= aconnector->min_vfreq &&
9386 					vrefresh <= aconnector->max_vfreq;
9387 
9388 	if (new_crtc_state->vrr_supported) {
9389 		new_crtc_state->stream->ignore_msa_timing_param = true;
9390 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9391 
9392 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9393 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9394 		config.vsif_supported = true;
9395 		config.btr = true;
9396 
9397 		if (fs_vid_mode) {
9398 			config.state = VRR_STATE_ACTIVE_FIXED;
9399 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9400 			goto out;
9401 		} else if (new_crtc_state->base.vrr_enabled) {
9402 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9403 		} else {
9404 			config.state = VRR_STATE_INACTIVE;
9405 		}
9406 	}
9407 out:
9408 	new_crtc_state->freesync_config = config;
9409 }
9410 
9411 static void reset_freesync_config_for_crtc(
9412 	struct dm_crtc_state *new_crtc_state)
9413 {
9414 	new_crtc_state->vrr_supported = false;
9415 
9416 	memset(&new_crtc_state->vrr_infopacket, 0,
9417 	       sizeof(new_crtc_state->vrr_infopacket));
9418 }
9419 
9420 static bool
9421 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9422 				 struct drm_crtc_state *new_crtc_state)
9423 {
9424 	struct drm_display_mode old_mode, new_mode;
9425 
9426 	if (!old_crtc_state || !new_crtc_state)
9427 		return false;
9428 
9429 	old_mode = old_crtc_state->mode;
9430 	new_mode = new_crtc_state->mode;
9431 
9432 	if (old_mode.clock       == new_mode.clock &&
9433 	    old_mode.hdisplay    == new_mode.hdisplay &&
9434 	    old_mode.vdisplay    == new_mode.vdisplay &&
9435 	    old_mode.htotal      == new_mode.htotal &&
9436 	    old_mode.vtotal      != new_mode.vtotal &&
9437 	    old_mode.hsync_start == new_mode.hsync_start &&
9438 	    old_mode.vsync_start != new_mode.vsync_start &&
9439 	    old_mode.hsync_end   == new_mode.hsync_end &&
9440 	    old_mode.vsync_end   != new_mode.vsync_end &&
9441 	    old_mode.hskew       == new_mode.hskew &&
9442 	    old_mode.vscan       == new_mode.vscan &&
9443 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9444 	    (new_mode.vsync_end - new_mode.vsync_start))
9445 		return true;
9446 
9447 	return false;
9448 }
9449 
9450 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9451 	uint64_t num, den, res;
9452 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9453 
9454 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9455 
9456 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9457 	den = (unsigned long long)new_crtc_state->mode.htotal *
9458 	      (unsigned long long)new_crtc_state->mode.vtotal;
9459 
9460 	res = div_u64(num, den);
9461 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9462 }
9463 
9464 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9465 				struct drm_atomic_state *state,
9466 				struct drm_crtc *crtc,
9467 				struct drm_crtc_state *old_crtc_state,
9468 				struct drm_crtc_state *new_crtc_state,
9469 				bool enable,
9470 				bool *lock_and_validation_needed)
9471 {
9472 	struct dm_atomic_state *dm_state = NULL;
9473 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9474 	struct dc_stream_state *new_stream;
9475 	int ret = 0;
9476 
9477 	/*
9478 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9479 	 * update changed items
9480 	 */
9481 	struct amdgpu_crtc *acrtc = NULL;
9482 	struct amdgpu_dm_connector *aconnector = NULL;
9483 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9484 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9485 
9486 	new_stream = NULL;
9487 
9488 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9489 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9490 	acrtc = to_amdgpu_crtc(crtc);
9491 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9492 
9493 	/* TODO This hack should go away */
9494 	if (aconnector && enable) {
9495 		/* Make sure fake sink is created in plug-in scenario */
9496 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9497 							    &aconnector->base);
9498 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9499 							    &aconnector->base);
9500 
9501 		if (IS_ERR(drm_new_conn_state)) {
9502 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9503 			goto fail;
9504 		}
9505 
9506 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9507 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9508 
9509 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9510 			goto skip_modeset;
9511 
9512 		new_stream = create_validate_stream_for_sink(aconnector,
9513 							     &new_crtc_state->mode,
9514 							     dm_new_conn_state,
9515 							     dm_old_crtc_state->stream);
9516 
9517 		/*
9518 		 * we can have no stream on ACTION_SET if a display
9519 		 * was disconnected during S3, in this case it is not an
9520 		 * error, the OS will be updated after detection, and
9521 		 * will do the right thing on next atomic commit
9522 		 */
9523 
9524 		if (!new_stream) {
9525 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9526 					__func__, acrtc->base.base.id);
9527 			ret = -ENOMEM;
9528 			goto fail;
9529 		}
9530 
9531 		/*
9532 		 * TODO: Check VSDB bits to decide whether this should
9533 		 * be enabled or not.
9534 		 */
9535 		new_stream->triggered_crtc_reset.enabled =
9536 			dm->force_timing_sync;
9537 
9538 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9539 
9540 		ret = fill_hdr_info_packet(drm_new_conn_state,
9541 					   &new_stream->hdr_static_metadata);
9542 		if (ret)
9543 			goto fail;
9544 
9545 		/*
9546 		 * If we already removed the old stream from the context
9547 		 * (and set the new stream to NULL) then we can't reuse
9548 		 * the old stream even if the stream and scaling are unchanged.
9549 		 * We'll hit the BUG_ON and black screen.
9550 		 *
9551 		 * TODO: Refactor this function to allow this check to work
9552 		 * in all conditions.
9553 		 */
9554 		if (amdgpu_freesync_vid_mode &&
9555 		    dm_new_crtc_state->stream &&
9556 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9557 			goto skip_modeset;
9558 
9559 		if (dm_new_crtc_state->stream &&
9560 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9561 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9562 			new_crtc_state->mode_changed = false;
9563 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9564 					 new_crtc_state->mode_changed);
9565 		}
9566 	}
9567 
9568 	/* mode_changed flag may get updated above, need to check again */
9569 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9570 		goto skip_modeset;
9571 
9572 	DRM_DEBUG_ATOMIC(
9573 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9574 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9575 		"connectors_changed:%d\n",
9576 		acrtc->crtc_id,
9577 		new_crtc_state->enable,
9578 		new_crtc_state->active,
9579 		new_crtc_state->planes_changed,
9580 		new_crtc_state->mode_changed,
9581 		new_crtc_state->active_changed,
9582 		new_crtc_state->connectors_changed);
9583 
9584 	/* Remove stream for any changed/disabled CRTC */
9585 	if (!enable) {
9586 
9587 		if (!dm_old_crtc_state->stream)
9588 			goto skip_modeset;
9589 
9590 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9591 		    is_timing_unchanged_for_freesync(new_crtc_state,
9592 						     old_crtc_state)) {
9593 			new_crtc_state->mode_changed = false;
9594 			DRM_DEBUG_DRIVER(
9595 				"Mode change not required for front porch change, "
9596 				"setting mode_changed to %d",
9597 				new_crtc_state->mode_changed);
9598 
9599 			set_freesync_fixed_config(dm_new_crtc_state);
9600 
9601 			goto skip_modeset;
9602 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9603 			   is_freesync_video_mode(&new_crtc_state->mode,
9604 						  aconnector)) {
9605 			set_freesync_fixed_config(dm_new_crtc_state);
9606 		}
9607 
9608 		ret = dm_atomic_get_state(state, &dm_state);
9609 		if (ret)
9610 			goto fail;
9611 
9612 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9613 				crtc->base.id);
9614 
9615 		/* i.e. reset mode */
9616 		if (dc_remove_stream_from_ctx(
9617 				dm->dc,
9618 				dm_state->context,
9619 				dm_old_crtc_state->stream) != DC_OK) {
9620 			ret = -EINVAL;
9621 			goto fail;
9622 		}
9623 
9624 		dc_stream_release(dm_old_crtc_state->stream);
9625 		dm_new_crtc_state->stream = NULL;
9626 
9627 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9628 
9629 		*lock_and_validation_needed = true;
9630 
9631 	} else {/* Add stream for any updated/enabled CRTC */
9632 		/*
9633 		 * Quick fix to prevent NULL pointer on new_stream when
9634 		 * added MST connectors not found in existing crtc_state in the chained mode
9635 		 * TODO: need to dig out the root cause of that
9636 		 */
9637 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9638 			goto skip_modeset;
9639 
9640 		if (modereset_required(new_crtc_state))
9641 			goto skip_modeset;
9642 
9643 		if (modeset_required(new_crtc_state, new_stream,
9644 				     dm_old_crtc_state->stream)) {
9645 
9646 			WARN_ON(dm_new_crtc_state->stream);
9647 
9648 			ret = dm_atomic_get_state(state, &dm_state);
9649 			if (ret)
9650 				goto fail;
9651 
9652 			dm_new_crtc_state->stream = new_stream;
9653 
9654 			dc_stream_retain(new_stream);
9655 
9656 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9657 					 crtc->base.id);
9658 
9659 			if (dc_add_stream_to_ctx(
9660 					dm->dc,
9661 					dm_state->context,
9662 					dm_new_crtc_state->stream) != DC_OK) {
9663 				ret = -EINVAL;
9664 				goto fail;
9665 			}
9666 
9667 			*lock_and_validation_needed = true;
9668 		}
9669 	}
9670 
9671 skip_modeset:
9672 	/* Release extra reference */
9673 	if (new_stream)
9674 		 dc_stream_release(new_stream);
9675 
9676 	/*
9677 	 * We want to do dc stream updates that do not require a
9678 	 * full modeset below.
9679 	 */
9680 	if (!(enable && aconnector && new_crtc_state->active))
9681 		return 0;
9682 	/*
9683 	 * Given above conditions, the dc state cannot be NULL because:
9684 	 * 1. We're in the process of enabling CRTCs (just been added
9685 	 *    to the dc context, or already is on the context)
9686 	 * 2. Has a valid connector attached, and
9687 	 * 3. Is currently active and enabled.
9688 	 * => The dc stream state currently exists.
9689 	 */
9690 	BUG_ON(dm_new_crtc_state->stream == NULL);
9691 
9692 	/* Scaling or underscan settings */
9693 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9694 				drm_atomic_crtc_needs_modeset(new_crtc_state))
9695 		update_stream_scaling_settings(
9696 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9697 
9698 	/* ABM settings */
9699 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9700 
9701 	/*
9702 	 * Color management settings. We also update color properties
9703 	 * when a modeset is needed, to ensure it gets reprogrammed.
9704 	 */
9705 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9706 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9707 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9708 		if (ret)
9709 			goto fail;
9710 	}
9711 
9712 	/* Update Freesync settings. */
9713 	get_freesync_config_for_crtc(dm_new_crtc_state,
9714 				     dm_new_conn_state);
9715 
9716 	return ret;
9717 
9718 fail:
9719 	if (new_stream)
9720 		dc_stream_release(new_stream);
9721 	return ret;
9722 }
9723 
9724 static bool should_reset_plane(struct drm_atomic_state *state,
9725 			       struct drm_plane *plane,
9726 			       struct drm_plane_state *old_plane_state,
9727 			       struct drm_plane_state *new_plane_state)
9728 {
9729 	struct drm_plane *other;
9730 	struct drm_plane_state *old_other_state, *new_other_state;
9731 	struct drm_crtc_state *new_crtc_state;
9732 	int i;
9733 
9734 	/*
9735 	 * TODO: Remove this hack once the checks below are sufficient
9736 	 * enough to determine when we need to reset all the planes on
9737 	 * the stream.
9738 	 */
9739 	if (state->allow_modeset)
9740 		return true;
9741 
9742 	/* Exit early if we know that we're adding or removing the plane. */
9743 	if (old_plane_state->crtc != new_plane_state->crtc)
9744 		return true;
9745 
9746 	/* old crtc == new_crtc == NULL, plane not in context. */
9747 	if (!new_plane_state->crtc)
9748 		return false;
9749 
9750 	new_crtc_state =
9751 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9752 
9753 	if (!new_crtc_state)
9754 		return true;
9755 
9756 	/* CRTC Degamma changes currently require us to recreate planes. */
9757 	if (new_crtc_state->color_mgmt_changed)
9758 		return true;
9759 
9760 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9761 		return true;
9762 
9763 	/*
9764 	 * If there are any new primary or overlay planes being added or
9765 	 * removed then the z-order can potentially change. To ensure
9766 	 * correct z-order and pipe acquisition the current DC architecture
9767 	 * requires us to remove and recreate all existing planes.
9768 	 *
9769 	 * TODO: Come up with a more elegant solution for this.
9770 	 */
9771 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9772 		struct amdgpu_framebuffer *old_afb, *new_afb;
9773 		if (other->type == DRM_PLANE_TYPE_CURSOR)
9774 			continue;
9775 
9776 		if (old_other_state->crtc != new_plane_state->crtc &&
9777 		    new_other_state->crtc != new_plane_state->crtc)
9778 			continue;
9779 
9780 		if (old_other_state->crtc != new_other_state->crtc)
9781 			return true;
9782 
9783 		/* Src/dst size and scaling updates. */
9784 		if (old_other_state->src_w != new_other_state->src_w ||
9785 		    old_other_state->src_h != new_other_state->src_h ||
9786 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9787 		    old_other_state->crtc_h != new_other_state->crtc_h)
9788 			return true;
9789 
9790 		/* Rotation / mirroring updates. */
9791 		if (old_other_state->rotation != new_other_state->rotation)
9792 			return true;
9793 
9794 		/* Blending updates. */
9795 		if (old_other_state->pixel_blend_mode !=
9796 		    new_other_state->pixel_blend_mode)
9797 			return true;
9798 
9799 		/* Alpha updates. */
9800 		if (old_other_state->alpha != new_other_state->alpha)
9801 			return true;
9802 
9803 		/* Colorspace changes. */
9804 		if (old_other_state->color_range != new_other_state->color_range ||
9805 		    old_other_state->color_encoding != new_other_state->color_encoding)
9806 			return true;
9807 
9808 		/* Framebuffer checks fall at the end. */
9809 		if (!old_other_state->fb || !new_other_state->fb)
9810 			continue;
9811 
9812 		/* Pixel format changes can require bandwidth updates. */
9813 		if (old_other_state->fb->format != new_other_state->fb->format)
9814 			return true;
9815 
9816 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9817 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9818 
9819 		/* Tiling and DCC changes also require bandwidth updates. */
9820 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9821 		    old_afb->base.modifier != new_afb->base.modifier)
9822 			return true;
9823 	}
9824 
9825 	return false;
9826 }
9827 
9828 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9829 			      struct drm_plane_state *new_plane_state,
9830 			      struct drm_framebuffer *fb)
9831 {
9832 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9833 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9834 	unsigned int pitch;
9835 	bool linear;
9836 
9837 	if (fb->width > new_acrtc->max_cursor_width ||
9838 	    fb->height > new_acrtc->max_cursor_height) {
9839 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9840 				 new_plane_state->fb->width,
9841 				 new_plane_state->fb->height);
9842 		return -EINVAL;
9843 	}
9844 	if (new_plane_state->src_w != fb->width << 16 ||
9845 	    new_plane_state->src_h != fb->height << 16) {
9846 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9847 		return -EINVAL;
9848 	}
9849 
9850 	/* Pitch in pixels */
9851 	pitch = fb->pitches[0] / fb->format->cpp[0];
9852 
9853 	if (fb->width != pitch) {
9854 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9855 				 fb->width, pitch);
9856 		return -EINVAL;
9857 	}
9858 
9859 	switch (pitch) {
9860 	case 64:
9861 	case 128:
9862 	case 256:
9863 		/* FB pitch is supported by cursor plane */
9864 		break;
9865 	default:
9866 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9867 		return -EINVAL;
9868 	}
9869 
9870 	/* Core DRM takes care of checking FB modifiers, so we only need to
9871 	 * check tiling flags when the FB doesn't have a modifier. */
9872 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9873 		if (adev->family < AMDGPU_FAMILY_AI) {
9874 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9875 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9876 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9877 		} else {
9878 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9879 		}
9880 		if (!linear) {
9881 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9882 			return -EINVAL;
9883 		}
9884 	}
9885 
9886 	return 0;
9887 }
9888 
9889 static int dm_update_plane_state(struct dc *dc,
9890 				 struct drm_atomic_state *state,
9891 				 struct drm_plane *plane,
9892 				 struct drm_plane_state *old_plane_state,
9893 				 struct drm_plane_state *new_plane_state,
9894 				 bool enable,
9895 				 bool *lock_and_validation_needed)
9896 {
9897 
9898 	struct dm_atomic_state *dm_state = NULL;
9899 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9900 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9901 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9902 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9903 	struct amdgpu_crtc *new_acrtc;
9904 	bool needs_reset;
9905 	int ret = 0;
9906 
9907 
9908 	new_plane_crtc = new_plane_state->crtc;
9909 	old_plane_crtc = old_plane_state->crtc;
9910 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9911 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9912 
9913 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9914 		if (!enable || !new_plane_crtc ||
9915 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9916 			return 0;
9917 
9918 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9919 
9920 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9921 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9922 			return -EINVAL;
9923 		}
9924 
9925 		if (new_plane_state->fb) {
9926 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9927 						 new_plane_state->fb);
9928 			if (ret)
9929 				return ret;
9930 		}
9931 
9932 		return 0;
9933 	}
9934 
9935 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9936 					 new_plane_state);
9937 
9938 	/* Remove any changed/removed planes */
9939 	if (!enable) {
9940 		if (!needs_reset)
9941 			return 0;
9942 
9943 		if (!old_plane_crtc)
9944 			return 0;
9945 
9946 		old_crtc_state = drm_atomic_get_old_crtc_state(
9947 				state, old_plane_crtc);
9948 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9949 
9950 		if (!dm_old_crtc_state->stream)
9951 			return 0;
9952 
9953 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9954 				plane->base.id, old_plane_crtc->base.id);
9955 
9956 		ret = dm_atomic_get_state(state, &dm_state);
9957 		if (ret)
9958 			return ret;
9959 
9960 		if (!dc_remove_plane_from_context(
9961 				dc,
9962 				dm_old_crtc_state->stream,
9963 				dm_old_plane_state->dc_state,
9964 				dm_state->context)) {
9965 
9966 			return -EINVAL;
9967 		}
9968 
9969 
9970 		dc_plane_state_release(dm_old_plane_state->dc_state);
9971 		dm_new_plane_state->dc_state = NULL;
9972 
9973 		*lock_and_validation_needed = true;
9974 
9975 	} else { /* Add new planes */
9976 		struct dc_plane_state *dc_new_plane_state;
9977 
9978 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9979 			return 0;
9980 
9981 		if (!new_plane_crtc)
9982 			return 0;
9983 
9984 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9985 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9986 
9987 		if (!dm_new_crtc_state->stream)
9988 			return 0;
9989 
9990 		if (!needs_reset)
9991 			return 0;
9992 
9993 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9994 		if (ret)
9995 			return ret;
9996 
9997 		WARN_ON(dm_new_plane_state->dc_state);
9998 
9999 		dc_new_plane_state = dc_create_plane_state(dc);
10000 		if (!dc_new_plane_state)
10001 			return -ENOMEM;
10002 
10003 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10004 				 plane->base.id, new_plane_crtc->base.id);
10005 
10006 		ret = fill_dc_plane_attributes(
10007 			drm_to_adev(new_plane_crtc->dev),
10008 			dc_new_plane_state,
10009 			new_plane_state,
10010 			new_crtc_state);
10011 		if (ret) {
10012 			dc_plane_state_release(dc_new_plane_state);
10013 			return ret;
10014 		}
10015 
10016 		ret = dm_atomic_get_state(state, &dm_state);
10017 		if (ret) {
10018 			dc_plane_state_release(dc_new_plane_state);
10019 			return ret;
10020 		}
10021 
10022 		/*
10023 		 * Any atomic check errors that occur after this will
10024 		 * not need a release. The plane state will be attached
10025 		 * to the stream, and therefore part of the atomic
10026 		 * state. It'll be released when the atomic state is
10027 		 * cleaned.
10028 		 */
10029 		if (!dc_add_plane_to_context(
10030 				dc,
10031 				dm_new_crtc_state->stream,
10032 				dc_new_plane_state,
10033 				dm_state->context)) {
10034 
10035 			dc_plane_state_release(dc_new_plane_state);
10036 			return -EINVAL;
10037 		}
10038 
10039 		dm_new_plane_state->dc_state = dc_new_plane_state;
10040 
10041 		/* Tell DC to do a full surface update every time there
10042 		 * is a plane change. Inefficient, but works for now.
10043 		 */
10044 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10045 
10046 		*lock_and_validation_needed = true;
10047 	}
10048 
10049 
10050 	return ret;
10051 }
10052 
10053 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10054 				struct drm_crtc *crtc,
10055 				struct drm_crtc_state *new_crtc_state)
10056 {
10057 	struct drm_plane_state *new_cursor_state, *new_primary_state;
10058 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10059 
10060 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10061 	 * cursor per pipe but it's going to inherit the scaling and
10062 	 * positioning from the underlying pipe. Check the cursor plane's
10063 	 * blending properties match the primary plane's. */
10064 
10065 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10066 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10067 	if (!new_cursor_state || !new_primary_state ||
10068 	    !new_cursor_state->fb || !new_primary_state->fb) {
10069 		return 0;
10070 	}
10071 
10072 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10073 			 (new_cursor_state->src_w >> 16);
10074 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10075 			 (new_cursor_state->src_h >> 16);
10076 
10077 	primary_scale_w = new_primary_state->crtc_w * 1000 /
10078 			 (new_primary_state->src_w >> 16);
10079 	primary_scale_h = new_primary_state->crtc_h * 1000 /
10080 			 (new_primary_state->src_h >> 16);
10081 
10082 	if (cursor_scale_w != primary_scale_w ||
10083 	    cursor_scale_h != primary_scale_h) {
10084 		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10085 		return -EINVAL;
10086 	}
10087 
10088 	return 0;
10089 }
10090 
10091 #if defined(CONFIG_DRM_AMD_DC_DCN)
10092 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10093 {
10094 	struct drm_connector *connector;
10095 	struct drm_connector_state *conn_state;
10096 	struct amdgpu_dm_connector *aconnector = NULL;
10097 	int i;
10098 	for_each_new_connector_in_state(state, connector, conn_state, i) {
10099 		if (conn_state->crtc != crtc)
10100 			continue;
10101 
10102 		aconnector = to_amdgpu_dm_connector(connector);
10103 		if (!aconnector->port || !aconnector->mst_port)
10104 			aconnector = NULL;
10105 		else
10106 			break;
10107 	}
10108 
10109 	if (!aconnector)
10110 		return 0;
10111 
10112 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10113 }
10114 #endif
10115 
10116 static int validate_overlay(struct drm_atomic_state *state)
10117 {
10118 	int i;
10119 	struct drm_plane *plane;
10120 	struct drm_plane_state *new_plane_state;
10121 	struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
10122 
10123 	/* Check if primary plane is contained inside overlay */
10124 	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10125 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10126 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10127 				return 0;
10128 
10129 			overlay_state = new_plane_state;
10130 			continue;
10131 		}
10132 	}
10133 
10134 	/* check if we're making changes to the overlay plane */
10135 	if (!overlay_state)
10136 		return 0;
10137 
10138 	/* check if overlay plane is enabled */
10139 	if (!overlay_state->crtc)
10140 		return 0;
10141 
10142 	/* find the primary plane for the CRTC that the overlay is enabled on */
10143 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10144 	if (IS_ERR(primary_state))
10145 		return PTR_ERR(primary_state);
10146 
10147 	/* check if primary plane is enabled */
10148 	if (!primary_state->crtc)
10149 		return 0;
10150 
10151 	/* check if cursor plane is enabled */
10152 	cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
10153 	if (IS_ERR(cursor_state))
10154 		return PTR_ERR(cursor_state);
10155 
10156 	if (drm_atomic_plane_disabling(plane->state, cursor_state))
10157 		return 0;
10158 
10159 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10160 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10161 	    primary_state->crtc_y < overlay_state->crtc_y ||
10162 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10163 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10164 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10165 		return -EINVAL;
10166 	}
10167 
10168 	return 0;
10169 }
10170 
10171 /**
10172  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10173  * @dev: The DRM device
10174  * @state: The atomic state to commit
10175  *
10176  * Validate that the given atomic state is programmable by DC into hardware.
10177  * This involves constructing a &struct dc_state reflecting the new hardware
10178  * state we wish to commit, then querying DC to see if it is programmable. It's
10179  * important not to modify the existing DC state. Otherwise, atomic_check
10180  * may unexpectedly commit hardware changes.
10181  *
10182  * When validating the DC state, it's important that the right locks are
10183  * acquired. For full updates case which removes/adds/updates streams on one
10184  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10185  * that any such full update commit will wait for completion of any outstanding
10186  * flip using DRMs synchronization events.
10187  *
10188  * Note that DM adds the affected connectors for all CRTCs in state, when that
10189  * might not seem necessary. This is because DC stream creation requires the
10190  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10191  * be possible but non-trivial - a possible TODO item.
10192  *
10193  * Return: -Error code if validation failed.
10194  */
10195 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10196 				  struct drm_atomic_state *state)
10197 {
10198 	struct amdgpu_device *adev = drm_to_adev(dev);
10199 	struct dm_atomic_state *dm_state = NULL;
10200 	struct dc *dc = adev->dm.dc;
10201 	struct drm_connector *connector;
10202 	struct drm_connector_state *old_con_state, *new_con_state;
10203 	struct drm_crtc *crtc;
10204 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10205 	struct drm_plane *plane;
10206 	struct drm_plane_state *old_plane_state, *new_plane_state;
10207 	enum dc_status status;
10208 	int ret, i;
10209 	bool lock_and_validation_needed = false;
10210 	struct dm_crtc_state *dm_old_crtc_state;
10211 
10212 	trace_amdgpu_dm_atomic_check_begin(state);
10213 
10214 	ret = drm_atomic_helper_check_modeset(dev, state);
10215 	if (ret)
10216 		goto fail;
10217 
10218 	/* Check connector changes */
10219 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10220 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10221 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10222 
10223 		/* Skip connectors that are disabled or part of modeset already. */
10224 		if (!old_con_state->crtc && !new_con_state->crtc)
10225 			continue;
10226 
10227 		if (!new_con_state->crtc)
10228 			continue;
10229 
10230 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10231 		if (IS_ERR(new_crtc_state)) {
10232 			ret = PTR_ERR(new_crtc_state);
10233 			goto fail;
10234 		}
10235 
10236 		if (dm_old_con_state->abm_level !=
10237 		    dm_new_con_state->abm_level)
10238 			new_crtc_state->connectors_changed = true;
10239 	}
10240 
10241 #if defined(CONFIG_DRM_AMD_DC_DCN)
10242 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10243 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10244 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10245 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10246 				if (ret)
10247 					goto fail;
10248 			}
10249 		}
10250 	}
10251 #endif
10252 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10253 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10254 
10255 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10256 		    !new_crtc_state->color_mgmt_changed &&
10257 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10258 			dm_old_crtc_state->dsc_force_changed == false)
10259 			continue;
10260 
10261 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10262 		if (ret)
10263 			goto fail;
10264 
10265 		if (!new_crtc_state->enable)
10266 			continue;
10267 
10268 		ret = drm_atomic_add_affected_connectors(state, crtc);
10269 		if (ret)
10270 			return ret;
10271 
10272 		ret = drm_atomic_add_affected_planes(state, crtc);
10273 		if (ret)
10274 			goto fail;
10275 
10276 		if (dm_old_crtc_state->dsc_force_changed)
10277 			new_crtc_state->mode_changed = true;
10278 	}
10279 
10280 	/*
10281 	 * Add all primary and overlay planes on the CRTC to the state
10282 	 * whenever a plane is enabled to maintain correct z-ordering
10283 	 * and to enable fast surface updates.
10284 	 */
10285 	drm_for_each_crtc(crtc, dev) {
10286 		bool modified = false;
10287 
10288 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10289 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10290 				continue;
10291 
10292 			if (new_plane_state->crtc == crtc ||
10293 			    old_plane_state->crtc == crtc) {
10294 				modified = true;
10295 				break;
10296 			}
10297 		}
10298 
10299 		if (!modified)
10300 			continue;
10301 
10302 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10303 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10304 				continue;
10305 
10306 			new_plane_state =
10307 				drm_atomic_get_plane_state(state, plane);
10308 
10309 			if (IS_ERR(new_plane_state)) {
10310 				ret = PTR_ERR(new_plane_state);
10311 				goto fail;
10312 			}
10313 		}
10314 	}
10315 
10316 	/* Remove exiting planes if they are modified */
10317 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10318 		ret = dm_update_plane_state(dc, state, plane,
10319 					    old_plane_state,
10320 					    new_plane_state,
10321 					    false,
10322 					    &lock_and_validation_needed);
10323 		if (ret)
10324 			goto fail;
10325 	}
10326 
10327 	/* Disable all crtcs which require disable */
10328 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10329 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10330 					   old_crtc_state,
10331 					   new_crtc_state,
10332 					   false,
10333 					   &lock_and_validation_needed);
10334 		if (ret)
10335 			goto fail;
10336 	}
10337 
10338 	/* Enable all crtcs which require enable */
10339 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10340 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10341 					   old_crtc_state,
10342 					   new_crtc_state,
10343 					   true,
10344 					   &lock_and_validation_needed);
10345 		if (ret)
10346 			goto fail;
10347 	}
10348 
10349 	ret = validate_overlay(state);
10350 	if (ret)
10351 		goto fail;
10352 
10353 	/* Add new/modified planes */
10354 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10355 		ret = dm_update_plane_state(dc, state, plane,
10356 					    old_plane_state,
10357 					    new_plane_state,
10358 					    true,
10359 					    &lock_and_validation_needed);
10360 		if (ret)
10361 			goto fail;
10362 	}
10363 
10364 	/* Run this here since we want to validate the streams we created */
10365 	ret = drm_atomic_helper_check_planes(dev, state);
10366 	if (ret)
10367 		goto fail;
10368 
10369 	/* Check cursor planes scaling */
10370 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10371 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10372 		if (ret)
10373 			goto fail;
10374 	}
10375 
10376 	if (state->legacy_cursor_update) {
10377 		/*
10378 		 * This is a fast cursor update coming from the plane update
10379 		 * helper, check if it can be done asynchronously for better
10380 		 * performance.
10381 		 */
10382 		state->async_update =
10383 			!drm_atomic_helper_async_check(dev, state);
10384 
10385 		/*
10386 		 * Skip the remaining global validation if this is an async
10387 		 * update. Cursor updates can be done without affecting
10388 		 * state or bandwidth calcs and this avoids the performance
10389 		 * penalty of locking the private state object and
10390 		 * allocating a new dc_state.
10391 		 */
10392 		if (state->async_update)
10393 			return 0;
10394 	}
10395 
10396 	/* Check scaling and underscan changes*/
10397 	/* TODO Removed scaling changes validation due to inability to commit
10398 	 * new stream into context w\o causing full reset. Need to
10399 	 * decide how to handle.
10400 	 */
10401 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10402 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10403 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10404 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10405 
10406 		/* Skip any modesets/resets */
10407 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10408 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10409 			continue;
10410 
10411 		/* Skip any thing not scale or underscan changes */
10412 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10413 			continue;
10414 
10415 		lock_and_validation_needed = true;
10416 	}
10417 
10418 	/**
10419 	 * Streams and planes are reset when there are changes that affect
10420 	 * bandwidth. Anything that affects bandwidth needs to go through
10421 	 * DC global validation to ensure that the configuration can be applied
10422 	 * to hardware.
10423 	 *
10424 	 * We have to currently stall out here in atomic_check for outstanding
10425 	 * commits to finish in this case because our IRQ handlers reference
10426 	 * DRM state directly - we can end up disabling interrupts too early
10427 	 * if we don't.
10428 	 *
10429 	 * TODO: Remove this stall and drop DM state private objects.
10430 	 */
10431 	if (lock_and_validation_needed) {
10432 		ret = dm_atomic_get_state(state, &dm_state);
10433 		if (ret)
10434 			goto fail;
10435 
10436 		ret = do_aquire_global_lock(dev, state);
10437 		if (ret)
10438 			goto fail;
10439 
10440 #if defined(CONFIG_DRM_AMD_DC_DCN)
10441 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10442 			goto fail;
10443 
10444 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10445 		if (ret)
10446 			goto fail;
10447 #endif
10448 
10449 		/*
10450 		 * Perform validation of MST topology in the state:
10451 		 * We need to perform MST atomic check before calling
10452 		 * dc_validate_global_state(), or there is a chance
10453 		 * to get stuck in an infinite loop and hang eventually.
10454 		 */
10455 		ret = drm_dp_mst_atomic_check(state);
10456 		if (ret)
10457 			goto fail;
10458 		status = dc_validate_global_state(dc, dm_state->context, false);
10459 		if (status != DC_OK) {
10460 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10461 				       dc_status_to_str(status), status);
10462 			ret = -EINVAL;
10463 			goto fail;
10464 		}
10465 	} else {
10466 		/*
10467 		 * The commit is a fast update. Fast updates shouldn't change
10468 		 * the DC context, affect global validation, and can have their
10469 		 * commit work done in parallel with other commits not touching
10470 		 * the same resource. If we have a new DC context as part of
10471 		 * the DM atomic state from validation we need to free it and
10472 		 * retain the existing one instead.
10473 		 *
10474 		 * Furthermore, since the DM atomic state only contains the DC
10475 		 * context and can safely be annulled, we can free the state
10476 		 * and clear the associated private object now to free
10477 		 * some memory and avoid a possible use-after-free later.
10478 		 */
10479 
10480 		for (i = 0; i < state->num_private_objs; i++) {
10481 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10482 
10483 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
10484 				int j = state->num_private_objs-1;
10485 
10486 				dm_atomic_destroy_state(obj,
10487 						state->private_objs[i].state);
10488 
10489 				/* If i is not at the end of the array then the
10490 				 * last element needs to be moved to where i was
10491 				 * before the array can safely be truncated.
10492 				 */
10493 				if (i != j)
10494 					state->private_objs[i] =
10495 						state->private_objs[j];
10496 
10497 				state->private_objs[j].ptr = NULL;
10498 				state->private_objs[j].state = NULL;
10499 				state->private_objs[j].old_state = NULL;
10500 				state->private_objs[j].new_state = NULL;
10501 
10502 				state->num_private_objs = j;
10503 				break;
10504 			}
10505 		}
10506 	}
10507 
10508 	/* Store the overall update type for use later in atomic check. */
10509 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10510 		struct dm_crtc_state *dm_new_crtc_state =
10511 			to_dm_crtc_state(new_crtc_state);
10512 
10513 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10514 							 UPDATE_TYPE_FULL :
10515 							 UPDATE_TYPE_FAST;
10516 	}
10517 
10518 	/* Must be success */
10519 	WARN_ON(ret);
10520 
10521 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10522 
10523 	return ret;
10524 
10525 fail:
10526 	if (ret == -EDEADLK)
10527 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10528 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10529 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10530 	else
10531 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10532 
10533 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10534 
10535 	return ret;
10536 }
10537 
10538 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10539 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10540 {
10541 	uint8_t dpcd_data;
10542 	bool capable = false;
10543 
10544 	if (amdgpu_dm_connector->dc_link &&
10545 		dm_helpers_dp_read_dpcd(
10546 				NULL,
10547 				amdgpu_dm_connector->dc_link,
10548 				DP_DOWN_STREAM_PORT_COUNT,
10549 				&dpcd_data,
10550 				sizeof(dpcd_data))) {
10551 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
10552 	}
10553 
10554 	return capable;
10555 }
10556 
10557 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10558 		uint8_t *edid_ext, int len,
10559 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10560 {
10561 	int i;
10562 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10563 	struct dc *dc = adev->dm.dc;
10564 
10565 	/* send extension block to DMCU for parsing */
10566 	for (i = 0; i < len; i += 8) {
10567 		bool res;
10568 		int offset;
10569 
10570 		/* send 8 bytes a time */
10571 		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10572 			return false;
10573 
10574 		if (i+8 == len) {
10575 			/* EDID block sent completed, expect result */
10576 			int version, min_rate, max_rate;
10577 
10578 			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10579 			if (res) {
10580 				/* amd vsdb found */
10581 				vsdb_info->freesync_supported = 1;
10582 				vsdb_info->amd_vsdb_version = version;
10583 				vsdb_info->min_refresh_rate_hz = min_rate;
10584 				vsdb_info->max_refresh_rate_hz = max_rate;
10585 				return true;
10586 			}
10587 			/* not amd vsdb */
10588 			return false;
10589 		}
10590 
10591 		/* check for ack*/
10592 		res = dc_edid_parser_recv_cea_ack(dc, &offset);
10593 		if (!res)
10594 			return false;
10595 	}
10596 
10597 	return false;
10598 }
10599 
10600 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10601 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10602 {
10603 	uint8_t *edid_ext = NULL;
10604 	int i;
10605 	bool valid_vsdb_found = false;
10606 
10607 	/*----- drm_find_cea_extension() -----*/
10608 	/* No EDID or EDID extensions */
10609 	if (edid == NULL || edid->extensions == 0)
10610 		return -ENODEV;
10611 
10612 	/* Find CEA extension */
10613 	for (i = 0; i < edid->extensions; i++) {
10614 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10615 		if (edid_ext[0] == CEA_EXT)
10616 			break;
10617 	}
10618 
10619 	if (i == edid->extensions)
10620 		return -ENODEV;
10621 
10622 	/*----- cea_db_offsets() -----*/
10623 	if (edid_ext[0] != CEA_EXT)
10624 		return -ENODEV;
10625 
10626 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10627 
10628 	return valid_vsdb_found ? i : -ENODEV;
10629 }
10630 
10631 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10632 					struct edid *edid)
10633 {
10634 	int i = 0;
10635 	struct detailed_timing *timing;
10636 	struct detailed_non_pixel *data;
10637 	struct detailed_data_monitor_range *range;
10638 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10639 			to_amdgpu_dm_connector(connector);
10640 	struct dm_connector_state *dm_con_state = NULL;
10641 
10642 	struct drm_device *dev = connector->dev;
10643 	struct amdgpu_device *adev = drm_to_adev(dev);
10644 	bool freesync_capable = false;
10645 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10646 
10647 	if (!connector->state) {
10648 		DRM_ERROR("%s - Connector has no state", __func__);
10649 		goto update;
10650 	}
10651 
10652 	if (!edid) {
10653 		dm_con_state = to_dm_connector_state(connector->state);
10654 
10655 		amdgpu_dm_connector->min_vfreq = 0;
10656 		amdgpu_dm_connector->max_vfreq = 0;
10657 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10658 
10659 		goto update;
10660 	}
10661 
10662 	dm_con_state = to_dm_connector_state(connector->state);
10663 
10664 	if (!amdgpu_dm_connector->dc_sink) {
10665 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10666 		goto update;
10667 	}
10668 	if (!adev->dm.freesync_module)
10669 		goto update;
10670 
10671 
10672 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10673 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10674 		bool edid_check_required = false;
10675 
10676 		if (edid) {
10677 			edid_check_required = is_dp_capable_without_timing_msa(
10678 						adev->dm.dc,
10679 						amdgpu_dm_connector);
10680 		}
10681 
10682 		if (edid_check_required == true && (edid->version > 1 ||
10683 		   (edid->version == 1 && edid->revision > 1))) {
10684 			for (i = 0; i < 4; i++) {
10685 
10686 				timing	= &edid->detailed_timings[i];
10687 				data	= &timing->data.other_data;
10688 				range	= &data->data.range;
10689 				/*
10690 				 * Check if monitor has continuous frequency mode
10691 				 */
10692 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10693 					continue;
10694 				/*
10695 				 * Check for flag range limits only. If flag == 1 then
10696 				 * no additional timing information provided.
10697 				 * Default GTF, GTF Secondary curve and CVT are not
10698 				 * supported
10699 				 */
10700 				if (range->flags != 1)
10701 					continue;
10702 
10703 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10704 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10705 				amdgpu_dm_connector->pixel_clock_mhz =
10706 					range->pixel_clock_mhz * 10;
10707 
10708 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10709 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10710 
10711 				break;
10712 			}
10713 
10714 			if (amdgpu_dm_connector->max_vfreq -
10715 			    amdgpu_dm_connector->min_vfreq > 10) {
10716 
10717 				freesync_capable = true;
10718 			}
10719 		}
10720 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10721 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10722 		if (i >= 0 && vsdb_info.freesync_supported) {
10723 			timing  = &edid->detailed_timings[i];
10724 			data    = &timing->data.other_data;
10725 
10726 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10727 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10728 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10729 				freesync_capable = true;
10730 
10731 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10732 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10733 		}
10734 	}
10735 
10736 update:
10737 	if (dm_con_state)
10738 		dm_con_state->freesync_capable = freesync_capable;
10739 
10740 	if (connector->vrr_capable_property)
10741 		drm_connector_set_vrr_capable_property(connector,
10742 						       freesync_capable);
10743 }
10744 
10745 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10746 {
10747 	struct amdgpu_device *adev = drm_to_adev(dev);
10748 	struct dc *dc = adev->dm.dc;
10749 	int i;
10750 
10751 	mutex_lock(&adev->dm.dc_lock);
10752 	if (dc->current_state) {
10753 		for (i = 0; i < dc->current_state->stream_count; ++i)
10754 			dc->current_state->streams[i]
10755 				->triggered_crtc_reset.enabled =
10756 				adev->dm.force_timing_sync;
10757 
10758 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10759 		dc_trigger_sync(dc, dc->current_state);
10760 	}
10761 	mutex_unlock(&adev->dm.dc_lock);
10762 }
10763 
10764 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10765 		       uint32_t value, const char *func_name)
10766 {
10767 #ifdef DM_CHECK_ADDR_0
10768 	if (address == 0) {
10769 		DC_ERR("invalid register write. address = 0");
10770 		return;
10771 	}
10772 #endif
10773 	cgs_write_register(ctx->cgs_device, address, value);
10774 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10775 }
10776 
10777 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10778 			  const char *func_name)
10779 {
10780 	uint32_t value;
10781 #ifdef DM_CHECK_ADDR_0
10782 	if (address == 0) {
10783 		DC_ERR("invalid register read; address = 0\n");
10784 		return 0;
10785 	}
10786 #endif
10787 
10788 	if (ctx->dmub_srv &&
10789 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10790 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10791 		ASSERT(false);
10792 		return 0;
10793 	}
10794 
10795 	value = cgs_read_register(ctx->cgs_device, address);
10796 
10797 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10798 
10799 	return value;
10800 }
10801 
10802 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10803 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
10804 {
10805 	struct amdgpu_device *adev = ctx->driver_context;
10806 	int ret = 0;
10807 
10808 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10809 	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10810 	if (ret == 0) {
10811 		*operation_result = AUX_RET_ERROR_TIMEOUT;
10812 		return -1;
10813 	}
10814 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10815 
10816 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10817 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10818 
10819 		// For read case, Copy data to payload
10820 		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10821 		(*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10822 			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10823 			adev->dm.dmub_notify->aux_reply.length);
10824 	}
10825 
10826 	return adev->dm.dmub_notify->aux_reply.length;
10827 }
10828