1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41 
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53 
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61 #include "amdgpu_dm_psr.h"
62 
63 #include "ivsrcid/ivsrcid_vislands30.h"
64 
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
73 
74 #include <drm/drm_atomic.h>
75 #include <drm/drm_atomic_uapi.h>
76 #include <drm/drm_atomic_helper.h>
77 #include <drm/drm_dp_mst_helper.h>
78 #include <drm/drm_fb_helper.h>
79 #include <drm/drm_fourcc.h>
80 #include <drm/drm_edid.h>
81 #include <drm/drm_vblank.h>
82 #include <drm/drm_audio_component.h>
83 
84 #if defined(CONFIG_DRM_AMD_DC_DCN)
85 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
86 
87 #include "dcn/dcn_1_0_offset.h"
88 #include "dcn/dcn_1_0_sh_mask.h"
89 #include "soc15_hw_ip.h"
90 #include "vega10_ip_offset.h"
91 
92 #include "soc15_common.h"
93 #endif
94 
95 #include "modules/inc/mod_freesync.h"
96 #include "modules/power/power_helpers.h"
97 #include "modules/inc/mod_info_packet.h"
98 
99 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
101 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
103 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
105 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
107 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
109 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
111 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
113 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
115 
116 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
117 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
118 
119 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
120 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
121 
122 /* Number of bytes in PSP header for firmware. */
123 #define PSP_HEADER_BYTES 0x100
124 
125 /* Number of bytes in PSP footer for firmware. */
126 #define PSP_FOOTER_BYTES 0x100
127 
128 /**
129  * DOC: overview
130  *
131  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
132  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
133  * requests into DC requests, and DC responses into DRM responses.
134  *
135  * The root control structure is &struct amdgpu_display_manager.
136  */
137 
138 /* basic init/fini API */
139 static int amdgpu_dm_init(struct amdgpu_device *adev);
140 static void amdgpu_dm_fini(struct amdgpu_device *adev);
141 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
142 
143 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
144 {
145 	switch (link->dpcd_caps.dongle_type) {
146 	case DISPLAY_DONGLE_NONE:
147 		return DRM_MODE_SUBCONNECTOR_Native;
148 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
149 		return DRM_MODE_SUBCONNECTOR_VGA;
150 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
151 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
152 		return DRM_MODE_SUBCONNECTOR_DVID;
153 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
154 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
155 		return DRM_MODE_SUBCONNECTOR_HDMIA;
156 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
157 	default:
158 		return DRM_MODE_SUBCONNECTOR_Unknown;
159 	}
160 }
161 
162 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
163 {
164 	struct dc_link *link = aconnector->dc_link;
165 	struct drm_connector *connector = &aconnector->base;
166 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
167 
168 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
169 		return;
170 
171 	if (aconnector->dc_sink)
172 		subconnector = get_subconnector_type(link);
173 
174 	drm_object_property_set_value(&connector->base,
175 			connector->dev->mode_config.dp_subconnector_property,
176 			subconnector);
177 }
178 
179 /*
180  * initializes drm_device display related structures, based on the information
181  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
182  * drm_encoder, drm_mode_config
183  *
184  * Returns 0 on success
185  */
186 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
187 /* removes and deallocates the drm structures, created by the above function */
188 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
189 
190 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
191 				struct drm_plane *plane,
192 				unsigned long possible_crtcs,
193 				const struct dc_plane_cap *plane_cap);
194 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
195 			       struct drm_plane *plane,
196 			       uint32_t link_index);
197 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
198 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
199 				    uint32_t link_index,
200 				    struct amdgpu_encoder *amdgpu_encoder);
201 static int amdgpu_dm_encoder_init(struct drm_device *dev,
202 				  struct amdgpu_encoder *aencoder,
203 				  uint32_t link_index);
204 
205 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
206 
207 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
208 
209 static int amdgpu_dm_atomic_check(struct drm_device *dev,
210 				  struct drm_atomic_state *state);
211 
212 static void handle_cursor_update(struct drm_plane *plane,
213 				 struct drm_plane_state *old_plane_state);
214 
215 static const struct drm_format_info *
216 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 
218 static bool
219 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
220 				 struct drm_crtc_state *new_crtc_state);
221 /*
222  * dm_vblank_get_counter
223  *
224  * @brief
225  * Get counter for number of vertical blanks
226  *
227  * @param
228  * struct amdgpu_device *adev - [in] desired amdgpu device
229  * int disp_idx - [in] which CRTC to get the counter from
230  *
231  * @return
232  * Counter for vertical blanks
233  */
234 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
235 {
236 	if (crtc >= adev->mode_info.num_crtc)
237 		return 0;
238 	else {
239 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
240 
241 		if (acrtc->dm_irq_params.stream == NULL) {
242 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
243 				  crtc);
244 			return 0;
245 		}
246 
247 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
248 	}
249 }
250 
251 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
252 				  u32 *vbl, u32 *position)
253 {
254 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
255 
256 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
257 		return -EINVAL;
258 	else {
259 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
260 
261 		if (acrtc->dm_irq_params.stream ==  NULL) {
262 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
263 				  crtc);
264 			return 0;
265 		}
266 
267 		/*
268 		 * TODO rework base driver to use values directly.
269 		 * for now parse it back into reg-format
270 		 */
271 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
272 					 &v_blank_start,
273 					 &v_blank_end,
274 					 &h_position,
275 					 &v_position);
276 
277 		*position = v_position | (h_position << 16);
278 		*vbl = v_blank_start | (v_blank_end << 16);
279 	}
280 
281 	return 0;
282 }
283 
284 static bool dm_is_idle(void *handle)
285 {
286 	/* XXX todo */
287 	return true;
288 }
289 
290 static int dm_wait_for_idle(void *handle)
291 {
292 	/* XXX todo */
293 	return 0;
294 }
295 
296 static bool dm_check_soft_reset(void *handle)
297 {
298 	return false;
299 }
300 
301 static int dm_soft_reset(void *handle)
302 {
303 	/* XXX todo */
304 	return 0;
305 }
306 
307 static struct amdgpu_crtc *
308 get_crtc_by_otg_inst(struct amdgpu_device *adev,
309 		     int otg_inst)
310 {
311 	struct drm_device *dev = adev_to_drm(adev);
312 	struct drm_crtc *crtc;
313 	struct amdgpu_crtc *amdgpu_crtc;
314 
315 	if (WARN_ON(otg_inst == -1))
316 		return adev->mode_info.crtcs[0];
317 
318 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 		amdgpu_crtc = to_amdgpu_crtc(crtc);
320 
321 		if (amdgpu_crtc->otg_inst == otg_inst)
322 			return amdgpu_crtc;
323 	}
324 
325 	return NULL;
326 }
327 
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329 {
330 	return acrtc->dm_irq_params.freesync_config.state ==
331 		       VRR_STATE_ACTIVE_VARIABLE ||
332 	       acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_FIXED;
334 }
335 
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337 {
338 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 }
341 
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 					      struct dm_crtc_state *new_state)
344 {
345 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
346 		return true;
347 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348 		return true;
349 	else
350 		return false;
351 }
352 
353 /**
354  * dm_pflip_high_irq() - Handle pageflip interrupt
355  * @interrupt_params: ignored
356  *
357  * Handles the pageflip interrupt by notifying all interested parties
358  * that the pageflip has been completed.
359  */
360 static void dm_pflip_high_irq(void *interrupt_params)
361 {
362 	struct amdgpu_crtc *amdgpu_crtc;
363 	struct common_irq_params *irq_params = interrupt_params;
364 	struct amdgpu_device *adev = irq_params->adev;
365 	unsigned long flags;
366 	struct drm_pending_vblank_event *e;
367 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
368 	bool vrr_active;
369 
370 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
371 
372 	/* IRQ could occur when in initial stage */
373 	/* TODO work and BO cleanup */
374 	if (amdgpu_crtc == NULL) {
375 		DC_LOG_PFLIP("CRTC is null, returning.\n");
376 		return;
377 	}
378 
379 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
380 
381 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
382 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
383 						 amdgpu_crtc->pflip_status,
384 						 AMDGPU_FLIP_SUBMITTED,
385 						 amdgpu_crtc->crtc_id,
386 						 amdgpu_crtc);
387 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
388 		return;
389 	}
390 
391 	/* page flip completed. */
392 	e = amdgpu_crtc->event;
393 	amdgpu_crtc->event = NULL;
394 
395 	WARN_ON(!e);
396 
397 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
398 
399 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
400 	if (!vrr_active ||
401 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
402 				      &v_blank_end, &hpos, &vpos) ||
403 	    (vpos < v_blank_start)) {
404 		/* Update to correct count and vblank timestamp if racing with
405 		 * vblank irq. This also updates to the correct vblank timestamp
406 		 * even in VRR mode, as scanout is past the front-porch atm.
407 		 */
408 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
409 
410 		/* Wake up userspace by sending the pageflip event with proper
411 		 * count and timestamp of vblank of flip completion.
412 		 */
413 		if (e) {
414 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
415 
416 			/* Event sent, so done with vblank for this flip */
417 			drm_crtc_vblank_put(&amdgpu_crtc->base);
418 		}
419 	} else if (e) {
420 		/* VRR active and inside front-porch: vblank count and
421 		 * timestamp for pageflip event will only be up to date after
422 		 * drm_crtc_handle_vblank() has been executed from late vblank
423 		 * irq handler after start of back-porch (vline 0). We queue the
424 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
425 		 * updated timestamp and count, once it runs after us.
426 		 *
427 		 * We need to open-code this instead of using the helper
428 		 * drm_crtc_arm_vblank_event(), as that helper would
429 		 * call drm_crtc_accurate_vblank_count(), which we must
430 		 * not call in VRR mode while we are in front-porch!
431 		 */
432 
433 		/* sequence will be replaced by real count during send-out. */
434 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
435 		e->pipe = amdgpu_crtc->crtc_id;
436 
437 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
438 		e = NULL;
439 	}
440 
441 	/* Keep track of vblank of this flip for flip throttling. We use the
442 	 * cooked hw counter, as that one incremented at start of this vblank
443 	 * of pageflip completion, so last_flip_vblank is the forbidden count
444 	 * for queueing new pageflips if vsync + VRR is enabled.
445 	 */
446 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
447 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
448 
449 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
450 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
451 
452 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
453 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
454 		     vrr_active, (int) !e);
455 }
456 
457 static void dm_vupdate_high_irq(void *interrupt_params)
458 {
459 	struct common_irq_params *irq_params = interrupt_params;
460 	struct amdgpu_device *adev = irq_params->adev;
461 	struct amdgpu_crtc *acrtc;
462 	struct drm_device *drm_dev;
463 	struct drm_vblank_crtc *vblank;
464 	ktime_t frame_duration_ns, previous_timestamp;
465 	unsigned long flags;
466 	int vrr_active;
467 
468 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
469 
470 	if (acrtc) {
471 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
472 		drm_dev = acrtc->base.dev;
473 		vblank = &drm_dev->vblank[acrtc->base.index];
474 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
475 		frame_duration_ns = vblank->time - previous_timestamp;
476 
477 		if (frame_duration_ns > 0) {
478 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
479 						frame_duration_ns,
480 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
481 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
482 		}
483 
484 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
485 			      acrtc->crtc_id,
486 			      vrr_active);
487 
488 		/* Core vblank handling is done here after end of front-porch in
489 		 * vrr mode, as vblank timestamping will give valid results
490 		 * while now done after front-porch. This will also deliver
491 		 * page-flip completion events that have been queued to us
492 		 * if a pageflip happened inside front-porch.
493 		 */
494 		if (vrr_active) {
495 			drm_crtc_handle_vblank(&acrtc->base);
496 
497 			/* BTR processing for pre-DCE12 ASICs */
498 			if (acrtc->dm_irq_params.stream &&
499 			    adev->family < AMDGPU_FAMILY_AI) {
500 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
501 				mod_freesync_handle_v_update(
502 				    adev->dm.freesync_module,
503 				    acrtc->dm_irq_params.stream,
504 				    &acrtc->dm_irq_params.vrr_params);
505 
506 				dc_stream_adjust_vmin_vmax(
507 				    adev->dm.dc,
508 				    acrtc->dm_irq_params.stream,
509 				    &acrtc->dm_irq_params.vrr_params.adjust);
510 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
511 			}
512 		}
513 	}
514 }
515 
516 /**
517  * dm_crtc_high_irq() - Handles CRTC interrupt
518  * @interrupt_params: used for determining the CRTC instance
519  *
520  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
521  * event handler.
522  */
523 static void dm_crtc_high_irq(void *interrupt_params)
524 {
525 	struct common_irq_params *irq_params = interrupt_params;
526 	struct amdgpu_device *adev = irq_params->adev;
527 	struct amdgpu_crtc *acrtc;
528 	unsigned long flags;
529 	int vrr_active;
530 
531 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
532 	if (!acrtc)
533 		return;
534 
535 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
536 
537 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
538 		      vrr_active, acrtc->dm_irq_params.active_planes);
539 
540 	/**
541 	 * Core vblank handling at start of front-porch is only possible
542 	 * in non-vrr mode, as only there vblank timestamping will give
543 	 * valid results while done in front-porch. Otherwise defer it
544 	 * to dm_vupdate_high_irq after end of front-porch.
545 	 */
546 	if (!vrr_active)
547 		drm_crtc_handle_vblank(&acrtc->base);
548 
549 	/**
550 	 * Following stuff must happen at start of vblank, for crc
551 	 * computation and below-the-range btr support in vrr mode.
552 	 */
553 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
554 
555 	/* BTR updates need to happen before VUPDATE on Vega and above. */
556 	if (adev->family < AMDGPU_FAMILY_AI)
557 		return;
558 
559 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
560 
561 	if (acrtc->dm_irq_params.stream &&
562 	    acrtc->dm_irq_params.vrr_params.supported &&
563 	    acrtc->dm_irq_params.freesync_config.state ==
564 		    VRR_STATE_ACTIVE_VARIABLE) {
565 		mod_freesync_handle_v_update(adev->dm.freesync_module,
566 					     acrtc->dm_irq_params.stream,
567 					     &acrtc->dm_irq_params.vrr_params);
568 
569 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
570 					   &acrtc->dm_irq_params.vrr_params.adjust);
571 	}
572 
573 	/*
574 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
575 	 * In that case, pageflip completion interrupts won't fire and pageflip
576 	 * completion events won't get delivered. Prevent this by sending
577 	 * pending pageflip events from here if a flip is still pending.
578 	 *
579 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
580 	 * avoid race conditions between flip programming and completion,
581 	 * which could cause too early flip completion events.
582 	 */
583 	if (adev->family >= AMDGPU_FAMILY_RV &&
584 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
585 	    acrtc->dm_irq_params.active_planes == 0) {
586 		if (acrtc->event) {
587 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
588 			acrtc->event = NULL;
589 			drm_crtc_vblank_put(&acrtc->base);
590 		}
591 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
592 	}
593 
594 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
595 }
596 
597 #if defined(CONFIG_DRM_AMD_DC_DCN)
598 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
599 /**
600  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601  * DCN generation ASICs
602  * @interrupt_params: interrupt parameters
603  *
604  * Used to set crc window/read out crc value at vertical line 0 position
605  */
606 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
607 {
608 	struct common_irq_params *irq_params = interrupt_params;
609 	struct amdgpu_device *adev = irq_params->adev;
610 	struct amdgpu_crtc *acrtc;
611 
612 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
613 
614 	if (!acrtc)
615 		return;
616 
617 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
618 }
619 #endif
620 
621 #define DMUB_TRACE_MAX_READ 64
622 /**
623  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
624  * @interrupt_params: used for determining the Outbox instance
625  *
626  * Handles the Outbox Interrupt
627  * event handler.
628  */
629 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
630 {
631 	struct dmub_notification notify;
632 	struct common_irq_params *irq_params = interrupt_params;
633 	struct amdgpu_device *adev = irq_params->adev;
634 	struct amdgpu_display_manager *dm = &adev->dm;
635 	struct dmcub_trace_buf_entry entry = { 0 };
636 	uint32_t count = 0;
637 
638 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
639 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
640 			do {
641 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
642 			} while (notify.pending_notification);
643 
644 			if (adev->dm.dmub_notify)
645 				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
646 			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
647 				complete(&adev->dm.dmub_aux_transfer_done);
648 			// TODO : HPD Implementation
649 
650 		} else {
651 			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
652 		}
653 	}
654 
655 
656 	do {
657 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
658 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
659 							entry.param0, entry.param1);
660 
661 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
662 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
663 		} else
664 			break;
665 
666 		count++;
667 
668 	} while (count <= DMUB_TRACE_MAX_READ);
669 
670 	ASSERT(count <= DMUB_TRACE_MAX_READ);
671 }
672 #endif
673 
674 static int dm_set_clockgating_state(void *handle,
675 		  enum amd_clockgating_state state)
676 {
677 	return 0;
678 }
679 
680 static int dm_set_powergating_state(void *handle,
681 		  enum amd_powergating_state state)
682 {
683 	return 0;
684 }
685 
686 /* Prototypes of private functions */
687 static int dm_early_init(void* handle);
688 
689 /* Allocate memory for FBC compressed data  */
690 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
691 {
692 	struct drm_device *dev = connector->dev;
693 	struct amdgpu_device *adev = drm_to_adev(dev);
694 	struct dm_compressor_info *compressor = &adev->dm.compressor;
695 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
696 	struct drm_display_mode *mode;
697 	unsigned long max_size = 0;
698 
699 	if (adev->dm.dc->fbc_compressor == NULL)
700 		return;
701 
702 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
703 		return;
704 
705 	if (compressor->bo_ptr)
706 		return;
707 
708 
709 	list_for_each_entry(mode, &connector->modes, head) {
710 		if (max_size < mode->htotal * mode->vtotal)
711 			max_size = mode->htotal * mode->vtotal;
712 	}
713 
714 	if (max_size) {
715 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
716 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
717 			    &compressor->gpu_addr, &compressor->cpu_addr);
718 
719 		if (r)
720 			DRM_ERROR("DM: Failed to initialize FBC\n");
721 		else {
722 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
723 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
724 		}
725 
726 	}
727 
728 }
729 
730 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
731 					  int pipe, bool *enabled,
732 					  unsigned char *buf, int max_bytes)
733 {
734 	struct drm_device *dev = dev_get_drvdata(kdev);
735 	struct amdgpu_device *adev = drm_to_adev(dev);
736 	struct drm_connector *connector;
737 	struct drm_connector_list_iter conn_iter;
738 	struct amdgpu_dm_connector *aconnector;
739 	int ret = 0;
740 
741 	*enabled = false;
742 
743 	mutex_lock(&adev->dm.audio_lock);
744 
745 	drm_connector_list_iter_begin(dev, &conn_iter);
746 	drm_for_each_connector_iter(connector, &conn_iter) {
747 		aconnector = to_amdgpu_dm_connector(connector);
748 		if (aconnector->audio_inst != port)
749 			continue;
750 
751 		*enabled = true;
752 		ret = drm_eld_size(connector->eld);
753 		memcpy(buf, connector->eld, min(max_bytes, ret));
754 
755 		break;
756 	}
757 	drm_connector_list_iter_end(&conn_iter);
758 
759 	mutex_unlock(&adev->dm.audio_lock);
760 
761 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
762 
763 	return ret;
764 }
765 
766 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
767 	.get_eld = amdgpu_dm_audio_component_get_eld,
768 };
769 
770 static int amdgpu_dm_audio_component_bind(struct device *kdev,
771 				       struct device *hda_kdev, void *data)
772 {
773 	struct drm_device *dev = dev_get_drvdata(kdev);
774 	struct amdgpu_device *adev = drm_to_adev(dev);
775 	struct drm_audio_component *acomp = data;
776 
777 	acomp->ops = &amdgpu_dm_audio_component_ops;
778 	acomp->dev = kdev;
779 	adev->dm.audio_component = acomp;
780 
781 	return 0;
782 }
783 
784 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
785 					  struct device *hda_kdev, void *data)
786 {
787 	struct drm_device *dev = dev_get_drvdata(kdev);
788 	struct amdgpu_device *adev = drm_to_adev(dev);
789 	struct drm_audio_component *acomp = data;
790 
791 	acomp->ops = NULL;
792 	acomp->dev = NULL;
793 	adev->dm.audio_component = NULL;
794 }
795 
796 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
797 	.bind	= amdgpu_dm_audio_component_bind,
798 	.unbind	= amdgpu_dm_audio_component_unbind,
799 };
800 
801 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
802 {
803 	int i, ret;
804 
805 	if (!amdgpu_audio)
806 		return 0;
807 
808 	adev->mode_info.audio.enabled = true;
809 
810 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
811 
812 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
813 		adev->mode_info.audio.pin[i].channels = -1;
814 		adev->mode_info.audio.pin[i].rate = -1;
815 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
816 		adev->mode_info.audio.pin[i].status_bits = 0;
817 		adev->mode_info.audio.pin[i].category_code = 0;
818 		adev->mode_info.audio.pin[i].connected = false;
819 		adev->mode_info.audio.pin[i].id =
820 			adev->dm.dc->res_pool->audios[i]->inst;
821 		adev->mode_info.audio.pin[i].offset = 0;
822 	}
823 
824 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
825 	if (ret < 0)
826 		return ret;
827 
828 	adev->dm.audio_registered = true;
829 
830 	return 0;
831 }
832 
833 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
834 {
835 	if (!amdgpu_audio)
836 		return;
837 
838 	if (!adev->mode_info.audio.enabled)
839 		return;
840 
841 	if (adev->dm.audio_registered) {
842 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
843 		adev->dm.audio_registered = false;
844 	}
845 
846 	/* TODO: Disable audio? */
847 
848 	adev->mode_info.audio.enabled = false;
849 }
850 
851 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
852 {
853 	struct drm_audio_component *acomp = adev->dm.audio_component;
854 
855 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
856 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
857 
858 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
859 						 pin, -1);
860 	}
861 }
862 
863 static int dm_dmub_hw_init(struct amdgpu_device *adev)
864 {
865 	const struct dmcub_firmware_header_v1_0 *hdr;
866 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
867 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
868 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
869 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
870 	struct abm *abm = adev->dm.dc->res_pool->abm;
871 	struct dmub_srv_hw_params hw_params;
872 	enum dmub_status status;
873 	const unsigned char *fw_inst_const, *fw_bss_data;
874 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
875 	bool has_hw_support;
876 
877 	if (!dmub_srv)
878 		/* DMUB isn't supported on the ASIC. */
879 		return 0;
880 
881 	if (!fb_info) {
882 		DRM_ERROR("No framebuffer info for DMUB service.\n");
883 		return -EINVAL;
884 	}
885 
886 	if (!dmub_fw) {
887 		/* Firmware required for DMUB support. */
888 		DRM_ERROR("No firmware provided for DMUB.\n");
889 		return -EINVAL;
890 	}
891 
892 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
893 	if (status != DMUB_STATUS_OK) {
894 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
895 		return -EINVAL;
896 	}
897 
898 	if (!has_hw_support) {
899 		DRM_INFO("DMUB unsupported on ASIC\n");
900 		return 0;
901 	}
902 
903 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
904 
905 	fw_inst_const = dmub_fw->data +
906 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
907 			PSP_HEADER_BYTES;
908 
909 	fw_bss_data = dmub_fw->data +
910 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
911 		      le32_to_cpu(hdr->inst_const_bytes);
912 
913 	/* Copy firmware and bios info into FB memory. */
914 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
915 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
916 
917 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
918 
919 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
920 	 * amdgpu_ucode_init_single_fw will load dmub firmware
921 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
922 	 * will be done by dm_dmub_hw_init
923 	 */
924 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
925 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
926 				fw_inst_const_size);
927 	}
928 
929 	if (fw_bss_data_size)
930 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
931 		       fw_bss_data, fw_bss_data_size);
932 
933 	/* Copy firmware bios info into FB memory. */
934 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
935 	       adev->bios_size);
936 
937 	/* Reset regions that need to be reset. */
938 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
939 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
940 
941 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
942 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
943 
944 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
945 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
946 
947 	/* Initialize hardware. */
948 	memset(&hw_params, 0, sizeof(hw_params));
949 	hw_params.fb_base = adev->gmc.fb_start;
950 	hw_params.fb_offset = adev->gmc.aper_base;
951 
952 	/* backdoor load firmware and trigger dmub running */
953 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
954 		hw_params.load_inst_const = true;
955 
956 	if (dmcu)
957 		hw_params.psp_version = dmcu->psp_version;
958 
959 	for (i = 0; i < fb_info->num_fb; ++i)
960 		hw_params.fb[i] = &fb_info->fb[i];
961 
962 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
963 	if (status != DMUB_STATUS_OK) {
964 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
965 		return -EINVAL;
966 	}
967 
968 	/* Wait for firmware load to finish. */
969 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
970 	if (status != DMUB_STATUS_OK)
971 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
972 
973 	/* Init DMCU and ABM if available. */
974 	if (dmcu && abm) {
975 		dmcu->funcs->dmcu_init(dmcu);
976 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
977 	}
978 
979 	if (!adev->dm.dc->ctx->dmub_srv)
980 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
981 	if (!adev->dm.dc->ctx->dmub_srv) {
982 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
983 		return -ENOMEM;
984 	}
985 
986 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
987 		 adev->dm.dmcub_fw_version);
988 
989 	return 0;
990 }
991 
992 #if defined(CONFIG_DRM_AMD_DC_DCN)
993 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
994 {
995 	uint64_t pt_base;
996 	uint32_t logical_addr_low;
997 	uint32_t logical_addr_high;
998 	uint32_t agp_base, agp_bot, agp_top;
999 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1000 
1001 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1002 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1003 
1004 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1005 		/*
1006 		 * Raven2 has a HW issue that it is unable to use the vram which
1007 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1008 		 * workaround that increase system aperture high address (add 1)
1009 		 * to get rid of the VM fault and hardware hang.
1010 		 */
1011 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1012 	else
1013 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1014 
1015 	agp_base = 0;
1016 	agp_bot = adev->gmc.agp_start >> 24;
1017 	agp_top = adev->gmc.agp_end >> 24;
1018 
1019 
1020 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1021 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1022 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1023 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1024 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1025 	page_table_base.low_part = lower_32_bits(pt_base);
1026 
1027 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1028 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1029 
1030 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1031 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1032 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1033 
1034 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1035 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1036 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1037 
1038 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1039 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1040 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1041 
1042 	pa_config->is_hvm_enabled = 0;
1043 
1044 }
1045 #endif
1046 #if defined(CONFIG_DRM_AMD_DC_DCN)
1047 static void event_mall_stutter(struct work_struct *work)
1048 {
1049 
1050 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1051 	struct amdgpu_display_manager *dm = vblank_work->dm;
1052 
1053 	mutex_lock(&dm->dc_lock);
1054 
1055 	if (vblank_work->enable)
1056 		dm->active_vblank_irq_count++;
1057 	else if(dm->active_vblank_irq_count)
1058 		dm->active_vblank_irq_count--;
1059 
1060 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1061 
1062 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1063 
1064 	mutex_unlock(&dm->dc_lock);
1065 }
1066 
1067 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1068 {
1069 	struct vblank_workqueue *vblank_work;
1070 
1071 	vblank_work = kzalloc(sizeof(*vblank_work), GFP_KERNEL);
1072 	if (ZERO_OR_NULL_PTR(vblank_work)) {
1073 		kfree(vblank_work);
1074 		return NULL;
1075 	}
1076 
1077 	INIT_WORK(&vblank_work->mall_work, event_mall_stutter);
1078 
1079 	return vblank_work;
1080 }
1081 #endif
1082 static int amdgpu_dm_init(struct amdgpu_device *adev)
1083 {
1084 	struct dc_init_data init_data;
1085 #ifdef CONFIG_DRM_AMD_DC_HDCP
1086 	struct dc_callback_init init_params;
1087 #endif
1088 	int r;
1089 
1090 	adev->dm.ddev = adev_to_drm(adev);
1091 	adev->dm.adev = adev;
1092 
1093 	/* Zero all the fields */
1094 	memset(&init_data, 0, sizeof(init_data));
1095 #ifdef CONFIG_DRM_AMD_DC_HDCP
1096 	memset(&init_params, 0, sizeof(init_params));
1097 #endif
1098 
1099 	mutex_init(&adev->dm.dc_lock);
1100 	mutex_init(&adev->dm.audio_lock);
1101 #if defined(CONFIG_DRM_AMD_DC_DCN)
1102 	spin_lock_init(&adev->dm.vblank_lock);
1103 #endif
1104 
1105 	if(amdgpu_dm_irq_init(adev)) {
1106 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1107 		goto error;
1108 	}
1109 
1110 	init_data.asic_id.chip_family = adev->family;
1111 
1112 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1113 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1114 
1115 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1116 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1117 	init_data.asic_id.atombios_base_address =
1118 		adev->mode_info.atom_context->bios;
1119 
1120 	init_data.driver = adev;
1121 
1122 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1123 
1124 	if (!adev->dm.cgs_device) {
1125 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1126 		goto error;
1127 	}
1128 
1129 	init_data.cgs_device = adev->dm.cgs_device;
1130 
1131 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1132 
1133 	switch (adev->asic_type) {
1134 	case CHIP_CARRIZO:
1135 	case CHIP_STONEY:
1136 	case CHIP_RAVEN:
1137 	case CHIP_RENOIR:
1138 		init_data.flags.gpu_vm_support = true;
1139 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1140 			init_data.flags.disable_dmcu = true;
1141 		break;
1142 	case CHIP_VANGOGH:
1143 	case CHIP_YELLOW_CARP:
1144 		init_data.flags.gpu_vm_support = true;
1145 		break;
1146 	default:
1147 		break;
1148 	}
1149 
1150 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1151 		init_data.flags.fbc_support = true;
1152 
1153 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1154 		init_data.flags.multi_mon_pp_mclk_switch = true;
1155 
1156 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1157 		init_data.flags.disable_fractional_pwm = true;
1158 
1159 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1160 		init_data.flags.edp_no_power_sequencing = true;
1161 
1162 	init_data.flags.power_down_display_on_boot = true;
1163 
1164 	INIT_LIST_HEAD(&adev->dm.da_list);
1165 	/* Display Core create. */
1166 	adev->dm.dc = dc_create(&init_data);
1167 
1168 	if (adev->dm.dc) {
1169 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1170 	} else {
1171 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1172 		goto error;
1173 	}
1174 
1175 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1176 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1177 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1178 	}
1179 
1180 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1181 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1182 
1183 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1184 		adev->dm.dc->debug.disable_stutter = true;
1185 
1186 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1187 		adev->dm.dc->debug.disable_dsc = true;
1188 
1189 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1190 		adev->dm.dc->debug.disable_clock_gate = true;
1191 
1192 	r = dm_dmub_hw_init(adev);
1193 	if (r) {
1194 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1195 		goto error;
1196 	}
1197 
1198 	dc_hardware_init(adev->dm.dc);
1199 
1200 #if defined(CONFIG_DRM_AMD_DC_DCN)
1201 	if (adev->apu_flags) {
1202 		struct dc_phy_addr_space_config pa_config;
1203 
1204 		mmhub_read_system_context(adev, &pa_config);
1205 
1206 		// Call the DC init_memory func
1207 		dc_setup_system_context(adev->dm.dc, &pa_config);
1208 	}
1209 #endif
1210 
1211 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1212 	if (!adev->dm.freesync_module) {
1213 		DRM_ERROR(
1214 		"amdgpu: failed to initialize freesync_module.\n");
1215 	} else
1216 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1217 				adev->dm.freesync_module);
1218 
1219 	amdgpu_dm_init_color_mod();
1220 
1221 #if defined(CONFIG_DRM_AMD_DC_DCN)
1222 	if (adev->dm.dc->caps.max_links > 0) {
1223 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1224 
1225 		if (!adev->dm.vblank_workqueue)
1226 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1227 		else
1228 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1229 	}
1230 #endif
1231 
1232 #ifdef CONFIG_DRM_AMD_DC_HDCP
1233 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1234 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1235 
1236 		if (!adev->dm.hdcp_workqueue)
1237 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1238 		else
1239 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1240 
1241 		dc_init_callbacks(adev->dm.dc, &init_params);
1242 	}
1243 #endif
1244 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1245 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1246 #endif
1247 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1248 		init_completion(&adev->dm.dmub_aux_transfer_done);
1249 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1250 		if (!adev->dm.dmub_notify) {
1251 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1252 			goto error;
1253 		}
1254 		amdgpu_dm_outbox_init(adev);
1255 	}
1256 
1257 	if (amdgpu_dm_initialize_drm_device(adev)) {
1258 		DRM_ERROR(
1259 		"amdgpu: failed to initialize sw for display support.\n");
1260 		goto error;
1261 	}
1262 
1263 	/* create fake encoders for MST */
1264 	dm_dp_create_fake_mst_encoders(adev);
1265 
1266 	/* TODO: Add_display_info? */
1267 
1268 	/* TODO use dynamic cursor width */
1269 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1270 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1271 
1272 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1273 		DRM_ERROR(
1274 		"amdgpu: failed to initialize sw for display support.\n");
1275 		goto error;
1276 	}
1277 
1278 
1279 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1280 
1281 	return 0;
1282 error:
1283 	amdgpu_dm_fini(adev);
1284 
1285 	return -EINVAL;
1286 }
1287 
1288 static int amdgpu_dm_early_fini(void *handle)
1289 {
1290 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1291 
1292 	amdgpu_dm_audio_fini(adev);
1293 
1294 	return 0;
1295 }
1296 
1297 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1298 {
1299 	int i;
1300 
1301 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1302 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1303 	}
1304 
1305 	amdgpu_dm_destroy_drm_device(&adev->dm);
1306 
1307 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1308 	if (adev->dm.crc_rd_wrk) {
1309 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1310 		kfree(adev->dm.crc_rd_wrk);
1311 		adev->dm.crc_rd_wrk = NULL;
1312 	}
1313 #endif
1314 #ifdef CONFIG_DRM_AMD_DC_HDCP
1315 	if (adev->dm.hdcp_workqueue) {
1316 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1317 		adev->dm.hdcp_workqueue = NULL;
1318 	}
1319 
1320 	if (adev->dm.dc)
1321 		dc_deinit_callbacks(adev->dm.dc);
1322 #endif
1323 
1324 #if defined(CONFIG_DRM_AMD_DC_DCN)
1325 	if (adev->dm.vblank_workqueue) {
1326 		adev->dm.vblank_workqueue->dm = NULL;
1327 		kfree(adev->dm.vblank_workqueue);
1328 		adev->dm.vblank_workqueue = NULL;
1329 	}
1330 #endif
1331 
1332 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1333 
1334 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1335 		kfree(adev->dm.dmub_notify);
1336 		adev->dm.dmub_notify = NULL;
1337 	}
1338 
1339 	if (adev->dm.dmub_bo)
1340 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1341 				      &adev->dm.dmub_bo_gpu_addr,
1342 				      &adev->dm.dmub_bo_cpu_addr);
1343 
1344 	/* DC Destroy TODO: Replace destroy DAL */
1345 	if (adev->dm.dc)
1346 		dc_destroy(&adev->dm.dc);
1347 	/*
1348 	 * TODO: pageflip, vlank interrupt
1349 	 *
1350 	 * amdgpu_dm_irq_fini(adev);
1351 	 */
1352 
1353 	if (adev->dm.cgs_device) {
1354 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1355 		adev->dm.cgs_device = NULL;
1356 	}
1357 	if (adev->dm.freesync_module) {
1358 		mod_freesync_destroy(adev->dm.freesync_module);
1359 		adev->dm.freesync_module = NULL;
1360 	}
1361 
1362 	mutex_destroy(&adev->dm.audio_lock);
1363 	mutex_destroy(&adev->dm.dc_lock);
1364 
1365 	return;
1366 }
1367 
1368 static int load_dmcu_fw(struct amdgpu_device *adev)
1369 {
1370 	const char *fw_name_dmcu = NULL;
1371 	int r;
1372 	const struct dmcu_firmware_header_v1_0 *hdr;
1373 
1374 	switch(adev->asic_type) {
1375 #if defined(CONFIG_DRM_AMD_DC_SI)
1376 	case CHIP_TAHITI:
1377 	case CHIP_PITCAIRN:
1378 	case CHIP_VERDE:
1379 	case CHIP_OLAND:
1380 #endif
1381 	case CHIP_BONAIRE:
1382 	case CHIP_HAWAII:
1383 	case CHIP_KAVERI:
1384 	case CHIP_KABINI:
1385 	case CHIP_MULLINS:
1386 	case CHIP_TONGA:
1387 	case CHIP_FIJI:
1388 	case CHIP_CARRIZO:
1389 	case CHIP_STONEY:
1390 	case CHIP_POLARIS11:
1391 	case CHIP_POLARIS10:
1392 	case CHIP_POLARIS12:
1393 	case CHIP_VEGAM:
1394 	case CHIP_VEGA10:
1395 	case CHIP_VEGA12:
1396 	case CHIP_VEGA20:
1397 	case CHIP_NAVI10:
1398 	case CHIP_NAVI14:
1399 	case CHIP_RENOIR:
1400 	case CHIP_SIENNA_CICHLID:
1401 	case CHIP_NAVY_FLOUNDER:
1402 	case CHIP_DIMGREY_CAVEFISH:
1403 	case CHIP_BEIGE_GOBY:
1404 	case CHIP_VANGOGH:
1405 	case CHIP_YELLOW_CARP:
1406 		return 0;
1407 	case CHIP_NAVI12:
1408 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1409 		break;
1410 	case CHIP_RAVEN:
1411 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1412 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1413 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1414 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1415 		else
1416 			return 0;
1417 		break;
1418 	default:
1419 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1420 		return -EINVAL;
1421 	}
1422 
1423 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1424 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1425 		return 0;
1426 	}
1427 
1428 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1429 	if (r == -ENOENT) {
1430 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1431 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1432 		adev->dm.fw_dmcu = NULL;
1433 		return 0;
1434 	}
1435 	if (r) {
1436 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1437 			fw_name_dmcu);
1438 		return r;
1439 	}
1440 
1441 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1442 	if (r) {
1443 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1444 			fw_name_dmcu);
1445 		release_firmware(adev->dm.fw_dmcu);
1446 		adev->dm.fw_dmcu = NULL;
1447 		return r;
1448 	}
1449 
1450 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1451 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1452 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1453 	adev->firmware.fw_size +=
1454 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1455 
1456 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1457 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1458 	adev->firmware.fw_size +=
1459 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1460 
1461 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1462 
1463 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1464 
1465 	return 0;
1466 }
1467 
1468 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1469 {
1470 	struct amdgpu_device *adev = ctx;
1471 
1472 	return dm_read_reg(adev->dm.dc->ctx, address);
1473 }
1474 
1475 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1476 				     uint32_t value)
1477 {
1478 	struct amdgpu_device *adev = ctx;
1479 
1480 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1481 }
1482 
1483 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1484 {
1485 	struct dmub_srv_create_params create_params;
1486 	struct dmub_srv_region_params region_params;
1487 	struct dmub_srv_region_info region_info;
1488 	struct dmub_srv_fb_params fb_params;
1489 	struct dmub_srv_fb_info *fb_info;
1490 	struct dmub_srv *dmub_srv;
1491 	const struct dmcub_firmware_header_v1_0 *hdr;
1492 	const char *fw_name_dmub;
1493 	enum dmub_asic dmub_asic;
1494 	enum dmub_status status;
1495 	int r;
1496 
1497 	switch (adev->asic_type) {
1498 	case CHIP_RENOIR:
1499 		dmub_asic = DMUB_ASIC_DCN21;
1500 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1501 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1502 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1503 		break;
1504 	case CHIP_SIENNA_CICHLID:
1505 		dmub_asic = DMUB_ASIC_DCN30;
1506 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1507 		break;
1508 	case CHIP_NAVY_FLOUNDER:
1509 		dmub_asic = DMUB_ASIC_DCN30;
1510 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1511 		break;
1512 	case CHIP_VANGOGH:
1513 		dmub_asic = DMUB_ASIC_DCN301;
1514 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1515 		break;
1516 	case CHIP_DIMGREY_CAVEFISH:
1517 		dmub_asic = DMUB_ASIC_DCN302;
1518 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1519 		break;
1520 	case CHIP_BEIGE_GOBY:
1521 		dmub_asic = DMUB_ASIC_DCN303;
1522 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1523 		break;
1524 	case CHIP_YELLOW_CARP:
1525 		dmub_asic = DMUB_ASIC_DCN31;
1526 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1527 		break;
1528 
1529 	default:
1530 		/* ASIC doesn't support DMUB. */
1531 		return 0;
1532 	}
1533 
1534 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1535 	if (r) {
1536 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1537 		return 0;
1538 	}
1539 
1540 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1541 	if (r) {
1542 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1543 		return 0;
1544 	}
1545 
1546 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1547 
1548 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1549 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1550 			AMDGPU_UCODE_ID_DMCUB;
1551 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1552 			adev->dm.dmub_fw;
1553 		adev->firmware.fw_size +=
1554 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1555 
1556 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1557 			 adev->dm.dmcub_fw_version);
1558 	}
1559 
1560 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1561 
1562 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1563 	dmub_srv = adev->dm.dmub_srv;
1564 
1565 	if (!dmub_srv) {
1566 		DRM_ERROR("Failed to allocate DMUB service!\n");
1567 		return -ENOMEM;
1568 	}
1569 
1570 	memset(&create_params, 0, sizeof(create_params));
1571 	create_params.user_ctx = adev;
1572 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1573 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1574 	create_params.asic = dmub_asic;
1575 
1576 	/* Create the DMUB service. */
1577 	status = dmub_srv_create(dmub_srv, &create_params);
1578 	if (status != DMUB_STATUS_OK) {
1579 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1580 		return -EINVAL;
1581 	}
1582 
1583 	/* Calculate the size of all the regions for the DMUB service. */
1584 	memset(&region_params, 0, sizeof(region_params));
1585 
1586 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1587 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1588 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1589 	region_params.vbios_size = adev->bios_size;
1590 	region_params.fw_bss_data = region_params.bss_data_size ?
1591 		adev->dm.dmub_fw->data +
1592 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1593 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1594 	region_params.fw_inst_const =
1595 		adev->dm.dmub_fw->data +
1596 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1597 		PSP_HEADER_BYTES;
1598 
1599 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1600 					   &region_info);
1601 
1602 	if (status != DMUB_STATUS_OK) {
1603 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1604 		return -EINVAL;
1605 	}
1606 
1607 	/*
1608 	 * Allocate a framebuffer based on the total size of all the regions.
1609 	 * TODO: Move this into GART.
1610 	 */
1611 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1612 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1613 				    &adev->dm.dmub_bo_gpu_addr,
1614 				    &adev->dm.dmub_bo_cpu_addr);
1615 	if (r)
1616 		return r;
1617 
1618 	/* Rebase the regions on the framebuffer address. */
1619 	memset(&fb_params, 0, sizeof(fb_params));
1620 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1621 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1622 	fb_params.region_info = &region_info;
1623 
1624 	adev->dm.dmub_fb_info =
1625 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1626 	fb_info = adev->dm.dmub_fb_info;
1627 
1628 	if (!fb_info) {
1629 		DRM_ERROR(
1630 			"Failed to allocate framebuffer info for DMUB service!\n");
1631 		return -ENOMEM;
1632 	}
1633 
1634 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1635 	if (status != DMUB_STATUS_OK) {
1636 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1637 		return -EINVAL;
1638 	}
1639 
1640 	return 0;
1641 }
1642 
1643 static int dm_sw_init(void *handle)
1644 {
1645 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1646 	int r;
1647 
1648 	r = dm_dmub_sw_init(adev);
1649 	if (r)
1650 		return r;
1651 
1652 	return load_dmcu_fw(adev);
1653 }
1654 
1655 static int dm_sw_fini(void *handle)
1656 {
1657 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1658 
1659 	kfree(adev->dm.dmub_fb_info);
1660 	adev->dm.dmub_fb_info = NULL;
1661 
1662 	if (adev->dm.dmub_srv) {
1663 		dmub_srv_destroy(adev->dm.dmub_srv);
1664 		adev->dm.dmub_srv = NULL;
1665 	}
1666 
1667 	release_firmware(adev->dm.dmub_fw);
1668 	adev->dm.dmub_fw = NULL;
1669 
1670 	release_firmware(adev->dm.fw_dmcu);
1671 	adev->dm.fw_dmcu = NULL;
1672 
1673 	return 0;
1674 }
1675 
1676 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1677 {
1678 	struct amdgpu_dm_connector *aconnector;
1679 	struct drm_connector *connector;
1680 	struct drm_connector_list_iter iter;
1681 	int ret = 0;
1682 
1683 	drm_connector_list_iter_begin(dev, &iter);
1684 	drm_for_each_connector_iter(connector, &iter) {
1685 		aconnector = to_amdgpu_dm_connector(connector);
1686 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1687 		    aconnector->mst_mgr.aux) {
1688 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1689 					 aconnector,
1690 					 aconnector->base.base.id);
1691 
1692 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1693 			if (ret < 0) {
1694 				DRM_ERROR("DM_MST: Failed to start MST\n");
1695 				aconnector->dc_link->type =
1696 					dc_connection_single;
1697 				break;
1698 			}
1699 		}
1700 	}
1701 	drm_connector_list_iter_end(&iter);
1702 
1703 	return ret;
1704 }
1705 
1706 static int dm_late_init(void *handle)
1707 {
1708 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1709 
1710 	struct dmcu_iram_parameters params;
1711 	unsigned int linear_lut[16];
1712 	int i;
1713 	struct dmcu *dmcu = NULL;
1714 
1715 	dmcu = adev->dm.dc->res_pool->dmcu;
1716 
1717 	for (i = 0; i < 16; i++)
1718 		linear_lut[i] = 0xFFFF * i / 15;
1719 
1720 	params.set = 0;
1721 	params.backlight_ramping_start = 0xCCCC;
1722 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1723 	params.backlight_lut_array_size = 16;
1724 	params.backlight_lut_array = linear_lut;
1725 
1726 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1727 	 * 0xFFFF x 0.01 = 0x28F
1728 	 */
1729 	params.min_abm_backlight = 0x28F;
1730 	/* In the case where abm is implemented on dmcub,
1731 	* dmcu object will be null.
1732 	* ABM 2.4 and up are implemented on dmcub.
1733 	*/
1734 	if (dmcu) {
1735 		if (!dmcu_load_iram(dmcu, params))
1736 			return -EINVAL;
1737 	} else if (adev->dm.dc->ctx->dmub_srv) {
1738 		struct dc_link *edp_links[MAX_NUM_EDP];
1739 		int edp_num;
1740 
1741 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
1742 		for (i = 0; i < edp_num; i++) {
1743 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1744 				return -EINVAL;
1745 		}
1746 	}
1747 
1748 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1749 }
1750 
1751 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1752 {
1753 	struct amdgpu_dm_connector *aconnector;
1754 	struct drm_connector *connector;
1755 	struct drm_connector_list_iter iter;
1756 	struct drm_dp_mst_topology_mgr *mgr;
1757 	int ret;
1758 	bool need_hotplug = false;
1759 
1760 	drm_connector_list_iter_begin(dev, &iter);
1761 	drm_for_each_connector_iter(connector, &iter) {
1762 		aconnector = to_amdgpu_dm_connector(connector);
1763 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1764 		    aconnector->mst_port)
1765 			continue;
1766 
1767 		mgr = &aconnector->mst_mgr;
1768 
1769 		if (suspend) {
1770 			drm_dp_mst_topology_mgr_suspend(mgr);
1771 		} else {
1772 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1773 			if (ret < 0) {
1774 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1775 				need_hotplug = true;
1776 			}
1777 		}
1778 	}
1779 	drm_connector_list_iter_end(&iter);
1780 
1781 	if (need_hotplug)
1782 		drm_kms_helper_hotplug_event(dev);
1783 }
1784 
1785 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1786 {
1787 	struct smu_context *smu = &adev->smu;
1788 	int ret = 0;
1789 
1790 	if (!is_support_sw_smu(adev))
1791 		return 0;
1792 
1793 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1794 	 * on window driver dc implementation.
1795 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1796 	 * should be passed to smu during boot up and resume from s3.
1797 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1798 	 * dcn20_resource_construct
1799 	 * then call pplib functions below to pass the settings to smu:
1800 	 * smu_set_watermarks_for_clock_ranges
1801 	 * smu_set_watermarks_table
1802 	 * navi10_set_watermarks_table
1803 	 * smu_write_watermarks_table
1804 	 *
1805 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1806 	 * dc has implemented different flow for window driver:
1807 	 * dc_hardware_init / dc_set_power_state
1808 	 * dcn10_init_hw
1809 	 * notify_wm_ranges
1810 	 * set_wm_ranges
1811 	 * -- Linux
1812 	 * smu_set_watermarks_for_clock_ranges
1813 	 * renoir_set_watermarks_table
1814 	 * smu_write_watermarks_table
1815 	 *
1816 	 * For Linux,
1817 	 * dc_hardware_init -> amdgpu_dm_init
1818 	 * dc_set_power_state --> dm_resume
1819 	 *
1820 	 * therefore, this function apply to navi10/12/14 but not Renoir
1821 	 * *
1822 	 */
1823 	switch(adev->asic_type) {
1824 	case CHIP_NAVI10:
1825 	case CHIP_NAVI14:
1826 	case CHIP_NAVI12:
1827 		break;
1828 	default:
1829 		return 0;
1830 	}
1831 
1832 	ret = smu_write_watermarks_table(smu);
1833 	if (ret) {
1834 		DRM_ERROR("Failed to update WMTABLE!\n");
1835 		return ret;
1836 	}
1837 
1838 	return 0;
1839 }
1840 
1841 /**
1842  * dm_hw_init() - Initialize DC device
1843  * @handle: The base driver device containing the amdgpu_dm device.
1844  *
1845  * Initialize the &struct amdgpu_display_manager device. This involves calling
1846  * the initializers of each DM component, then populating the struct with them.
1847  *
1848  * Although the function implies hardware initialization, both hardware and
1849  * software are initialized here. Splitting them out to their relevant init
1850  * hooks is a future TODO item.
1851  *
1852  * Some notable things that are initialized here:
1853  *
1854  * - Display Core, both software and hardware
1855  * - DC modules that we need (freesync and color management)
1856  * - DRM software states
1857  * - Interrupt sources and handlers
1858  * - Vblank support
1859  * - Debug FS entries, if enabled
1860  */
1861 static int dm_hw_init(void *handle)
1862 {
1863 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1864 	/* Create DAL display manager */
1865 	amdgpu_dm_init(adev);
1866 	amdgpu_dm_hpd_init(adev);
1867 
1868 	return 0;
1869 }
1870 
1871 /**
1872  * dm_hw_fini() - Teardown DC device
1873  * @handle: The base driver device containing the amdgpu_dm device.
1874  *
1875  * Teardown components within &struct amdgpu_display_manager that require
1876  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1877  * were loaded. Also flush IRQ workqueues and disable them.
1878  */
1879 static int dm_hw_fini(void *handle)
1880 {
1881 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1882 
1883 	amdgpu_dm_hpd_fini(adev);
1884 
1885 	amdgpu_dm_irq_fini(adev);
1886 	amdgpu_dm_fini(adev);
1887 	return 0;
1888 }
1889 
1890 
1891 static int dm_enable_vblank(struct drm_crtc *crtc);
1892 static void dm_disable_vblank(struct drm_crtc *crtc);
1893 
1894 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1895 				 struct dc_state *state, bool enable)
1896 {
1897 	enum dc_irq_source irq_source;
1898 	struct amdgpu_crtc *acrtc;
1899 	int rc = -EBUSY;
1900 	int i = 0;
1901 
1902 	for (i = 0; i < state->stream_count; i++) {
1903 		acrtc = get_crtc_by_otg_inst(
1904 				adev, state->stream_status[i].primary_otg_inst);
1905 
1906 		if (acrtc && state->stream_status[i].plane_count != 0) {
1907 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1908 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1909 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1910 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1911 			if (rc)
1912 				DRM_WARN("Failed to %s pflip interrupts\n",
1913 					 enable ? "enable" : "disable");
1914 
1915 			if (enable) {
1916 				rc = dm_enable_vblank(&acrtc->base);
1917 				if (rc)
1918 					DRM_WARN("Failed to enable vblank interrupts\n");
1919 			} else {
1920 				dm_disable_vblank(&acrtc->base);
1921 			}
1922 
1923 		}
1924 	}
1925 
1926 }
1927 
1928 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1929 {
1930 	struct dc_state *context = NULL;
1931 	enum dc_status res = DC_ERROR_UNEXPECTED;
1932 	int i;
1933 	struct dc_stream_state *del_streams[MAX_PIPES];
1934 	int del_streams_count = 0;
1935 
1936 	memset(del_streams, 0, sizeof(del_streams));
1937 
1938 	context = dc_create_state(dc);
1939 	if (context == NULL)
1940 		goto context_alloc_fail;
1941 
1942 	dc_resource_state_copy_construct_current(dc, context);
1943 
1944 	/* First remove from context all streams */
1945 	for (i = 0; i < context->stream_count; i++) {
1946 		struct dc_stream_state *stream = context->streams[i];
1947 
1948 		del_streams[del_streams_count++] = stream;
1949 	}
1950 
1951 	/* Remove all planes for removed streams and then remove the streams */
1952 	for (i = 0; i < del_streams_count; i++) {
1953 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1954 			res = DC_FAIL_DETACH_SURFACES;
1955 			goto fail;
1956 		}
1957 
1958 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1959 		if (res != DC_OK)
1960 			goto fail;
1961 	}
1962 
1963 
1964 	res = dc_validate_global_state(dc, context, false);
1965 
1966 	if (res != DC_OK) {
1967 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1968 		goto fail;
1969 	}
1970 
1971 	res = dc_commit_state(dc, context);
1972 
1973 fail:
1974 	dc_release_state(context);
1975 
1976 context_alloc_fail:
1977 	return res;
1978 }
1979 
1980 static int dm_suspend(void *handle)
1981 {
1982 	struct amdgpu_device *adev = handle;
1983 	struct amdgpu_display_manager *dm = &adev->dm;
1984 	int ret = 0;
1985 
1986 	if (amdgpu_in_reset(adev)) {
1987 		mutex_lock(&dm->dc_lock);
1988 
1989 #if defined(CONFIG_DRM_AMD_DC_DCN)
1990 		dc_allow_idle_optimizations(adev->dm.dc, false);
1991 #endif
1992 
1993 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1994 
1995 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1996 
1997 		amdgpu_dm_commit_zero_streams(dm->dc);
1998 
1999 		amdgpu_dm_irq_suspend(adev);
2000 
2001 		return ret;
2002 	}
2003 
2004 	WARN_ON(adev->dm.cached_state);
2005 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2006 
2007 	s3_handle_mst(adev_to_drm(adev), true);
2008 
2009 	amdgpu_dm_irq_suspend(adev);
2010 
2011 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2012 
2013 	return 0;
2014 }
2015 
2016 static struct amdgpu_dm_connector *
2017 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2018 					     struct drm_crtc *crtc)
2019 {
2020 	uint32_t i;
2021 	struct drm_connector_state *new_con_state;
2022 	struct drm_connector *connector;
2023 	struct drm_crtc *crtc_from_state;
2024 
2025 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2026 		crtc_from_state = new_con_state->crtc;
2027 
2028 		if (crtc_from_state == crtc)
2029 			return to_amdgpu_dm_connector(connector);
2030 	}
2031 
2032 	return NULL;
2033 }
2034 
2035 static void emulated_link_detect(struct dc_link *link)
2036 {
2037 	struct dc_sink_init_data sink_init_data = { 0 };
2038 	struct display_sink_capability sink_caps = { 0 };
2039 	enum dc_edid_status edid_status;
2040 	struct dc_context *dc_ctx = link->ctx;
2041 	struct dc_sink *sink = NULL;
2042 	struct dc_sink *prev_sink = NULL;
2043 
2044 	link->type = dc_connection_none;
2045 	prev_sink = link->local_sink;
2046 
2047 	if (prev_sink)
2048 		dc_sink_release(prev_sink);
2049 
2050 	switch (link->connector_signal) {
2051 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2052 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2053 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2054 		break;
2055 	}
2056 
2057 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2058 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2059 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2060 		break;
2061 	}
2062 
2063 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2064 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2065 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2066 		break;
2067 	}
2068 
2069 	case SIGNAL_TYPE_LVDS: {
2070 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2071 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2072 		break;
2073 	}
2074 
2075 	case SIGNAL_TYPE_EDP: {
2076 		sink_caps.transaction_type =
2077 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2078 		sink_caps.signal = SIGNAL_TYPE_EDP;
2079 		break;
2080 	}
2081 
2082 	case SIGNAL_TYPE_DISPLAY_PORT: {
2083 		sink_caps.transaction_type =
2084 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2085 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2086 		break;
2087 	}
2088 
2089 	default:
2090 		DC_ERROR("Invalid connector type! signal:%d\n",
2091 			link->connector_signal);
2092 		return;
2093 	}
2094 
2095 	sink_init_data.link = link;
2096 	sink_init_data.sink_signal = sink_caps.signal;
2097 
2098 	sink = dc_sink_create(&sink_init_data);
2099 	if (!sink) {
2100 		DC_ERROR("Failed to create sink!\n");
2101 		return;
2102 	}
2103 
2104 	/* dc_sink_create returns a new reference */
2105 	link->local_sink = sink;
2106 
2107 	edid_status = dm_helpers_read_local_edid(
2108 			link->ctx,
2109 			link,
2110 			sink);
2111 
2112 	if (edid_status != EDID_OK)
2113 		DC_ERROR("Failed to read EDID");
2114 
2115 }
2116 
2117 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2118 				     struct amdgpu_display_manager *dm)
2119 {
2120 	struct {
2121 		struct dc_surface_update surface_updates[MAX_SURFACES];
2122 		struct dc_plane_info plane_infos[MAX_SURFACES];
2123 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2124 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2125 		struct dc_stream_update stream_update;
2126 	} * bundle;
2127 	int k, m;
2128 
2129 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2130 
2131 	if (!bundle) {
2132 		dm_error("Failed to allocate update bundle\n");
2133 		goto cleanup;
2134 	}
2135 
2136 	for (k = 0; k < dc_state->stream_count; k++) {
2137 		bundle->stream_update.stream = dc_state->streams[k];
2138 
2139 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2140 			bundle->surface_updates[m].surface =
2141 				dc_state->stream_status->plane_states[m];
2142 			bundle->surface_updates[m].surface->force_full_update =
2143 				true;
2144 		}
2145 		dc_commit_updates_for_stream(
2146 			dm->dc, bundle->surface_updates,
2147 			dc_state->stream_status->plane_count,
2148 			dc_state->streams[k], &bundle->stream_update, dc_state);
2149 	}
2150 
2151 cleanup:
2152 	kfree(bundle);
2153 
2154 	return;
2155 }
2156 
2157 static void dm_set_dpms_off(struct dc_link *link)
2158 {
2159 	struct dc_stream_state *stream_state;
2160 	struct amdgpu_dm_connector *aconnector = link->priv;
2161 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2162 	struct dc_stream_update stream_update;
2163 	bool dpms_off = true;
2164 
2165 	memset(&stream_update, 0, sizeof(stream_update));
2166 	stream_update.dpms_off = &dpms_off;
2167 
2168 	mutex_lock(&adev->dm.dc_lock);
2169 	stream_state = dc_stream_find_from_link(link);
2170 
2171 	if (stream_state == NULL) {
2172 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2173 		mutex_unlock(&adev->dm.dc_lock);
2174 		return;
2175 	}
2176 
2177 	stream_update.stream = stream_state;
2178 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2179 				     stream_state, &stream_update,
2180 				     stream_state->ctx->dc->current_state);
2181 	mutex_unlock(&adev->dm.dc_lock);
2182 }
2183 
2184 static int dm_resume(void *handle)
2185 {
2186 	struct amdgpu_device *adev = handle;
2187 	struct drm_device *ddev = adev_to_drm(adev);
2188 	struct amdgpu_display_manager *dm = &adev->dm;
2189 	struct amdgpu_dm_connector *aconnector;
2190 	struct drm_connector *connector;
2191 	struct drm_connector_list_iter iter;
2192 	struct drm_crtc *crtc;
2193 	struct drm_crtc_state *new_crtc_state;
2194 	struct dm_crtc_state *dm_new_crtc_state;
2195 	struct drm_plane *plane;
2196 	struct drm_plane_state *new_plane_state;
2197 	struct dm_plane_state *dm_new_plane_state;
2198 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2199 	enum dc_connection_type new_connection_type = dc_connection_none;
2200 	struct dc_state *dc_state;
2201 	int i, r, j;
2202 
2203 	if (amdgpu_in_reset(adev)) {
2204 		dc_state = dm->cached_dc_state;
2205 
2206 		r = dm_dmub_hw_init(adev);
2207 		if (r)
2208 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2209 
2210 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2211 		dc_resume(dm->dc);
2212 
2213 		amdgpu_dm_irq_resume_early(adev);
2214 
2215 		for (i = 0; i < dc_state->stream_count; i++) {
2216 			dc_state->streams[i]->mode_changed = true;
2217 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2218 				dc_state->stream_status->plane_states[j]->update_flags.raw
2219 					= 0xffffffff;
2220 			}
2221 		}
2222 #if defined(CONFIG_DRM_AMD_DC_DCN)
2223 		/*
2224 		 * Resource allocation happens for link encoders for newer ASIC in
2225 		 * dc_validate_global_state, so we need to revalidate it.
2226 		 *
2227 		 * This shouldn't fail (it passed once before), so warn if it does.
2228 		 */
2229 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2230 #endif
2231 
2232 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2233 
2234 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2235 
2236 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2237 
2238 		dc_release_state(dm->cached_dc_state);
2239 		dm->cached_dc_state = NULL;
2240 
2241 		amdgpu_dm_irq_resume_late(adev);
2242 
2243 		mutex_unlock(&dm->dc_lock);
2244 
2245 		return 0;
2246 	}
2247 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2248 	dc_release_state(dm_state->context);
2249 	dm_state->context = dc_create_state(dm->dc);
2250 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2251 	dc_resource_state_construct(dm->dc, dm_state->context);
2252 
2253 	/* Before powering on DC we need to re-initialize DMUB. */
2254 	r = dm_dmub_hw_init(adev);
2255 	if (r)
2256 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2257 
2258 	/* power on hardware */
2259 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2260 
2261 	/* program HPD filter */
2262 	dc_resume(dm->dc);
2263 
2264 	/*
2265 	 * early enable HPD Rx IRQ, should be done before set mode as short
2266 	 * pulse interrupts are used for MST
2267 	 */
2268 	amdgpu_dm_irq_resume_early(adev);
2269 
2270 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2271 	s3_handle_mst(ddev, false);
2272 
2273 	/* Do detection*/
2274 	drm_connector_list_iter_begin(ddev, &iter);
2275 	drm_for_each_connector_iter(connector, &iter) {
2276 		aconnector = to_amdgpu_dm_connector(connector);
2277 
2278 		/*
2279 		 * this is the case when traversing through already created
2280 		 * MST connectors, should be skipped
2281 		 */
2282 		if (aconnector->mst_port)
2283 			continue;
2284 
2285 		mutex_lock(&aconnector->hpd_lock);
2286 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2287 			DRM_ERROR("KMS: Failed to detect connector\n");
2288 
2289 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2290 			emulated_link_detect(aconnector->dc_link);
2291 		else
2292 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2293 
2294 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2295 			aconnector->fake_enable = false;
2296 
2297 		if (aconnector->dc_sink)
2298 			dc_sink_release(aconnector->dc_sink);
2299 		aconnector->dc_sink = NULL;
2300 		amdgpu_dm_update_connector_after_detect(aconnector);
2301 		mutex_unlock(&aconnector->hpd_lock);
2302 	}
2303 	drm_connector_list_iter_end(&iter);
2304 
2305 	/* Force mode set in atomic commit */
2306 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2307 		new_crtc_state->active_changed = true;
2308 
2309 	/*
2310 	 * atomic_check is expected to create the dc states. We need to release
2311 	 * them here, since they were duplicated as part of the suspend
2312 	 * procedure.
2313 	 */
2314 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2315 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2316 		if (dm_new_crtc_state->stream) {
2317 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2318 			dc_stream_release(dm_new_crtc_state->stream);
2319 			dm_new_crtc_state->stream = NULL;
2320 		}
2321 	}
2322 
2323 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2324 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2325 		if (dm_new_plane_state->dc_state) {
2326 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2327 			dc_plane_state_release(dm_new_plane_state->dc_state);
2328 			dm_new_plane_state->dc_state = NULL;
2329 		}
2330 	}
2331 
2332 	drm_atomic_helper_resume(ddev, dm->cached_state);
2333 
2334 	dm->cached_state = NULL;
2335 
2336 	amdgpu_dm_irq_resume_late(adev);
2337 
2338 	amdgpu_dm_smu_write_watermarks_table(adev);
2339 
2340 	return 0;
2341 }
2342 
2343 /**
2344  * DOC: DM Lifecycle
2345  *
2346  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2347  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2348  * the base driver's device list to be initialized and torn down accordingly.
2349  *
2350  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2351  */
2352 
2353 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2354 	.name = "dm",
2355 	.early_init = dm_early_init,
2356 	.late_init = dm_late_init,
2357 	.sw_init = dm_sw_init,
2358 	.sw_fini = dm_sw_fini,
2359 	.early_fini = amdgpu_dm_early_fini,
2360 	.hw_init = dm_hw_init,
2361 	.hw_fini = dm_hw_fini,
2362 	.suspend = dm_suspend,
2363 	.resume = dm_resume,
2364 	.is_idle = dm_is_idle,
2365 	.wait_for_idle = dm_wait_for_idle,
2366 	.check_soft_reset = dm_check_soft_reset,
2367 	.soft_reset = dm_soft_reset,
2368 	.set_clockgating_state = dm_set_clockgating_state,
2369 	.set_powergating_state = dm_set_powergating_state,
2370 };
2371 
2372 const struct amdgpu_ip_block_version dm_ip_block =
2373 {
2374 	.type = AMD_IP_BLOCK_TYPE_DCE,
2375 	.major = 1,
2376 	.minor = 0,
2377 	.rev = 0,
2378 	.funcs = &amdgpu_dm_funcs,
2379 };
2380 
2381 
2382 /**
2383  * DOC: atomic
2384  *
2385  * *WIP*
2386  */
2387 
2388 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2389 	.fb_create = amdgpu_display_user_framebuffer_create,
2390 	.get_format_info = amd_get_format_info,
2391 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2392 	.atomic_check = amdgpu_dm_atomic_check,
2393 	.atomic_commit = drm_atomic_helper_commit,
2394 };
2395 
2396 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2397 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2398 };
2399 
2400 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2401 {
2402 	u32 max_cll, min_cll, max, min, q, r;
2403 	struct amdgpu_dm_backlight_caps *caps;
2404 	struct amdgpu_display_manager *dm;
2405 	struct drm_connector *conn_base;
2406 	struct amdgpu_device *adev;
2407 	struct dc_link *link = NULL;
2408 	static const u8 pre_computed_values[] = {
2409 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2410 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2411 	int i;
2412 
2413 	if (!aconnector || !aconnector->dc_link)
2414 		return;
2415 
2416 	link = aconnector->dc_link;
2417 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2418 		return;
2419 
2420 	conn_base = &aconnector->base;
2421 	adev = drm_to_adev(conn_base->dev);
2422 	dm = &adev->dm;
2423 	for (i = 0; i < dm->num_of_edps; i++) {
2424 		if (link == dm->backlight_link[i])
2425 			break;
2426 	}
2427 	if (i >= dm->num_of_edps)
2428 		return;
2429 	caps = &dm->backlight_caps[i];
2430 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2431 	caps->aux_support = false;
2432 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2433 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2434 
2435 	if (caps->ext_caps->bits.oled == 1 /*||
2436 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2437 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2438 		caps->aux_support = true;
2439 
2440 	if (amdgpu_backlight == 0)
2441 		caps->aux_support = false;
2442 	else if (amdgpu_backlight == 1)
2443 		caps->aux_support = true;
2444 
2445 	/* From the specification (CTA-861-G), for calculating the maximum
2446 	 * luminance we need to use:
2447 	 *	Luminance = 50*2**(CV/32)
2448 	 * Where CV is a one-byte value.
2449 	 * For calculating this expression we may need float point precision;
2450 	 * to avoid this complexity level, we take advantage that CV is divided
2451 	 * by a constant. From the Euclids division algorithm, we know that CV
2452 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2453 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2454 	 * need to pre-compute the value of r/32. For pre-computing the values
2455 	 * We just used the following Ruby line:
2456 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2457 	 * The results of the above expressions can be verified at
2458 	 * pre_computed_values.
2459 	 */
2460 	q = max_cll >> 5;
2461 	r = max_cll % 32;
2462 	max = (1 << q) * pre_computed_values[r];
2463 
2464 	// min luminance: maxLum * (CV/255)^2 / 100
2465 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2466 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2467 
2468 	caps->aux_max_input_signal = max;
2469 	caps->aux_min_input_signal = min;
2470 }
2471 
2472 void amdgpu_dm_update_connector_after_detect(
2473 		struct amdgpu_dm_connector *aconnector)
2474 {
2475 	struct drm_connector *connector = &aconnector->base;
2476 	struct drm_device *dev = connector->dev;
2477 	struct dc_sink *sink;
2478 
2479 	/* MST handled by drm_mst framework */
2480 	if (aconnector->mst_mgr.mst_state == true)
2481 		return;
2482 
2483 	sink = aconnector->dc_link->local_sink;
2484 	if (sink)
2485 		dc_sink_retain(sink);
2486 
2487 	/*
2488 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2489 	 * the connector sink is set to either fake or physical sink depends on link status.
2490 	 * Skip if already done during boot.
2491 	 */
2492 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2493 			&& aconnector->dc_em_sink) {
2494 
2495 		/*
2496 		 * For S3 resume with headless use eml_sink to fake stream
2497 		 * because on resume connector->sink is set to NULL
2498 		 */
2499 		mutex_lock(&dev->mode_config.mutex);
2500 
2501 		if (sink) {
2502 			if (aconnector->dc_sink) {
2503 				amdgpu_dm_update_freesync_caps(connector, NULL);
2504 				/*
2505 				 * retain and release below are used to
2506 				 * bump up refcount for sink because the link doesn't point
2507 				 * to it anymore after disconnect, so on next crtc to connector
2508 				 * reshuffle by UMD we will get into unwanted dc_sink release
2509 				 */
2510 				dc_sink_release(aconnector->dc_sink);
2511 			}
2512 			aconnector->dc_sink = sink;
2513 			dc_sink_retain(aconnector->dc_sink);
2514 			amdgpu_dm_update_freesync_caps(connector,
2515 					aconnector->edid);
2516 		} else {
2517 			amdgpu_dm_update_freesync_caps(connector, NULL);
2518 			if (!aconnector->dc_sink) {
2519 				aconnector->dc_sink = aconnector->dc_em_sink;
2520 				dc_sink_retain(aconnector->dc_sink);
2521 			}
2522 		}
2523 
2524 		mutex_unlock(&dev->mode_config.mutex);
2525 
2526 		if (sink)
2527 			dc_sink_release(sink);
2528 		return;
2529 	}
2530 
2531 	/*
2532 	 * TODO: temporary guard to look for proper fix
2533 	 * if this sink is MST sink, we should not do anything
2534 	 */
2535 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2536 		dc_sink_release(sink);
2537 		return;
2538 	}
2539 
2540 	if (aconnector->dc_sink == sink) {
2541 		/*
2542 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2543 		 * Do nothing!!
2544 		 */
2545 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2546 				aconnector->connector_id);
2547 		if (sink)
2548 			dc_sink_release(sink);
2549 		return;
2550 	}
2551 
2552 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2553 		aconnector->connector_id, aconnector->dc_sink, sink);
2554 
2555 	mutex_lock(&dev->mode_config.mutex);
2556 
2557 	/*
2558 	 * 1. Update status of the drm connector
2559 	 * 2. Send an event and let userspace tell us what to do
2560 	 */
2561 	if (sink) {
2562 		/*
2563 		 * TODO: check if we still need the S3 mode update workaround.
2564 		 * If yes, put it here.
2565 		 */
2566 		if (aconnector->dc_sink) {
2567 			amdgpu_dm_update_freesync_caps(connector, NULL);
2568 			dc_sink_release(aconnector->dc_sink);
2569 		}
2570 
2571 		aconnector->dc_sink = sink;
2572 		dc_sink_retain(aconnector->dc_sink);
2573 		if (sink->dc_edid.length == 0) {
2574 			aconnector->edid = NULL;
2575 			if (aconnector->dc_link->aux_mode) {
2576 				drm_dp_cec_unset_edid(
2577 					&aconnector->dm_dp_aux.aux);
2578 			}
2579 		} else {
2580 			aconnector->edid =
2581 				(struct edid *)sink->dc_edid.raw_edid;
2582 
2583 			drm_connector_update_edid_property(connector,
2584 							   aconnector->edid);
2585 			if (aconnector->dc_link->aux_mode)
2586 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2587 						    aconnector->edid);
2588 		}
2589 
2590 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2591 		update_connector_ext_caps(aconnector);
2592 	} else {
2593 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2594 		amdgpu_dm_update_freesync_caps(connector, NULL);
2595 		drm_connector_update_edid_property(connector, NULL);
2596 		aconnector->num_modes = 0;
2597 		dc_sink_release(aconnector->dc_sink);
2598 		aconnector->dc_sink = NULL;
2599 		aconnector->edid = NULL;
2600 #ifdef CONFIG_DRM_AMD_DC_HDCP
2601 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2602 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2603 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2604 #endif
2605 	}
2606 
2607 	mutex_unlock(&dev->mode_config.mutex);
2608 
2609 	update_subconnector_property(aconnector);
2610 
2611 	if (sink)
2612 		dc_sink_release(sink);
2613 }
2614 
2615 static void handle_hpd_irq(void *param)
2616 {
2617 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2618 	struct drm_connector *connector = &aconnector->base;
2619 	struct drm_device *dev = connector->dev;
2620 	enum dc_connection_type new_connection_type = dc_connection_none;
2621 	struct amdgpu_device *adev = drm_to_adev(dev);
2622 #ifdef CONFIG_DRM_AMD_DC_HDCP
2623 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2624 #endif
2625 
2626 	if (adev->dm.disable_hpd_irq)
2627 		return;
2628 
2629 	/*
2630 	 * In case of failure or MST no need to update connector status or notify the OS
2631 	 * since (for MST case) MST does this in its own context.
2632 	 */
2633 	mutex_lock(&aconnector->hpd_lock);
2634 
2635 #ifdef CONFIG_DRM_AMD_DC_HDCP
2636 	if (adev->dm.hdcp_workqueue) {
2637 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2638 		dm_con_state->update_hdcp = true;
2639 	}
2640 #endif
2641 	if (aconnector->fake_enable)
2642 		aconnector->fake_enable = false;
2643 
2644 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2645 		DRM_ERROR("KMS: Failed to detect connector\n");
2646 
2647 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2648 		emulated_link_detect(aconnector->dc_link);
2649 
2650 
2651 		drm_modeset_lock_all(dev);
2652 		dm_restore_drm_connector_state(dev, connector);
2653 		drm_modeset_unlock_all(dev);
2654 
2655 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2656 			drm_kms_helper_hotplug_event(dev);
2657 
2658 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2659 		if (new_connection_type == dc_connection_none &&
2660 		    aconnector->dc_link->type == dc_connection_none)
2661 			dm_set_dpms_off(aconnector->dc_link);
2662 
2663 		amdgpu_dm_update_connector_after_detect(aconnector);
2664 
2665 		drm_modeset_lock_all(dev);
2666 		dm_restore_drm_connector_state(dev, connector);
2667 		drm_modeset_unlock_all(dev);
2668 
2669 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2670 			drm_kms_helper_hotplug_event(dev);
2671 	}
2672 	mutex_unlock(&aconnector->hpd_lock);
2673 
2674 }
2675 
2676 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2677 {
2678 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2679 	uint8_t dret;
2680 	bool new_irq_handled = false;
2681 	int dpcd_addr;
2682 	int dpcd_bytes_to_read;
2683 
2684 	const int max_process_count = 30;
2685 	int process_count = 0;
2686 
2687 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2688 
2689 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2690 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2691 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2692 		dpcd_addr = DP_SINK_COUNT;
2693 	} else {
2694 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2695 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2696 		dpcd_addr = DP_SINK_COUNT_ESI;
2697 	}
2698 
2699 	dret = drm_dp_dpcd_read(
2700 		&aconnector->dm_dp_aux.aux,
2701 		dpcd_addr,
2702 		esi,
2703 		dpcd_bytes_to_read);
2704 
2705 	while (dret == dpcd_bytes_to_read &&
2706 		process_count < max_process_count) {
2707 		uint8_t retry;
2708 		dret = 0;
2709 
2710 		process_count++;
2711 
2712 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2713 		/* handle HPD short pulse irq */
2714 		if (aconnector->mst_mgr.mst_state)
2715 			drm_dp_mst_hpd_irq(
2716 				&aconnector->mst_mgr,
2717 				esi,
2718 				&new_irq_handled);
2719 
2720 		if (new_irq_handled) {
2721 			/* ACK at DPCD to notify down stream */
2722 			const int ack_dpcd_bytes_to_write =
2723 				dpcd_bytes_to_read - 1;
2724 
2725 			for (retry = 0; retry < 3; retry++) {
2726 				uint8_t wret;
2727 
2728 				wret = drm_dp_dpcd_write(
2729 					&aconnector->dm_dp_aux.aux,
2730 					dpcd_addr + 1,
2731 					&esi[1],
2732 					ack_dpcd_bytes_to_write);
2733 				if (wret == ack_dpcd_bytes_to_write)
2734 					break;
2735 			}
2736 
2737 			/* check if there is new irq to be handled */
2738 			dret = drm_dp_dpcd_read(
2739 				&aconnector->dm_dp_aux.aux,
2740 				dpcd_addr,
2741 				esi,
2742 				dpcd_bytes_to_read);
2743 
2744 			new_irq_handled = false;
2745 		} else {
2746 			break;
2747 		}
2748 	}
2749 
2750 	if (process_count == max_process_count)
2751 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2752 }
2753 
2754 static void handle_hpd_rx_irq(void *param)
2755 {
2756 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2757 	struct drm_connector *connector = &aconnector->base;
2758 	struct drm_device *dev = connector->dev;
2759 	struct dc_link *dc_link = aconnector->dc_link;
2760 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2761 	bool result = false;
2762 	enum dc_connection_type new_connection_type = dc_connection_none;
2763 	struct amdgpu_device *adev = drm_to_adev(dev);
2764 	union hpd_irq_data hpd_irq_data;
2765 	bool lock_flag = 0;
2766 
2767 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2768 
2769 	if (adev->dm.disable_hpd_irq)
2770 		return;
2771 
2772 
2773 	/*
2774 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2775 	 * conflict, after implement i2c helper, this mutex should be
2776 	 * retired.
2777 	 */
2778 	mutex_lock(&aconnector->hpd_lock);
2779 
2780 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2781 
2782 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2783 		(dc_link->type == dc_connection_mst_branch)) {
2784 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2785 			result = true;
2786 			dm_handle_hpd_rx_irq(aconnector);
2787 			goto out;
2788 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2789 			result = false;
2790 			dm_handle_hpd_rx_irq(aconnector);
2791 			goto out;
2792 		}
2793 	}
2794 
2795 	/*
2796 	 * TODO: We need the lock to avoid touching DC state while it's being
2797 	 * modified during automated compliance testing, or when link loss
2798 	 * happens. While this should be split into subhandlers and proper
2799 	 * interfaces to avoid having to conditionally lock like this in the
2800 	 * outer layer, we need this workaround temporarily to allow MST
2801 	 * lightup in some scenarios to avoid timeout.
2802 	 */
2803 	if (!amdgpu_in_reset(adev) &&
2804 	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2805 	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2806 		mutex_lock(&adev->dm.dc_lock);
2807 		lock_flag = 1;
2808 	}
2809 
2810 #ifdef CONFIG_DRM_AMD_DC_HDCP
2811 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2812 #else
2813 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2814 #endif
2815 	if (!amdgpu_in_reset(adev) && lock_flag)
2816 		mutex_unlock(&adev->dm.dc_lock);
2817 
2818 out:
2819 	if (result && !is_mst_root_connector) {
2820 		/* Downstream Port status changed. */
2821 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2822 			DRM_ERROR("KMS: Failed to detect connector\n");
2823 
2824 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2825 			emulated_link_detect(dc_link);
2826 
2827 			if (aconnector->fake_enable)
2828 				aconnector->fake_enable = false;
2829 
2830 			amdgpu_dm_update_connector_after_detect(aconnector);
2831 
2832 
2833 			drm_modeset_lock_all(dev);
2834 			dm_restore_drm_connector_state(dev, connector);
2835 			drm_modeset_unlock_all(dev);
2836 
2837 			drm_kms_helper_hotplug_event(dev);
2838 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2839 
2840 			if (aconnector->fake_enable)
2841 				aconnector->fake_enable = false;
2842 
2843 			amdgpu_dm_update_connector_after_detect(aconnector);
2844 
2845 
2846 			drm_modeset_lock_all(dev);
2847 			dm_restore_drm_connector_state(dev, connector);
2848 			drm_modeset_unlock_all(dev);
2849 
2850 			drm_kms_helper_hotplug_event(dev);
2851 		}
2852 	}
2853 #ifdef CONFIG_DRM_AMD_DC_HDCP
2854 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2855 		if (adev->dm.hdcp_workqueue)
2856 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2857 	}
2858 #endif
2859 
2860 	if (dc_link->type != dc_connection_mst_branch)
2861 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2862 
2863 	mutex_unlock(&aconnector->hpd_lock);
2864 }
2865 
2866 static void register_hpd_handlers(struct amdgpu_device *adev)
2867 {
2868 	struct drm_device *dev = adev_to_drm(adev);
2869 	struct drm_connector *connector;
2870 	struct amdgpu_dm_connector *aconnector;
2871 	const struct dc_link *dc_link;
2872 	struct dc_interrupt_params int_params = {0};
2873 
2874 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2875 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2876 
2877 	list_for_each_entry(connector,
2878 			&dev->mode_config.connector_list, head)	{
2879 
2880 		aconnector = to_amdgpu_dm_connector(connector);
2881 		dc_link = aconnector->dc_link;
2882 
2883 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2884 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2885 			int_params.irq_source = dc_link->irq_source_hpd;
2886 
2887 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2888 					handle_hpd_irq,
2889 					(void *) aconnector);
2890 		}
2891 
2892 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2893 
2894 			/* Also register for DP short pulse (hpd_rx). */
2895 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2896 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2897 
2898 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2899 					handle_hpd_rx_irq,
2900 					(void *) aconnector);
2901 		}
2902 	}
2903 }
2904 
2905 #if defined(CONFIG_DRM_AMD_DC_SI)
2906 /* Register IRQ sources and initialize IRQ callbacks */
2907 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2908 {
2909 	struct dc *dc = adev->dm.dc;
2910 	struct common_irq_params *c_irq_params;
2911 	struct dc_interrupt_params int_params = {0};
2912 	int r;
2913 	int i;
2914 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2915 
2916 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2917 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2918 
2919 	/*
2920 	 * Actions of amdgpu_irq_add_id():
2921 	 * 1. Register a set() function with base driver.
2922 	 *    Base driver will call set() function to enable/disable an
2923 	 *    interrupt in DC hardware.
2924 	 * 2. Register amdgpu_dm_irq_handler().
2925 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2926 	 *    coming from DC hardware.
2927 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2928 	 *    for acknowledging and handling. */
2929 
2930 	/* Use VBLANK interrupt */
2931 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2932 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2933 		if (r) {
2934 			DRM_ERROR("Failed to add crtc irq id!\n");
2935 			return r;
2936 		}
2937 
2938 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2939 		int_params.irq_source =
2940 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2941 
2942 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2943 
2944 		c_irq_params->adev = adev;
2945 		c_irq_params->irq_src = int_params.irq_source;
2946 
2947 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2948 				dm_crtc_high_irq, c_irq_params);
2949 	}
2950 
2951 	/* Use GRPH_PFLIP interrupt */
2952 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2953 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2954 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2955 		if (r) {
2956 			DRM_ERROR("Failed to add page flip irq id!\n");
2957 			return r;
2958 		}
2959 
2960 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2961 		int_params.irq_source =
2962 			dc_interrupt_to_irq_source(dc, i, 0);
2963 
2964 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2965 
2966 		c_irq_params->adev = adev;
2967 		c_irq_params->irq_src = int_params.irq_source;
2968 
2969 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2970 				dm_pflip_high_irq, c_irq_params);
2971 
2972 	}
2973 
2974 	/* HPD */
2975 	r = amdgpu_irq_add_id(adev, client_id,
2976 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2977 	if (r) {
2978 		DRM_ERROR("Failed to add hpd irq id!\n");
2979 		return r;
2980 	}
2981 
2982 	register_hpd_handlers(adev);
2983 
2984 	return 0;
2985 }
2986 #endif
2987 
2988 /* Register IRQ sources and initialize IRQ callbacks */
2989 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2990 {
2991 	struct dc *dc = adev->dm.dc;
2992 	struct common_irq_params *c_irq_params;
2993 	struct dc_interrupt_params int_params = {0};
2994 	int r;
2995 	int i;
2996 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2997 
2998 	if (adev->asic_type >= CHIP_VEGA10)
2999 		client_id = SOC15_IH_CLIENTID_DCE;
3000 
3001 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3002 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3003 
3004 	/*
3005 	 * Actions of amdgpu_irq_add_id():
3006 	 * 1. Register a set() function with base driver.
3007 	 *    Base driver will call set() function to enable/disable an
3008 	 *    interrupt in DC hardware.
3009 	 * 2. Register amdgpu_dm_irq_handler().
3010 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3011 	 *    coming from DC hardware.
3012 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3013 	 *    for acknowledging and handling. */
3014 
3015 	/* Use VBLANK interrupt */
3016 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3017 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3018 		if (r) {
3019 			DRM_ERROR("Failed to add crtc irq id!\n");
3020 			return r;
3021 		}
3022 
3023 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3024 		int_params.irq_source =
3025 			dc_interrupt_to_irq_source(dc, i, 0);
3026 
3027 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3028 
3029 		c_irq_params->adev = adev;
3030 		c_irq_params->irq_src = int_params.irq_source;
3031 
3032 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3033 				dm_crtc_high_irq, c_irq_params);
3034 	}
3035 
3036 	/* Use VUPDATE interrupt */
3037 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3038 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3039 		if (r) {
3040 			DRM_ERROR("Failed to add vupdate irq id!\n");
3041 			return r;
3042 		}
3043 
3044 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3045 		int_params.irq_source =
3046 			dc_interrupt_to_irq_source(dc, i, 0);
3047 
3048 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3049 
3050 		c_irq_params->adev = adev;
3051 		c_irq_params->irq_src = int_params.irq_source;
3052 
3053 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3054 				dm_vupdate_high_irq, c_irq_params);
3055 	}
3056 
3057 	/* Use GRPH_PFLIP interrupt */
3058 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3059 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3060 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3061 		if (r) {
3062 			DRM_ERROR("Failed to add page flip irq id!\n");
3063 			return r;
3064 		}
3065 
3066 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3067 		int_params.irq_source =
3068 			dc_interrupt_to_irq_source(dc, i, 0);
3069 
3070 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3071 
3072 		c_irq_params->adev = adev;
3073 		c_irq_params->irq_src = int_params.irq_source;
3074 
3075 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3076 				dm_pflip_high_irq, c_irq_params);
3077 
3078 	}
3079 
3080 	/* HPD */
3081 	r = amdgpu_irq_add_id(adev, client_id,
3082 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3083 	if (r) {
3084 		DRM_ERROR("Failed to add hpd irq id!\n");
3085 		return r;
3086 	}
3087 
3088 	register_hpd_handlers(adev);
3089 
3090 	return 0;
3091 }
3092 
3093 #if defined(CONFIG_DRM_AMD_DC_DCN)
3094 /* Register IRQ sources and initialize IRQ callbacks */
3095 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3096 {
3097 	struct dc *dc = adev->dm.dc;
3098 	struct common_irq_params *c_irq_params;
3099 	struct dc_interrupt_params int_params = {0};
3100 	int r;
3101 	int i;
3102 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3103 	static const unsigned int vrtl_int_srcid[] = {
3104 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3105 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3106 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3107 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3108 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3109 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3110 	};
3111 #endif
3112 
3113 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3114 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3115 
3116 	/*
3117 	 * Actions of amdgpu_irq_add_id():
3118 	 * 1. Register a set() function with base driver.
3119 	 *    Base driver will call set() function to enable/disable an
3120 	 *    interrupt in DC hardware.
3121 	 * 2. Register amdgpu_dm_irq_handler().
3122 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3123 	 *    coming from DC hardware.
3124 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3125 	 *    for acknowledging and handling.
3126 	 */
3127 
3128 	/* Use VSTARTUP interrupt */
3129 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3130 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3131 			i++) {
3132 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3133 
3134 		if (r) {
3135 			DRM_ERROR("Failed to add crtc irq id!\n");
3136 			return r;
3137 		}
3138 
3139 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3140 		int_params.irq_source =
3141 			dc_interrupt_to_irq_source(dc, i, 0);
3142 
3143 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3144 
3145 		c_irq_params->adev = adev;
3146 		c_irq_params->irq_src = int_params.irq_source;
3147 
3148 		amdgpu_dm_irq_register_interrupt(
3149 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3150 	}
3151 
3152 	/* Use otg vertical line interrupt */
3153 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3154 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3155 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3156 				vrtl_int_srcid[i], &adev->vline0_irq);
3157 
3158 		if (r) {
3159 			DRM_ERROR("Failed to add vline0 irq id!\n");
3160 			return r;
3161 		}
3162 
3163 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3164 		int_params.irq_source =
3165 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3166 
3167 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3168 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3169 			break;
3170 		}
3171 
3172 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3173 					- DC_IRQ_SOURCE_DC1_VLINE0];
3174 
3175 		c_irq_params->adev = adev;
3176 		c_irq_params->irq_src = int_params.irq_source;
3177 
3178 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3179 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3180 	}
3181 #endif
3182 
3183 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3184 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3185 	 * to trigger at end of each vblank, regardless of state of the lock,
3186 	 * matching DCE behaviour.
3187 	 */
3188 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3189 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3190 	     i++) {
3191 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3192 
3193 		if (r) {
3194 			DRM_ERROR("Failed to add vupdate irq id!\n");
3195 			return r;
3196 		}
3197 
3198 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3199 		int_params.irq_source =
3200 			dc_interrupt_to_irq_source(dc, i, 0);
3201 
3202 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3203 
3204 		c_irq_params->adev = adev;
3205 		c_irq_params->irq_src = int_params.irq_source;
3206 
3207 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3208 				dm_vupdate_high_irq, c_irq_params);
3209 	}
3210 
3211 	/* Use GRPH_PFLIP interrupt */
3212 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3213 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3214 			i++) {
3215 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3216 		if (r) {
3217 			DRM_ERROR("Failed to add page flip irq id!\n");
3218 			return r;
3219 		}
3220 
3221 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3222 		int_params.irq_source =
3223 			dc_interrupt_to_irq_source(dc, i, 0);
3224 
3225 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3226 
3227 		c_irq_params->adev = adev;
3228 		c_irq_params->irq_src = int_params.irq_source;
3229 
3230 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3231 				dm_pflip_high_irq, c_irq_params);
3232 
3233 	}
3234 
3235 	/* HPD */
3236 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3237 			&adev->hpd_irq);
3238 	if (r) {
3239 		DRM_ERROR("Failed to add hpd irq id!\n");
3240 		return r;
3241 	}
3242 
3243 	register_hpd_handlers(adev);
3244 
3245 	return 0;
3246 }
3247 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3248 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3249 {
3250 	struct dc *dc = adev->dm.dc;
3251 	struct common_irq_params *c_irq_params;
3252 	struct dc_interrupt_params int_params = {0};
3253 	int r, i;
3254 
3255 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3256 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3257 
3258 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3259 			&adev->dmub_outbox_irq);
3260 	if (r) {
3261 		DRM_ERROR("Failed to add outbox irq id!\n");
3262 		return r;
3263 	}
3264 
3265 	if (dc->ctx->dmub_srv) {
3266 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3267 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3268 		int_params.irq_source =
3269 		dc_interrupt_to_irq_source(dc, i, 0);
3270 
3271 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3272 
3273 		c_irq_params->adev = adev;
3274 		c_irq_params->irq_src = int_params.irq_source;
3275 
3276 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3277 				dm_dmub_outbox1_low_irq, c_irq_params);
3278 	}
3279 
3280 	return 0;
3281 }
3282 #endif
3283 
3284 /*
3285  * Acquires the lock for the atomic state object and returns
3286  * the new atomic state.
3287  *
3288  * This should only be called during atomic check.
3289  */
3290 static int dm_atomic_get_state(struct drm_atomic_state *state,
3291 			       struct dm_atomic_state **dm_state)
3292 {
3293 	struct drm_device *dev = state->dev;
3294 	struct amdgpu_device *adev = drm_to_adev(dev);
3295 	struct amdgpu_display_manager *dm = &adev->dm;
3296 	struct drm_private_state *priv_state;
3297 
3298 	if (*dm_state)
3299 		return 0;
3300 
3301 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3302 	if (IS_ERR(priv_state))
3303 		return PTR_ERR(priv_state);
3304 
3305 	*dm_state = to_dm_atomic_state(priv_state);
3306 
3307 	return 0;
3308 }
3309 
3310 static struct dm_atomic_state *
3311 dm_atomic_get_new_state(struct drm_atomic_state *state)
3312 {
3313 	struct drm_device *dev = state->dev;
3314 	struct amdgpu_device *adev = drm_to_adev(dev);
3315 	struct amdgpu_display_manager *dm = &adev->dm;
3316 	struct drm_private_obj *obj;
3317 	struct drm_private_state *new_obj_state;
3318 	int i;
3319 
3320 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3321 		if (obj->funcs == dm->atomic_obj.funcs)
3322 			return to_dm_atomic_state(new_obj_state);
3323 	}
3324 
3325 	return NULL;
3326 }
3327 
3328 static struct drm_private_state *
3329 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3330 {
3331 	struct dm_atomic_state *old_state, *new_state;
3332 
3333 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3334 	if (!new_state)
3335 		return NULL;
3336 
3337 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3338 
3339 	old_state = to_dm_atomic_state(obj->state);
3340 
3341 	if (old_state && old_state->context)
3342 		new_state->context = dc_copy_state(old_state->context);
3343 
3344 	if (!new_state->context) {
3345 		kfree(new_state);
3346 		return NULL;
3347 	}
3348 
3349 	return &new_state->base;
3350 }
3351 
3352 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3353 				    struct drm_private_state *state)
3354 {
3355 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3356 
3357 	if (dm_state && dm_state->context)
3358 		dc_release_state(dm_state->context);
3359 
3360 	kfree(dm_state);
3361 }
3362 
3363 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3364 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3365 	.atomic_destroy_state = dm_atomic_destroy_state,
3366 };
3367 
3368 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3369 {
3370 	struct dm_atomic_state *state;
3371 	int r;
3372 
3373 	adev->mode_info.mode_config_initialized = true;
3374 
3375 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3376 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3377 
3378 	adev_to_drm(adev)->mode_config.max_width = 16384;
3379 	adev_to_drm(adev)->mode_config.max_height = 16384;
3380 
3381 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3382 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3383 	/* indicates support for immediate flip */
3384 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3385 
3386 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3387 
3388 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3389 	if (!state)
3390 		return -ENOMEM;
3391 
3392 	state->context = dc_create_state(adev->dm.dc);
3393 	if (!state->context) {
3394 		kfree(state);
3395 		return -ENOMEM;
3396 	}
3397 
3398 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3399 
3400 	drm_atomic_private_obj_init(adev_to_drm(adev),
3401 				    &adev->dm.atomic_obj,
3402 				    &state->base,
3403 				    &dm_atomic_state_funcs);
3404 
3405 	r = amdgpu_display_modeset_create_props(adev);
3406 	if (r) {
3407 		dc_release_state(state->context);
3408 		kfree(state);
3409 		return r;
3410 	}
3411 
3412 	r = amdgpu_dm_audio_init(adev);
3413 	if (r) {
3414 		dc_release_state(state->context);
3415 		kfree(state);
3416 		return r;
3417 	}
3418 
3419 	return 0;
3420 }
3421 
3422 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3423 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3424 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3425 
3426 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3427 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3428 
3429 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3430 					    int bl_idx)
3431 {
3432 #if defined(CONFIG_ACPI)
3433 	struct amdgpu_dm_backlight_caps caps;
3434 
3435 	memset(&caps, 0, sizeof(caps));
3436 
3437 	if (dm->backlight_caps[bl_idx].caps_valid)
3438 		return;
3439 
3440 	amdgpu_acpi_get_backlight_caps(&caps);
3441 	if (caps.caps_valid) {
3442 		dm->backlight_caps[bl_idx].caps_valid = true;
3443 		if (caps.aux_support)
3444 			return;
3445 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3446 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3447 	} else {
3448 		dm->backlight_caps[bl_idx].min_input_signal =
3449 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3450 		dm->backlight_caps[bl_idx].max_input_signal =
3451 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3452 	}
3453 #else
3454 	if (dm->backlight_caps[bl_idx].aux_support)
3455 		return;
3456 
3457 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3458 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3459 #endif
3460 }
3461 
3462 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3463 				unsigned *min, unsigned *max)
3464 {
3465 	if (!caps)
3466 		return 0;
3467 
3468 	if (caps->aux_support) {
3469 		// Firmware limits are in nits, DC API wants millinits.
3470 		*max = 1000 * caps->aux_max_input_signal;
3471 		*min = 1000 * caps->aux_min_input_signal;
3472 	} else {
3473 		// Firmware limits are 8-bit, PWM control is 16-bit.
3474 		*max = 0x101 * caps->max_input_signal;
3475 		*min = 0x101 * caps->min_input_signal;
3476 	}
3477 	return 1;
3478 }
3479 
3480 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3481 					uint32_t brightness)
3482 {
3483 	unsigned min, max;
3484 
3485 	if (!get_brightness_range(caps, &min, &max))
3486 		return brightness;
3487 
3488 	// Rescale 0..255 to min..max
3489 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3490 				       AMDGPU_MAX_BL_LEVEL);
3491 }
3492 
3493 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3494 				      uint32_t brightness)
3495 {
3496 	unsigned min, max;
3497 
3498 	if (!get_brightness_range(caps, &min, &max))
3499 		return brightness;
3500 
3501 	if (brightness < min)
3502 		return 0;
3503 	// Rescale min..max to 0..255
3504 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3505 				 max - min);
3506 }
3507 
3508 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3509 					 int bl_idx,
3510 					 u32 user_brightness)
3511 {
3512 	struct amdgpu_dm_backlight_caps caps;
3513 	struct dc_link *link;
3514 	u32 brightness;
3515 	bool rc;
3516 
3517 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3518 	caps = dm->backlight_caps[bl_idx];
3519 
3520 	dm->brightness[bl_idx] = user_brightness;
3521 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3522 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3523 
3524 	/* Change brightness based on AUX property */
3525 	if (caps.aux_support) {
3526 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3527 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3528 		if (!rc)
3529 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3530 	} else {
3531 		rc = dc_link_set_backlight_level(link, brightness, 0);
3532 		if (!rc)
3533 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3534 	}
3535 
3536 	return rc ? 0 : 1;
3537 }
3538 
3539 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3540 {
3541 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3542 	int i;
3543 
3544 	for (i = 0; i < dm->num_of_edps; i++) {
3545 		if (bd == dm->backlight_dev[i])
3546 			break;
3547 	}
3548 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3549 		i = 0;
3550 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3551 
3552 	return 0;
3553 }
3554 
3555 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3556 					 int bl_idx)
3557 {
3558 	struct amdgpu_dm_backlight_caps caps;
3559 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3560 
3561 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3562 	caps = dm->backlight_caps[bl_idx];
3563 
3564 	if (caps.aux_support) {
3565 		u32 avg, peak;
3566 		bool rc;
3567 
3568 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3569 		if (!rc)
3570 			return dm->brightness[bl_idx];
3571 		return convert_brightness_to_user(&caps, avg);
3572 	} else {
3573 		int ret = dc_link_get_backlight_level(link);
3574 
3575 		if (ret == DC_ERROR_UNEXPECTED)
3576 			return dm->brightness[bl_idx];
3577 		return convert_brightness_to_user(&caps, ret);
3578 	}
3579 }
3580 
3581 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3582 {
3583 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3584 	int i;
3585 
3586 	for (i = 0; i < dm->num_of_edps; i++) {
3587 		if (bd == dm->backlight_dev[i])
3588 			break;
3589 	}
3590 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3591 		i = 0;
3592 	return amdgpu_dm_backlight_get_level(dm, i);
3593 }
3594 
3595 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3596 	.options = BL_CORE_SUSPENDRESUME,
3597 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3598 	.update_status	= amdgpu_dm_backlight_update_status,
3599 };
3600 
3601 static void
3602 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3603 {
3604 	char bl_name[16];
3605 	struct backlight_properties props = { 0 };
3606 
3607 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3608 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
3609 
3610 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3611 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3612 	props.type = BACKLIGHT_RAW;
3613 
3614 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3615 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
3616 
3617 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
3618 								       adev_to_drm(dm->adev)->dev,
3619 								       dm,
3620 								       &amdgpu_dm_backlight_ops,
3621 								       &props);
3622 
3623 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
3624 		DRM_ERROR("DM: Backlight registration failed!\n");
3625 	else
3626 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3627 }
3628 #endif
3629 
3630 static int initialize_plane(struct amdgpu_display_manager *dm,
3631 			    struct amdgpu_mode_info *mode_info, int plane_id,
3632 			    enum drm_plane_type plane_type,
3633 			    const struct dc_plane_cap *plane_cap)
3634 {
3635 	struct drm_plane *plane;
3636 	unsigned long possible_crtcs;
3637 	int ret = 0;
3638 
3639 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3640 	if (!plane) {
3641 		DRM_ERROR("KMS: Failed to allocate plane\n");
3642 		return -ENOMEM;
3643 	}
3644 	plane->type = plane_type;
3645 
3646 	/*
3647 	 * HACK: IGT tests expect that the primary plane for a CRTC
3648 	 * can only have one possible CRTC. Only expose support for
3649 	 * any CRTC if they're not going to be used as a primary plane
3650 	 * for a CRTC - like overlay or underlay planes.
3651 	 */
3652 	possible_crtcs = 1 << plane_id;
3653 	if (plane_id >= dm->dc->caps.max_streams)
3654 		possible_crtcs = 0xff;
3655 
3656 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3657 
3658 	if (ret) {
3659 		DRM_ERROR("KMS: Failed to initialize plane\n");
3660 		kfree(plane);
3661 		return ret;
3662 	}
3663 
3664 	if (mode_info)
3665 		mode_info->planes[plane_id] = plane;
3666 
3667 	return ret;
3668 }
3669 
3670 
3671 static void register_backlight_device(struct amdgpu_display_manager *dm,
3672 				      struct dc_link *link)
3673 {
3674 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3675 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3676 
3677 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3678 	    link->type != dc_connection_none) {
3679 		/*
3680 		 * Event if registration failed, we should continue with
3681 		 * DM initialization because not having a backlight control
3682 		 * is better then a black screen.
3683 		 */
3684 		if (!dm->backlight_dev[dm->num_of_edps])
3685 			amdgpu_dm_register_backlight_device(dm);
3686 
3687 		if (dm->backlight_dev[dm->num_of_edps]) {
3688 			dm->backlight_link[dm->num_of_edps] = link;
3689 			dm->num_of_edps++;
3690 		}
3691 	}
3692 #endif
3693 }
3694 
3695 
3696 /*
3697  * In this architecture, the association
3698  * connector -> encoder -> crtc
3699  * id not really requried. The crtc and connector will hold the
3700  * display_index as an abstraction to use with DAL component
3701  *
3702  * Returns 0 on success
3703  */
3704 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3705 {
3706 	struct amdgpu_display_manager *dm = &adev->dm;
3707 	int32_t i;
3708 	struct amdgpu_dm_connector *aconnector = NULL;
3709 	struct amdgpu_encoder *aencoder = NULL;
3710 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3711 	uint32_t link_cnt;
3712 	int32_t primary_planes;
3713 	enum dc_connection_type new_connection_type = dc_connection_none;
3714 	const struct dc_plane_cap *plane;
3715 
3716 	dm->display_indexes_num = dm->dc->caps.max_streams;
3717 	/* Update the actual used number of crtc */
3718 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3719 
3720 	link_cnt = dm->dc->caps.max_links;
3721 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3722 		DRM_ERROR("DM: Failed to initialize mode config\n");
3723 		return -EINVAL;
3724 	}
3725 
3726 	/* There is one primary plane per CRTC */
3727 	primary_planes = dm->dc->caps.max_streams;
3728 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3729 
3730 	/*
3731 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3732 	 * Order is reversed to match iteration order in atomic check.
3733 	 */
3734 	for (i = (primary_planes - 1); i >= 0; i--) {
3735 		plane = &dm->dc->caps.planes[i];
3736 
3737 		if (initialize_plane(dm, mode_info, i,
3738 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3739 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3740 			goto fail;
3741 		}
3742 	}
3743 
3744 	/*
3745 	 * Initialize overlay planes, index starting after primary planes.
3746 	 * These planes have a higher DRM index than the primary planes since
3747 	 * they should be considered as having a higher z-order.
3748 	 * Order is reversed to match iteration order in atomic check.
3749 	 *
3750 	 * Only support DCN for now, and only expose one so we don't encourage
3751 	 * userspace to use up all the pipes.
3752 	 */
3753 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3754 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3755 
3756 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3757 			continue;
3758 
3759 		if (!plane->blends_with_above || !plane->blends_with_below)
3760 			continue;
3761 
3762 		if (!plane->pixel_format_support.argb8888)
3763 			continue;
3764 
3765 		if (initialize_plane(dm, NULL, primary_planes + i,
3766 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3767 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3768 			goto fail;
3769 		}
3770 
3771 		/* Only create one overlay plane. */
3772 		break;
3773 	}
3774 
3775 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3776 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3777 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3778 			goto fail;
3779 		}
3780 
3781 #if defined(CONFIG_DRM_AMD_DC_DCN)
3782 	/* Use Outbox interrupt */
3783 	switch (adev->asic_type) {
3784 	case CHIP_SIENNA_CICHLID:
3785 	case CHIP_NAVY_FLOUNDER:
3786 	case CHIP_YELLOW_CARP:
3787 	case CHIP_RENOIR:
3788 		if (register_outbox_irq_handlers(dm->adev)) {
3789 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3790 			goto fail;
3791 		}
3792 		break;
3793 	default:
3794 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3795 	}
3796 #endif
3797 
3798 	/* loops over all connectors on the board */
3799 	for (i = 0; i < link_cnt; i++) {
3800 		struct dc_link *link = NULL;
3801 
3802 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3803 			DRM_ERROR(
3804 				"KMS: Cannot support more than %d display indexes\n",
3805 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3806 			continue;
3807 		}
3808 
3809 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3810 		if (!aconnector)
3811 			goto fail;
3812 
3813 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3814 		if (!aencoder)
3815 			goto fail;
3816 
3817 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3818 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3819 			goto fail;
3820 		}
3821 
3822 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3823 			DRM_ERROR("KMS: Failed to initialize connector\n");
3824 			goto fail;
3825 		}
3826 
3827 		link = dc_get_link_at_index(dm->dc, i);
3828 
3829 		if (!dc_link_detect_sink(link, &new_connection_type))
3830 			DRM_ERROR("KMS: Failed to detect connector\n");
3831 
3832 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3833 			emulated_link_detect(link);
3834 			amdgpu_dm_update_connector_after_detect(aconnector);
3835 
3836 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3837 			amdgpu_dm_update_connector_after_detect(aconnector);
3838 			register_backlight_device(dm, link);
3839 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3840 				amdgpu_dm_set_psr_caps(link);
3841 		}
3842 
3843 
3844 	}
3845 
3846 	/* Software is initialized. Now we can register interrupt handlers. */
3847 	switch (adev->asic_type) {
3848 #if defined(CONFIG_DRM_AMD_DC_SI)
3849 	case CHIP_TAHITI:
3850 	case CHIP_PITCAIRN:
3851 	case CHIP_VERDE:
3852 	case CHIP_OLAND:
3853 		if (dce60_register_irq_handlers(dm->adev)) {
3854 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3855 			goto fail;
3856 		}
3857 		break;
3858 #endif
3859 	case CHIP_BONAIRE:
3860 	case CHIP_HAWAII:
3861 	case CHIP_KAVERI:
3862 	case CHIP_KABINI:
3863 	case CHIP_MULLINS:
3864 	case CHIP_TONGA:
3865 	case CHIP_FIJI:
3866 	case CHIP_CARRIZO:
3867 	case CHIP_STONEY:
3868 	case CHIP_POLARIS11:
3869 	case CHIP_POLARIS10:
3870 	case CHIP_POLARIS12:
3871 	case CHIP_VEGAM:
3872 	case CHIP_VEGA10:
3873 	case CHIP_VEGA12:
3874 	case CHIP_VEGA20:
3875 		if (dce110_register_irq_handlers(dm->adev)) {
3876 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3877 			goto fail;
3878 		}
3879 		break;
3880 #if defined(CONFIG_DRM_AMD_DC_DCN)
3881 	case CHIP_RAVEN:
3882 	case CHIP_NAVI12:
3883 	case CHIP_NAVI10:
3884 	case CHIP_NAVI14:
3885 	case CHIP_RENOIR:
3886 	case CHIP_SIENNA_CICHLID:
3887 	case CHIP_NAVY_FLOUNDER:
3888 	case CHIP_DIMGREY_CAVEFISH:
3889 	case CHIP_BEIGE_GOBY:
3890 	case CHIP_VANGOGH:
3891 	case CHIP_YELLOW_CARP:
3892 		if (dcn10_register_irq_handlers(dm->adev)) {
3893 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3894 			goto fail;
3895 		}
3896 		break;
3897 #endif
3898 	default:
3899 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3900 		goto fail;
3901 	}
3902 
3903 	return 0;
3904 fail:
3905 	kfree(aencoder);
3906 	kfree(aconnector);
3907 
3908 	return -EINVAL;
3909 }
3910 
3911 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3912 {
3913 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3914 	return;
3915 }
3916 
3917 /******************************************************************************
3918  * amdgpu_display_funcs functions
3919  *****************************************************************************/
3920 
3921 /*
3922  * dm_bandwidth_update - program display watermarks
3923  *
3924  * @adev: amdgpu_device pointer
3925  *
3926  * Calculate and program the display watermarks and line buffer allocation.
3927  */
3928 static void dm_bandwidth_update(struct amdgpu_device *adev)
3929 {
3930 	/* TODO: implement later */
3931 }
3932 
3933 static const struct amdgpu_display_funcs dm_display_funcs = {
3934 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3935 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3936 	.backlight_set_level = NULL, /* never called for DC */
3937 	.backlight_get_level = NULL, /* never called for DC */
3938 	.hpd_sense = NULL,/* called unconditionally */
3939 	.hpd_set_polarity = NULL, /* called unconditionally */
3940 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3941 	.page_flip_get_scanoutpos =
3942 		dm_crtc_get_scanoutpos,/* called unconditionally */
3943 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3944 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3945 };
3946 
3947 #if defined(CONFIG_DEBUG_KERNEL_DC)
3948 
3949 static ssize_t s3_debug_store(struct device *device,
3950 			      struct device_attribute *attr,
3951 			      const char *buf,
3952 			      size_t count)
3953 {
3954 	int ret;
3955 	int s3_state;
3956 	struct drm_device *drm_dev = dev_get_drvdata(device);
3957 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3958 
3959 	ret = kstrtoint(buf, 0, &s3_state);
3960 
3961 	if (ret == 0) {
3962 		if (s3_state) {
3963 			dm_resume(adev);
3964 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3965 		} else
3966 			dm_suspend(adev);
3967 	}
3968 
3969 	return ret == 0 ? count : 0;
3970 }
3971 
3972 DEVICE_ATTR_WO(s3_debug);
3973 
3974 #endif
3975 
3976 static int dm_early_init(void *handle)
3977 {
3978 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3979 
3980 	switch (adev->asic_type) {
3981 #if defined(CONFIG_DRM_AMD_DC_SI)
3982 	case CHIP_TAHITI:
3983 	case CHIP_PITCAIRN:
3984 	case CHIP_VERDE:
3985 		adev->mode_info.num_crtc = 6;
3986 		adev->mode_info.num_hpd = 6;
3987 		adev->mode_info.num_dig = 6;
3988 		break;
3989 	case CHIP_OLAND:
3990 		adev->mode_info.num_crtc = 2;
3991 		adev->mode_info.num_hpd = 2;
3992 		adev->mode_info.num_dig = 2;
3993 		break;
3994 #endif
3995 	case CHIP_BONAIRE:
3996 	case CHIP_HAWAII:
3997 		adev->mode_info.num_crtc = 6;
3998 		adev->mode_info.num_hpd = 6;
3999 		adev->mode_info.num_dig = 6;
4000 		break;
4001 	case CHIP_KAVERI:
4002 		adev->mode_info.num_crtc = 4;
4003 		adev->mode_info.num_hpd = 6;
4004 		adev->mode_info.num_dig = 7;
4005 		break;
4006 	case CHIP_KABINI:
4007 	case CHIP_MULLINS:
4008 		adev->mode_info.num_crtc = 2;
4009 		adev->mode_info.num_hpd = 6;
4010 		adev->mode_info.num_dig = 6;
4011 		break;
4012 	case CHIP_FIJI:
4013 	case CHIP_TONGA:
4014 		adev->mode_info.num_crtc = 6;
4015 		adev->mode_info.num_hpd = 6;
4016 		adev->mode_info.num_dig = 7;
4017 		break;
4018 	case CHIP_CARRIZO:
4019 		adev->mode_info.num_crtc = 3;
4020 		adev->mode_info.num_hpd = 6;
4021 		adev->mode_info.num_dig = 9;
4022 		break;
4023 	case CHIP_STONEY:
4024 		adev->mode_info.num_crtc = 2;
4025 		adev->mode_info.num_hpd = 6;
4026 		adev->mode_info.num_dig = 9;
4027 		break;
4028 	case CHIP_POLARIS11:
4029 	case CHIP_POLARIS12:
4030 		adev->mode_info.num_crtc = 5;
4031 		adev->mode_info.num_hpd = 5;
4032 		adev->mode_info.num_dig = 5;
4033 		break;
4034 	case CHIP_POLARIS10:
4035 	case CHIP_VEGAM:
4036 		adev->mode_info.num_crtc = 6;
4037 		adev->mode_info.num_hpd = 6;
4038 		adev->mode_info.num_dig = 6;
4039 		break;
4040 	case CHIP_VEGA10:
4041 	case CHIP_VEGA12:
4042 	case CHIP_VEGA20:
4043 		adev->mode_info.num_crtc = 6;
4044 		adev->mode_info.num_hpd = 6;
4045 		adev->mode_info.num_dig = 6;
4046 		break;
4047 #if defined(CONFIG_DRM_AMD_DC_DCN)
4048 	case CHIP_RAVEN:
4049 	case CHIP_RENOIR:
4050 	case CHIP_VANGOGH:
4051 		adev->mode_info.num_crtc = 4;
4052 		adev->mode_info.num_hpd = 4;
4053 		adev->mode_info.num_dig = 4;
4054 		break;
4055 	case CHIP_NAVI10:
4056 	case CHIP_NAVI12:
4057 	case CHIP_SIENNA_CICHLID:
4058 	case CHIP_NAVY_FLOUNDER:
4059 		adev->mode_info.num_crtc = 6;
4060 		adev->mode_info.num_hpd = 6;
4061 		adev->mode_info.num_dig = 6;
4062 		break;
4063 	case CHIP_YELLOW_CARP:
4064 		adev->mode_info.num_crtc = 4;
4065 		adev->mode_info.num_hpd = 4;
4066 		adev->mode_info.num_dig = 4;
4067 		break;
4068 	case CHIP_NAVI14:
4069 	case CHIP_DIMGREY_CAVEFISH:
4070 		adev->mode_info.num_crtc = 5;
4071 		adev->mode_info.num_hpd = 5;
4072 		adev->mode_info.num_dig = 5;
4073 		break;
4074 	case CHIP_BEIGE_GOBY:
4075 		adev->mode_info.num_crtc = 2;
4076 		adev->mode_info.num_hpd = 2;
4077 		adev->mode_info.num_dig = 2;
4078 		break;
4079 #endif
4080 	default:
4081 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4082 		return -EINVAL;
4083 	}
4084 
4085 	amdgpu_dm_set_irq_funcs(adev);
4086 
4087 	if (adev->mode_info.funcs == NULL)
4088 		adev->mode_info.funcs = &dm_display_funcs;
4089 
4090 	/*
4091 	 * Note: Do NOT change adev->audio_endpt_rreg and
4092 	 * adev->audio_endpt_wreg because they are initialised in
4093 	 * amdgpu_device_init()
4094 	 */
4095 #if defined(CONFIG_DEBUG_KERNEL_DC)
4096 	device_create_file(
4097 		adev_to_drm(adev)->dev,
4098 		&dev_attr_s3_debug);
4099 #endif
4100 
4101 	return 0;
4102 }
4103 
4104 static bool modeset_required(struct drm_crtc_state *crtc_state,
4105 			     struct dc_stream_state *new_stream,
4106 			     struct dc_stream_state *old_stream)
4107 {
4108 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4109 }
4110 
4111 static bool modereset_required(struct drm_crtc_state *crtc_state)
4112 {
4113 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4114 }
4115 
4116 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4117 {
4118 	drm_encoder_cleanup(encoder);
4119 	kfree(encoder);
4120 }
4121 
4122 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4123 	.destroy = amdgpu_dm_encoder_destroy,
4124 };
4125 
4126 
4127 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4128 					 struct drm_framebuffer *fb,
4129 					 int *min_downscale, int *max_upscale)
4130 {
4131 	struct amdgpu_device *adev = drm_to_adev(dev);
4132 	struct dc *dc = adev->dm.dc;
4133 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4134 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4135 
4136 	switch (fb->format->format) {
4137 	case DRM_FORMAT_P010:
4138 	case DRM_FORMAT_NV12:
4139 	case DRM_FORMAT_NV21:
4140 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4141 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4142 		break;
4143 
4144 	case DRM_FORMAT_XRGB16161616F:
4145 	case DRM_FORMAT_ARGB16161616F:
4146 	case DRM_FORMAT_XBGR16161616F:
4147 	case DRM_FORMAT_ABGR16161616F:
4148 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4149 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4150 		break;
4151 
4152 	default:
4153 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4154 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4155 		break;
4156 	}
4157 
4158 	/*
4159 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4160 	 * scaling factor of 1.0 == 1000 units.
4161 	 */
4162 	if (*max_upscale == 1)
4163 		*max_upscale = 1000;
4164 
4165 	if (*min_downscale == 1)
4166 		*min_downscale = 1000;
4167 }
4168 
4169 
4170 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4171 				struct dc_scaling_info *scaling_info)
4172 {
4173 	int scale_w, scale_h, min_downscale, max_upscale;
4174 
4175 	memset(scaling_info, 0, sizeof(*scaling_info));
4176 
4177 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4178 	scaling_info->src_rect.x = state->src_x >> 16;
4179 	scaling_info->src_rect.y = state->src_y >> 16;
4180 
4181 	/*
4182 	 * For reasons we don't (yet) fully understand a non-zero
4183 	 * src_y coordinate into an NV12 buffer can cause a
4184 	 * system hang. To avoid hangs (and maybe be overly cautious)
4185 	 * let's reject both non-zero src_x and src_y.
4186 	 *
4187 	 * We currently know of only one use-case to reproduce a
4188 	 * scenario with non-zero src_x and src_y for NV12, which
4189 	 * is to gesture the YouTube Android app into full screen
4190 	 * on ChromeOS.
4191 	 */
4192 	if (state->fb &&
4193 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4194 	    (scaling_info->src_rect.x != 0 ||
4195 	     scaling_info->src_rect.y != 0))
4196 		return -EINVAL;
4197 
4198 	scaling_info->src_rect.width = state->src_w >> 16;
4199 	if (scaling_info->src_rect.width == 0)
4200 		return -EINVAL;
4201 
4202 	scaling_info->src_rect.height = state->src_h >> 16;
4203 	if (scaling_info->src_rect.height == 0)
4204 		return -EINVAL;
4205 
4206 	scaling_info->dst_rect.x = state->crtc_x;
4207 	scaling_info->dst_rect.y = state->crtc_y;
4208 
4209 	if (state->crtc_w == 0)
4210 		return -EINVAL;
4211 
4212 	scaling_info->dst_rect.width = state->crtc_w;
4213 
4214 	if (state->crtc_h == 0)
4215 		return -EINVAL;
4216 
4217 	scaling_info->dst_rect.height = state->crtc_h;
4218 
4219 	/* DRM doesn't specify clipping on destination output. */
4220 	scaling_info->clip_rect = scaling_info->dst_rect;
4221 
4222 	/* Validate scaling per-format with DC plane caps */
4223 	if (state->plane && state->plane->dev && state->fb) {
4224 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4225 					     &min_downscale, &max_upscale);
4226 	} else {
4227 		min_downscale = 250;
4228 		max_upscale = 16000;
4229 	}
4230 
4231 	scale_w = scaling_info->dst_rect.width * 1000 /
4232 		  scaling_info->src_rect.width;
4233 
4234 	if (scale_w < min_downscale || scale_w > max_upscale)
4235 		return -EINVAL;
4236 
4237 	scale_h = scaling_info->dst_rect.height * 1000 /
4238 		  scaling_info->src_rect.height;
4239 
4240 	if (scale_h < min_downscale || scale_h > max_upscale)
4241 		return -EINVAL;
4242 
4243 	/*
4244 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4245 	 * assume reasonable defaults based on the format.
4246 	 */
4247 
4248 	return 0;
4249 }
4250 
4251 static void
4252 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4253 				 uint64_t tiling_flags)
4254 {
4255 	/* Fill GFX8 params */
4256 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4257 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4258 
4259 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4260 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4261 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4262 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4263 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4264 
4265 		/* XXX fix me for VI */
4266 		tiling_info->gfx8.num_banks = num_banks;
4267 		tiling_info->gfx8.array_mode =
4268 				DC_ARRAY_2D_TILED_THIN1;
4269 		tiling_info->gfx8.tile_split = tile_split;
4270 		tiling_info->gfx8.bank_width = bankw;
4271 		tiling_info->gfx8.bank_height = bankh;
4272 		tiling_info->gfx8.tile_aspect = mtaspect;
4273 		tiling_info->gfx8.tile_mode =
4274 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4275 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4276 			== DC_ARRAY_1D_TILED_THIN1) {
4277 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4278 	}
4279 
4280 	tiling_info->gfx8.pipe_config =
4281 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4282 }
4283 
4284 static void
4285 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4286 				  union dc_tiling_info *tiling_info)
4287 {
4288 	tiling_info->gfx9.num_pipes =
4289 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4290 	tiling_info->gfx9.num_banks =
4291 		adev->gfx.config.gb_addr_config_fields.num_banks;
4292 	tiling_info->gfx9.pipe_interleave =
4293 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4294 	tiling_info->gfx9.num_shader_engines =
4295 		adev->gfx.config.gb_addr_config_fields.num_se;
4296 	tiling_info->gfx9.max_compressed_frags =
4297 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4298 	tiling_info->gfx9.num_rb_per_se =
4299 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4300 	tiling_info->gfx9.shaderEnable = 1;
4301 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4302 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4303 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4304 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4305 	    adev->asic_type == CHIP_YELLOW_CARP ||
4306 	    adev->asic_type == CHIP_VANGOGH)
4307 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4308 }
4309 
4310 static int
4311 validate_dcc(struct amdgpu_device *adev,
4312 	     const enum surface_pixel_format format,
4313 	     const enum dc_rotation_angle rotation,
4314 	     const union dc_tiling_info *tiling_info,
4315 	     const struct dc_plane_dcc_param *dcc,
4316 	     const struct dc_plane_address *address,
4317 	     const struct plane_size *plane_size)
4318 {
4319 	struct dc *dc = adev->dm.dc;
4320 	struct dc_dcc_surface_param input;
4321 	struct dc_surface_dcc_cap output;
4322 
4323 	memset(&input, 0, sizeof(input));
4324 	memset(&output, 0, sizeof(output));
4325 
4326 	if (!dcc->enable)
4327 		return 0;
4328 
4329 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4330 	    !dc->cap_funcs.get_dcc_compression_cap)
4331 		return -EINVAL;
4332 
4333 	input.format = format;
4334 	input.surface_size.width = plane_size->surface_size.width;
4335 	input.surface_size.height = plane_size->surface_size.height;
4336 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4337 
4338 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4339 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4340 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4341 		input.scan = SCAN_DIRECTION_VERTICAL;
4342 
4343 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4344 		return -EINVAL;
4345 
4346 	if (!output.capable)
4347 		return -EINVAL;
4348 
4349 	if (dcc->independent_64b_blks == 0 &&
4350 	    output.grph.rgb.independent_64b_blks != 0)
4351 		return -EINVAL;
4352 
4353 	return 0;
4354 }
4355 
4356 static bool
4357 modifier_has_dcc(uint64_t modifier)
4358 {
4359 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4360 }
4361 
4362 static unsigned
4363 modifier_gfx9_swizzle_mode(uint64_t modifier)
4364 {
4365 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4366 		return 0;
4367 
4368 	return AMD_FMT_MOD_GET(TILE, modifier);
4369 }
4370 
4371 static const struct drm_format_info *
4372 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4373 {
4374 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4375 }
4376 
4377 static void
4378 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4379 				    union dc_tiling_info *tiling_info,
4380 				    uint64_t modifier)
4381 {
4382 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4383 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4384 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4385 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4386 
4387 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4388 
4389 	if (!IS_AMD_FMT_MOD(modifier))
4390 		return;
4391 
4392 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4393 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4394 
4395 	if (adev->family >= AMDGPU_FAMILY_NV) {
4396 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4397 	} else {
4398 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4399 
4400 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4401 	}
4402 }
4403 
4404 enum dm_micro_swizzle {
4405 	MICRO_SWIZZLE_Z = 0,
4406 	MICRO_SWIZZLE_S = 1,
4407 	MICRO_SWIZZLE_D = 2,
4408 	MICRO_SWIZZLE_R = 3
4409 };
4410 
4411 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4412 					  uint32_t format,
4413 					  uint64_t modifier)
4414 {
4415 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4416 	const struct drm_format_info *info = drm_format_info(format);
4417 	int i;
4418 
4419 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4420 
4421 	if (!info)
4422 		return false;
4423 
4424 	/*
4425 	 * We always have to allow these modifiers:
4426 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4427 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4428 	 */
4429 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4430 	    modifier == DRM_FORMAT_MOD_INVALID) {
4431 		return true;
4432 	}
4433 
4434 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4435 	for (i = 0; i < plane->modifier_count; i++) {
4436 		if (modifier == plane->modifiers[i])
4437 			break;
4438 	}
4439 	if (i == plane->modifier_count)
4440 		return false;
4441 
4442 	/*
4443 	 * For D swizzle the canonical modifier depends on the bpp, so check
4444 	 * it here.
4445 	 */
4446 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4447 	    adev->family >= AMDGPU_FAMILY_NV) {
4448 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4449 			return false;
4450 	}
4451 
4452 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4453 	    info->cpp[0] < 8)
4454 		return false;
4455 
4456 	if (modifier_has_dcc(modifier)) {
4457 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4458 		if (info->cpp[0] != 4)
4459 			return false;
4460 		/* We support multi-planar formats, but not when combined with
4461 		 * additional DCC metadata planes. */
4462 		if (info->num_planes > 1)
4463 			return false;
4464 	}
4465 
4466 	return true;
4467 }
4468 
4469 static void
4470 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4471 {
4472 	if (!*mods)
4473 		return;
4474 
4475 	if (*cap - *size < 1) {
4476 		uint64_t new_cap = *cap * 2;
4477 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4478 
4479 		if (!new_mods) {
4480 			kfree(*mods);
4481 			*mods = NULL;
4482 			return;
4483 		}
4484 
4485 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4486 		kfree(*mods);
4487 		*mods = new_mods;
4488 		*cap = new_cap;
4489 	}
4490 
4491 	(*mods)[*size] = mod;
4492 	*size += 1;
4493 }
4494 
4495 static void
4496 add_gfx9_modifiers(const struct amdgpu_device *adev,
4497 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4498 {
4499 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4500 	int pipe_xor_bits = min(8, pipes +
4501 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4502 	int bank_xor_bits = min(8 - pipe_xor_bits,
4503 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4504 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4505 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4506 
4507 
4508 	if (adev->family == AMDGPU_FAMILY_RV) {
4509 		/* Raven2 and later */
4510 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4511 
4512 		/*
4513 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4514 		 * doesn't support _D on DCN
4515 		 */
4516 
4517 		if (has_constant_encode) {
4518 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4519 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4520 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4521 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4522 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4523 				    AMD_FMT_MOD_SET(DCC, 1) |
4524 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4525 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4526 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4527 		}
4528 
4529 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4530 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4531 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4532 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4533 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4534 			    AMD_FMT_MOD_SET(DCC, 1) |
4535 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4536 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4537 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4538 
4539 		if (has_constant_encode) {
4540 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4541 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4542 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4543 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4544 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4545 				    AMD_FMT_MOD_SET(DCC, 1) |
4546 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4547 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4548 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4549 
4550 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4551 				    AMD_FMT_MOD_SET(RB, rb) |
4552 				    AMD_FMT_MOD_SET(PIPE, pipes));
4553 		}
4554 
4555 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4556 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4557 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4558 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4559 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4560 			    AMD_FMT_MOD_SET(DCC, 1) |
4561 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4562 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4563 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4564 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4565 			    AMD_FMT_MOD_SET(RB, rb) |
4566 			    AMD_FMT_MOD_SET(PIPE, pipes));
4567 	}
4568 
4569 	/*
4570 	 * Only supported for 64bpp on Raven, will be filtered on format in
4571 	 * dm_plane_format_mod_supported.
4572 	 */
4573 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4574 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4575 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4576 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4577 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4578 
4579 	if (adev->family == AMDGPU_FAMILY_RV) {
4580 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4581 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4582 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4583 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4584 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4585 	}
4586 
4587 	/*
4588 	 * Only supported for 64bpp on Raven, will be filtered on format in
4589 	 * dm_plane_format_mod_supported.
4590 	 */
4591 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4592 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4593 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4594 
4595 	if (adev->family == AMDGPU_FAMILY_RV) {
4596 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4597 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4598 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4599 	}
4600 }
4601 
4602 static void
4603 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4604 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4605 {
4606 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4607 
4608 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4609 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4610 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4611 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4612 		    AMD_FMT_MOD_SET(DCC, 1) |
4613 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4614 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4615 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4616 
4617 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4618 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4619 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4620 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4621 		    AMD_FMT_MOD_SET(DCC, 1) |
4622 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4623 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4624 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4625 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4626 
4627 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4628 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4629 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4630 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4631 
4632 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4633 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4634 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4635 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4636 
4637 
4638 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4639 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4640 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4641 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4642 
4643 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4644 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4645 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4646 }
4647 
4648 static void
4649 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4650 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4651 {
4652 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4653 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4654 
4655 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4656 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4657 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4658 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4659 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4660 		    AMD_FMT_MOD_SET(DCC, 1) |
4661 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4662 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4663 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4664 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4665 
4666 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4667 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4668 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4669 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4670 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4671 		    AMD_FMT_MOD_SET(DCC, 1) |
4672 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4673 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4674 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4675 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4676 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4677 
4678 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4679 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4680 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4681 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4682 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4683 
4684 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4685 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4686 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4687 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4688 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4689 
4690 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4691 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4692 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4693 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4694 
4695 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4696 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4697 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4698 }
4699 
4700 static int
4701 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4702 {
4703 	uint64_t size = 0, capacity = 128;
4704 	*mods = NULL;
4705 
4706 	/* We have not hooked up any pre-GFX9 modifiers. */
4707 	if (adev->family < AMDGPU_FAMILY_AI)
4708 		return 0;
4709 
4710 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4711 
4712 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4713 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4714 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4715 		return *mods ? 0 : -ENOMEM;
4716 	}
4717 
4718 	switch (adev->family) {
4719 	case AMDGPU_FAMILY_AI:
4720 	case AMDGPU_FAMILY_RV:
4721 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4722 		break;
4723 	case AMDGPU_FAMILY_NV:
4724 	case AMDGPU_FAMILY_VGH:
4725 	case AMDGPU_FAMILY_YC:
4726 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4727 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4728 		else
4729 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4730 		break;
4731 	}
4732 
4733 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4734 
4735 	/* INVALID marks the end of the list. */
4736 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4737 
4738 	if (!*mods)
4739 		return -ENOMEM;
4740 
4741 	return 0;
4742 }
4743 
4744 static int
4745 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4746 					  const struct amdgpu_framebuffer *afb,
4747 					  const enum surface_pixel_format format,
4748 					  const enum dc_rotation_angle rotation,
4749 					  const struct plane_size *plane_size,
4750 					  union dc_tiling_info *tiling_info,
4751 					  struct dc_plane_dcc_param *dcc,
4752 					  struct dc_plane_address *address,
4753 					  const bool force_disable_dcc)
4754 {
4755 	const uint64_t modifier = afb->base.modifier;
4756 	int ret = 0;
4757 
4758 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4759 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4760 
4761 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4762 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4763 
4764 		dcc->enable = 1;
4765 		dcc->meta_pitch = afb->base.pitches[1];
4766 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4767 
4768 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4769 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4770 	}
4771 
4772 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4773 	if (ret)
4774 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
4775 
4776 	return ret;
4777 }
4778 
4779 static int
4780 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4781 			     const struct amdgpu_framebuffer *afb,
4782 			     const enum surface_pixel_format format,
4783 			     const enum dc_rotation_angle rotation,
4784 			     const uint64_t tiling_flags,
4785 			     union dc_tiling_info *tiling_info,
4786 			     struct plane_size *plane_size,
4787 			     struct dc_plane_dcc_param *dcc,
4788 			     struct dc_plane_address *address,
4789 			     bool tmz_surface,
4790 			     bool force_disable_dcc)
4791 {
4792 	const struct drm_framebuffer *fb = &afb->base;
4793 	int ret;
4794 
4795 	memset(tiling_info, 0, sizeof(*tiling_info));
4796 	memset(plane_size, 0, sizeof(*plane_size));
4797 	memset(dcc, 0, sizeof(*dcc));
4798 	memset(address, 0, sizeof(*address));
4799 
4800 	address->tmz_surface = tmz_surface;
4801 
4802 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4803 		uint64_t addr = afb->address + fb->offsets[0];
4804 
4805 		plane_size->surface_size.x = 0;
4806 		plane_size->surface_size.y = 0;
4807 		plane_size->surface_size.width = fb->width;
4808 		plane_size->surface_size.height = fb->height;
4809 		plane_size->surface_pitch =
4810 			fb->pitches[0] / fb->format->cpp[0];
4811 
4812 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4813 		address->grph.addr.low_part = lower_32_bits(addr);
4814 		address->grph.addr.high_part = upper_32_bits(addr);
4815 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4816 		uint64_t luma_addr = afb->address + fb->offsets[0];
4817 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4818 
4819 		plane_size->surface_size.x = 0;
4820 		plane_size->surface_size.y = 0;
4821 		plane_size->surface_size.width = fb->width;
4822 		plane_size->surface_size.height = fb->height;
4823 		plane_size->surface_pitch =
4824 			fb->pitches[0] / fb->format->cpp[0];
4825 
4826 		plane_size->chroma_size.x = 0;
4827 		plane_size->chroma_size.y = 0;
4828 		/* TODO: set these based on surface format */
4829 		plane_size->chroma_size.width = fb->width / 2;
4830 		plane_size->chroma_size.height = fb->height / 2;
4831 
4832 		plane_size->chroma_pitch =
4833 			fb->pitches[1] / fb->format->cpp[1];
4834 
4835 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4836 		address->video_progressive.luma_addr.low_part =
4837 			lower_32_bits(luma_addr);
4838 		address->video_progressive.luma_addr.high_part =
4839 			upper_32_bits(luma_addr);
4840 		address->video_progressive.chroma_addr.low_part =
4841 			lower_32_bits(chroma_addr);
4842 		address->video_progressive.chroma_addr.high_part =
4843 			upper_32_bits(chroma_addr);
4844 	}
4845 
4846 	if (adev->family >= AMDGPU_FAMILY_AI) {
4847 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4848 								rotation, plane_size,
4849 								tiling_info, dcc,
4850 								address,
4851 								force_disable_dcc);
4852 		if (ret)
4853 			return ret;
4854 	} else {
4855 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4856 	}
4857 
4858 	return 0;
4859 }
4860 
4861 static void
4862 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4863 			       bool *per_pixel_alpha, bool *global_alpha,
4864 			       int *global_alpha_value)
4865 {
4866 	*per_pixel_alpha = false;
4867 	*global_alpha = false;
4868 	*global_alpha_value = 0xff;
4869 
4870 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4871 		return;
4872 
4873 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4874 		static const uint32_t alpha_formats[] = {
4875 			DRM_FORMAT_ARGB8888,
4876 			DRM_FORMAT_RGBA8888,
4877 			DRM_FORMAT_ABGR8888,
4878 		};
4879 		uint32_t format = plane_state->fb->format->format;
4880 		unsigned int i;
4881 
4882 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4883 			if (format == alpha_formats[i]) {
4884 				*per_pixel_alpha = true;
4885 				break;
4886 			}
4887 		}
4888 	}
4889 
4890 	if (plane_state->alpha < 0xffff) {
4891 		*global_alpha = true;
4892 		*global_alpha_value = plane_state->alpha >> 8;
4893 	}
4894 }
4895 
4896 static int
4897 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4898 			    const enum surface_pixel_format format,
4899 			    enum dc_color_space *color_space)
4900 {
4901 	bool full_range;
4902 
4903 	*color_space = COLOR_SPACE_SRGB;
4904 
4905 	/* DRM color properties only affect non-RGB formats. */
4906 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4907 		return 0;
4908 
4909 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4910 
4911 	switch (plane_state->color_encoding) {
4912 	case DRM_COLOR_YCBCR_BT601:
4913 		if (full_range)
4914 			*color_space = COLOR_SPACE_YCBCR601;
4915 		else
4916 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4917 		break;
4918 
4919 	case DRM_COLOR_YCBCR_BT709:
4920 		if (full_range)
4921 			*color_space = COLOR_SPACE_YCBCR709;
4922 		else
4923 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4924 		break;
4925 
4926 	case DRM_COLOR_YCBCR_BT2020:
4927 		if (full_range)
4928 			*color_space = COLOR_SPACE_2020_YCBCR;
4929 		else
4930 			return -EINVAL;
4931 		break;
4932 
4933 	default:
4934 		return -EINVAL;
4935 	}
4936 
4937 	return 0;
4938 }
4939 
4940 static int
4941 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4942 			    const struct drm_plane_state *plane_state,
4943 			    const uint64_t tiling_flags,
4944 			    struct dc_plane_info *plane_info,
4945 			    struct dc_plane_address *address,
4946 			    bool tmz_surface,
4947 			    bool force_disable_dcc)
4948 {
4949 	const struct drm_framebuffer *fb = plane_state->fb;
4950 	const struct amdgpu_framebuffer *afb =
4951 		to_amdgpu_framebuffer(plane_state->fb);
4952 	int ret;
4953 
4954 	memset(plane_info, 0, sizeof(*plane_info));
4955 
4956 	switch (fb->format->format) {
4957 	case DRM_FORMAT_C8:
4958 		plane_info->format =
4959 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4960 		break;
4961 	case DRM_FORMAT_RGB565:
4962 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4963 		break;
4964 	case DRM_FORMAT_XRGB8888:
4965 	case DRM_FORMAT_ARGB8888:
4966 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4967 		break;
4968 	case DRM_FORMAT_XRGB2101010:
4969 	case DRM_FORMAT_ARGB2101010:
4970 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4971 		break;
4972 	case DRM_FORMAT_XBGR2101010:
4973 	case DRM_FORMAT_ABGR2101010:
4974 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4975 		break;
4976 	case DRM_FORMAT_XBGR8888:
4977 	case DRM_FORMAT_ABGR8888:
4978 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4979 		break;
4980 	case DRM_FORMAT_NV21:
4981 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4982 		break;
4983 	case DRM_FORMAT_NV12:
4984 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4985 		break;
4986 	case DRM_FORMAT_P010:
4987 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4988 		break;
4989 	case DRM_FORMAT_XRGB16161616F:
4990 	case DRM_FORMAT_ARGB16161616F:
4991 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4992 		break;
4993 	case DRM_FORMAT_XBGR16161616F:
4994 	case DRM_FORMAT_ABGR16161616F:
4995 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4996 		break;
4997 	case DRM_FORMAT_XRGB16161616:
4998 	case DRM_FORMAT_ARGB16161616:
4999 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5000 		break;
5001 	case DRM_FORMAT_XBGR16161616:
5002 	case DRM_FORMAT_ABGR16161616:
5003 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5004 		break;
5005 	default:
5006 		DRM_ERROR(
5007 			"Unsupported screen format %p4cc\n",
5008 			&fb->format->format);
5009 		return -EINVAL;
5010 	}
5011 
5012 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5013 	case DRM_MODE_ROTATE_0:
5014 		plane_info->rotation = ROTATION_ANGLE_0;
5015 		break;
5016 	case DRM_MODE_ROTATE_90:
5017 		plane_info->rotation = ROTATION_ANGLE_90;
5018 		break;
5019 	case DRM_MODE_ROTATE_180:
5020 		plane_info->rotation = ROTATION_ANGLE_180;
5021 		break;
5022 	case DRM_MODE_ROTATE_270:
5023 		plane_info->rotation = ROTATION_ANGLE_270;
5024 		break;
5025 	default:
5026 		plane_info->rotation = ROTATION_ANGLE_0;
5027 		break;
5028 	}
5029 
5030 	plane_info->visible = true;
5031 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5032 
5033 	plane_info->layer_index = 0;
5034 
5035 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5036 					  &plane_info->color_space);
5037 	if (ret)
5038 		return ret;
5039 
5040 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5041 					   plane_info->rotation, tiling_flags,
5042 					   &plane_info->tiling_info,
5043 					   &plane_info->plane_size,
5044 					   &plane_info->dcc, address, tmz_surface,
5045 					   force_disable_dcc);
5046 	if (ret)
5047 		return ret;
5048 
5049 	fill_blending_from_plane_state(
5050 		plane_state, &plane_info->per_pixel_alpha,
5051 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5052 
5053 	return 0;
5054 }
5055 
5056 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5057 				    struct dc_plane_state *dc_plane_state,
5058 				    struct drm_plane_state *plane_state,
5059 				    struct drm_crtc_state *crtc_state)
5060 {
5061 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5062 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5063 	struct dc_scaling_info scaling_info;
5064 	struct dc_plane_info plane_info;
5065 	int ret;
5066 	bool force_disable_dcc = false;
5067 
5068 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5069 	if (ret)
5070 		return ret;
5071 
5072 	dc_plane_state->src_rect = scaling_info.src_rect;
5073 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5074 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5075 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5076 
5077 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5078 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5079 					  afb->tiling_flags,
5080 					  &plane_info,
5081 					  &dc_plane_state->address,
5082 					  afb->tmz_surface,
5083 					  force_disable_dcc);
5084 	if (ret)
5085 		return ret;
5086 
5087 	dc_plane_state->format = plane_info.format;
5088 	dc_plane_state->color_space = plane_info.color_space;
5089 	dc_plane_state->format = plane_info.format;
5090 	dc_plane_state->plane_size = plane_info.plane_size;
5091 	dc_plane_state->rotation = plane_info.rotation;
5092 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5093 	dc_plane_state->stereo_format = plane_info.stereo_format;
5094 	dc_plane_state->tiling_info = plane_info.tiling_info;
5095 	dc_plane_state->visible = plane_info.visible;
5096 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5097 	dc_plane_state->global_alpha = plane_info.global_alpha;
5098 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5099 	dc_plane_state->dcc = plane_info.dcc;
5100 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5101 	dc_plane_state->flip_int_enabled = true;
5102 
5103 	/*
5104 	 * Always set input transfer function, since plane state is refreshed
5105 	 * every time.
5106 	 */
5107 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5108 	if (ret)
5109 		return ret;
5110 
5111 	return 0;
5112 }
5113 
5114 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5115 					   const struct dm_connector_state *dm_state,
5116 					   struct dc_stream_state *stream)
5117 {
5118 	enum amdgpu_rmx_type rmx_type;
5119 
5120 	struct rect src = { 0 }; /* viewport in composition space*/
5121 	struct rect dst = { 0 }; /* stream addressable area */
5122 
5123 	/* no mode. nothing to be done */
5124 	if (!mode)
5125 		return;
5126 
5127 	/* Full screen scaling by default */
5128 	src.width = mode->hdisplay;
5129 	src.height = mode->vdisplay;
5130 	dst.width = stream->timing.h_addressable;
5131 	dst.height = stream->timing.v_addressable;
5132 
5133 	if (dm_state) {
5134 		rmx_type = dm_state->scaling;
5135 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5136 			if (src.width * dst.height <
5137 					src.height * dst.width) {
5138 				/* height needs less upscaling/more downscaling */
5139 				dst.width = src.width *
5140 						dst.height / src.height;
5141 			} else {
5142 				/* width needs less upscaling/more downscaling */
5143 				dst.height = src.height *
5144 						dst.width / src.width;
5145 			}
5146 		} else if (rmx_type == RMX_CENTER) {
5147 			dst = src;
5148 		}
5149 
5150 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5151 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5152 
5153 		if (dm_state->underscan_enable) {
5154 			dst.x += dm_state->underscan_hborder / 2;
5155 			dst.y += dm_state->underscan_vborder / 2;
5156 			dst.width -= dm_state->underscan_hborder;
5157 			dst.height -= dm_state->underscan_vborder;
5158 		}
5159 	}
5160 
5161 	stream->src = src;
5162 	stream->dst = dst;
5163 
5164 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5165 		      dst.x, dst.y, dst.width, dst.height);
5166 
5167 }
5168 
5169 static enum dc_color_depth
5170 convert_color_depth_from_display_info(const struct drm_connector *connector,
5171 				      bool is_y420, int requested_bpc)
5172 {
5173 	uint8_t bpc;
5174 
5175 	if (is_y420) {
5176 		bpc = 8;
5177 
5178 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5179 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5180 			bpc = 16;
5181 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5182 			bpc = 12;
5183 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5184 			bpc = 10;
5185 	} else {
5186 		bpc = (uint8_t)connector->display_info.bpc;
5187 		/* Assume 8 bpc by default if no bpc is specified. */
5188 		bpc = bpc ? bpc : 8;
5189 	}
5190 
5191 	if (requested_bpc > 0) {
5192 		/*
5193 		 * Cap display bpc based on the user requested value.
5194 		 *
5195 		 * The value for state->max_bpc may not correctly updated
5196 		 * depending on when the connector gets added to the state
5197 		 * or if this was called outside of atomic check, so it
5198 		 * can't be used directly.
5199 		 */
5200 		bpc = min_t(u8, bpc, requested_bpc);
5201 
5202 		/* Round down to the nearest even number. */
5203 		bpc = bpc - (bpc & 1);
5204 	}
5205 
5206 	switch (bpc) {
5207 	case 0:
5208 		/*
5209 		 * Temporary Work around, DRM doesn't parse color depth for
5210 		 * EDID revision before 1.4
5211 		 * TODO: Fix edid parsing
5212 		 */
5213 		return COLOR_DEPTH_888;
5214 	case 6:
5215 		return COLOR_DEPTH_666;
5216 	case 8:
5217 		return COLOR_DEPTH_888;
5218 	case 10:
5219 		return COLOR_DEPTH_101010;
5220 	case 12:
5221 		return COLOR_DEPTH_121212;
5222 	case 14:
5223 		return COLOR_DEPTH_141414;
5224 	case 16:
5225 		return COLOR_DEPTH_161616;
5226 	default:
5227 		return COLOR_DEPTH_UNDEFINED;
5228 	}
5229 }
5230 
5231 static enum dc_aspect_ratio
5232 get_aspect_ratio(const struct drm_display_mode *mode_in)
5233 {
5234 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5235 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5236 }
5237 
5238 static enum dc_color_space
5239 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5240 {
5241 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5242 
5243 	switch (dc_crtc_timing->pixel_encoding)	{
5244 	case PIXEL_ENCODING_YCBCR422:
5245 	case PIXEL_ENCODING_YCBCR444:
5246 	case PIXEL_ENCODING_YCBCR420:
5247 	{
5248 		/*
5249 		 * 27030khz is the separation point between HDTV and SDTV
5250 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5251 		 * respectively
5252 		 */
5253 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5254 			if (dc_crtc_timing->flags.Y_ONLY)
5255 				color_space =
5256 					COLOR_SPACE_YCBCR709_LIMITED;
5257 			else
5258 				color_space = COLOR_SPACE_YCBCR709;
5259 		} else {
5260 			if (dc_crtc_timing->flags.Y_ONLY)
5261 				color_space =
5262 					COLOR_SPACE_YCBCR601_LIMITED;
5263 			else
5264 				color_space = COLOR_SPACE_YCBCR601;
5265 		}
5266 
5267 	}
5268 	break;
5269 	case PIXEL_ENCODING_RGB:
5270 		color_space = COLOR_SPACE_SRGB;
5271 		break;
5272 
5273 	default:
5274 		WARN_ON(1);
5275 		break;
5276 	}
5277 
5278 	return color_space;
5279 }
5280 
5281 static bool adjust_colour_depth_from_display_info(
5282 	struct dc_crtc_timing *timing_out,
5283 	const struct drm_display_info *info)
5284 {
5285 	enum dc_color_depth depth = timing_out->display_color_depth;
5286 	int normalized_clk;
5287 	do {
5288 		normalized_clk = timing_out->pix_clk_100hz / 10;
5289 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5290 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5291 			normalized_clk /= 2;
5292 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5293 		switch (depth) {
5294 		case COLOR_DEPTH_888:
5295 			break;
5296 		case COLOR_DEPTH_101010:
5297 			normalized_clk = (normalized_clk * 30) / 24;
5298 			break;
5299 		case COLOR_DEPTH_121212:
5300 			normalized_clk = (normalized_clk * 36) / 24;
5301 			break;
5302 		case COLOR_DEPTH_161616:
5303 			normalized_clk = (normalized_clk * 48) / 24;
5304 			break;
5305 		default:
5306 			/* The above depths are the only ones valid for HDMI. */
5307 			return false;
5308 		}
5309 		if (normalized_clk <= info->max_tmds_clock) {
5310 			timing_out->display_color_depth = depth;
5311 			return true;
5312 		}
5313 	} while (--depth > COLOR_DEPTH_666);
5314 	return false;
5315 }
5316 
5317 static void fill_stream_properties_from_drm_display_mode(
5318 	struct dc_stream_state *stream,
5319 	const struct drm_display_mode *mode_in,
5320 	const struct drm_connector *connector,
5321 	const struct drm_connector_state *connector_state,
5322 	const struct dc_stream_state *old_stream,
5323 	int requested_bpc)
5324 {
5325 	struct dc_crtc_timing *timing_out = &stream->timing;
5326 	const struct drm_display_info *info = &connector->display_info;
5327 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5328 	struct hdmi_vendor_infoframe hv_frame;
5329 	struct hdmi_avi_infoframe avi_frame;
5330 
5331 	memset(&hv_frame, 0, sizeof(hv_frame));
5332 	memset(&avi_frame, 0, sizeof(avi_frame));
5333 
5334 	timing_out->h_border_left = 0;
5335 	timing_out->h_border_right = 0;
5336 	timing_out->v_border_top = 0;
5337 	timing_out->v_border_bottom = 0;
5338 	/* TODO: un-hardcode */
5339 	if (drm_mode_is_420_only(info, mode_in)
5340 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5341 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5342 	else if (drm_mode_is_420_also(info, mode_in)
5343 			&& aconnector->force_yuv420_output)
5344 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5345 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5346 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5347 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5348 	else
5349 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5350 
5351 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5352 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5353 		connector,
5354 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5355 		requested_bpc);
5356 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5357 	timing_out->hdmi_vic = 0;
5358 
5359 	if(old_stream) {
5360 		timing_out->vic = old_stream->timing.vic;
5361 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5362 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5363 	} else {
5364 		timing_out->vic = drm_match_cea_mode(mode_in);
5365 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5366 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5367 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5368 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5369 	}
5370 
5371 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5372 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5373 		timing_out->vic = avi_frame.video_code;
5374 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5375 		timing_out->hdmi_vic = hv_frame.vic;
5376 	}
5377 
5378 	if (is_freesync_video_mode(mode_in, aconnector)) {
5379 		timing_out->h_addressable = mode_in->hdisplay;
5380 		timing_out->h_total = mode_in->htotal;
5381 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5382 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5383 		timing_out->v_total = mode_in->vtotal;
5384 		timing_out->v_addressable = mode_in->vdisplay;
5385 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5386 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5387 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5388 	} else {
5389 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5390 		timing_out->h_total = mode_in->crtc_htotal;
5391 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5392 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5393 		timing_out->v_total = mode_in->crtc_vtotal;
5394 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5395 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5396 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5397 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5398 	}
5399 
5400 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5401 
5402 	stream->output_color_space = get_output_color_space(timing_out);
5403 
5404 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5405 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5406 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5407 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5408 		    drm_mode_is_420_also(info, mode_in) &&
5409 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5410 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5411 			adjust_colour_depth_from_display_info(timing_out, info);
5412 		}
5413 	}
5414 }
5415 
5416 static void fill_audio_info(struct audio_info *audio_info,
5417 			    const struct drm_connector *drm_connector,
5418 			    const struct dc_sink *dc_sink)
5419 {
5420 	int i = 0;
5421 	int cea_revision = 0;
5422 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5423 
5424 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5425 	audio_info->product_id = edid_caps->product_id;
5426 
5427 	cea_revision = drm_connector->display_info.cea_rev;
5428 
5429 	strscpy(audio_info->display_name,
5430 		edid_caps->display_name,
5431 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5432 
5433 	if (cea_revision >= 3) {
5434 		audio_info->mode_count = edid_caps->audio_mode_count;
5435 
5436 		for (i = 0; i < audio_info->mode_count; ++i) {
5437 			audio_info->modes[i].format_code =
5438 					(enum audio_format_code)
5439 					(edid_caps->audio_modes[i].format_code);
5440 			audio_info->modes[i].channel_count =
5441 					edid_caps->audio_modes[i].channel_count;
5442 			audio_info->modes[i].sample_rates.all =
5443 					edid_caps->audio_modes[i].sample_rate;
5444 			audio_info->modes[i].sample_size =
5445 					edid_caps->audio_modes[i].sample_size;
5446 		}
5447 	}
5448 
5449 	audio_info->flags.all = edid_caps->speaker_flags;
5450 
5451 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5452 	if (drm_connector->latency_present[0]) {
5453 		audio_info->video_latency = drm_connector->video_latency[0];
5454 		audio_info->audio_latency = drm_connector->audio_latency[0];
5455 	}
5456 
5457 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5458 
5459 }
5460 
5461 static void
5462 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5463 				      struct drm_display_mode *dst_mode)
5464 {
5465 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5466 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5467 	dst_mode->crtc_clock = src_mode->crtc_clock;
5468 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5469 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5470 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5471 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5472 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5473 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5474 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5475 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5476 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5477 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5478 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5479 }
5480 
5481 static void
5482 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5483 					const struct drm_display_mode *native_mode,
5484 					bool scale_enabled)
5485 {
5486 	if (scale_enabled) {
5487 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5488 	} else if (native_mode->clock == drm_mode->clock &&
5489 			native_mode->htotal == drm_mode->htotal &&
5490 			native_mode->vtotal == drm_mode->vtotal) {
5491 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5492 	} else {
5493 		/* no scaling nor amdgpu inserted, no need to patch */
5494 	}
5495 }
5496 
5497 static struct dc_sink *
5498 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5499 {
5500 	struct dc_sink_init_data sink_init_data = { 0 };
5501 	struct dc_sink *sink = NULL;
5502 	sink_init_data.link = aconnector->dc_link;
5503 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5504 
5505 	sink = dc_sink_create(&sink_init_data);
5506 	if (!sink) {
5507 		DRM_ERROR("Failed to create sink!\n");
5508 		return NULL;
5509 	}
5510 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5511 
5512 	return sink;
5513 }
5514 
5515 static void set_multisync_trigger_params(
5516 		struct dc_stream_state *stream)
5517 {
5518 	struct dc_stream_state *master = NULL;
5519 
5520 	if (stream->triggered_crtc_reset.enabled) {
5521 		master = stream->triggered_crtc_reset.event_source;
5522 		stream->triggered_crtc_reset.event =
5523 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5524 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5525 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5526 	}
5527 }
5528 
5529 static void set_master_stream(struct dc_stream_state *stream_set[],
5530 			      int stream_count)
5531 {
5532 	int j, highest_rfr = 0, master_stream = 0;
5533 
5534 	for (j = 0;  j < stream_count; j++) {
5535 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5536 			int refresh_rate = 0;
5537 
5538 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5539 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5540 			if (refresh_rate > highest_rfr) {
5541 				highest_rfr = refresh_rate;
5542 				master_stream = j;
5543 			}
5544 		}
5545 	}
5546 	for (j = 0;  j < stream_count; j++) {
5547 		if (stream_set[j])
5548 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5549 	}
5550 }
5551 
5552 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5553 {
5554 	int i = 0;
5555 	struct dc_stream_state *stream;
5556 
5557 	if (context->stream_count < 2)
5558 		return;
5559 	for (i = 0; i < context->stream_count ; i++) {
5560 		if (!context->streams[i])
5561 			continue;
5562 		/*
5563 		 * TODO: add a function to read AMD VSDB bits and set
5564 		 * crtc_sync_master.multi_sync_enabled flag
5565 		 * For now it's set to false
5566 		 */
5567 	}
5568 
5569 	set_master_stream(context->streams, context->stream_count);
5570 
5571 	for (i = 0; i < context->stream_count ; i++) {
5572 		stream = context->streams[i];
5573 
5574 		if (!stream)
5575 			continue;
5576 
5577 		set_multisync_trigger_params(stream);
5578 	}
5579 }
5580 
5581 #if defined(CONFIG_DRM_AMD_DC_DCN)
5582 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5583 							struct dc_sink *sink, struct dc_stream_state *stream,
5584 							struct dsc_dec_dpcd_caps *dsc_caps)
5585 {
5586 	stream->timing.flags.DSC = 0;
5587 
5588 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5589 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5590 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5591 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5592 				      dsc_caps);
5593 	}
5594 }
5595 
5596 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5597 										struct dc_sink *sink, struct dc_stream_state *stream,
5598 										struct dsc_dec_dpcd_caps *dsc_caps)
5599 {
5600 	struct drm_connector *drm_connector = &aconnector->base;
5601 	uint32_t link_bandwidth_kbps;
5602 
5603 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5604 							dc_link_get_link_cap(aconnector->dc_link));
5605 	/* Set DSC policy according to dsc_clock_en */
5606 	dc_dsc_policy_set_enable_dsc_when_not_needed(
5607 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5608 
5609 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5610 
5611 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5612 						dsc_caps,
5613 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5614 						0,
5615 						link_bandwidth_kbps,
5616 						&stream->timing,
5617 						&stream->timing.dsc_cfg)) {
5618 			stream->timing.flags.DSC = 1;
5619 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5620 		}
5621 	}
5622 
5623 	/* Overwrite the stream flag if DSC is enabled through debugfs */
5624 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5625 		stream->timing.flags.DSC = 1;
5626 
5627 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5628 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5629 
5630 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5631 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5632 
5633 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5634 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5635 }
5636 #endif
5637 
5638 /**
5639  * DOC: FreeSync Video
5640  *
5641  * When a userspace application wants to play a video, the content follows a
5642  * standard format definition that usually specifies the FPS for that format.
5643  * The below list illustrates some video format and the expected FPS,
5644  * respectively:
5645  *
5646  * - TV/NTSC (23.976 FPS)
5647  * - Cinema (24 FPS)
5648  * - TV/PAL (25 FPS)
5649  * - TV/NTSC (29.97 FPS)
5650  * - TV/NTSC (30 FPS)
5651  * - Cinema HFR (48 FPS)
5652  * - TV/PAL (50 FPS)
5653  * - Commonly used (60 FPS)
5654  * - Multiples of 24 (48,72,96 FPS)
5655  *
5656  * The list of standards video format is not huge and can be added to the
5657  * connector modeset list beforehand. With that, userspace can leverage
5658  * FreeSync to extends the front porch in order to attain the target refresh
5659  * rate. Such a switch will happen seamlessly, without screen blanking or
5660  * reprogramming of the output in any other way. If the userspace requests a
5661  * modesetting change compatible with FreeSync modes that only differ in the
5662  * refresh rate, DC will skip the full update and avoid blink during the
5663  * transition. For example, the video player can change the modesetting from
5664  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5665  * causing any display blink. This same concept can be applied to a mode
5666  * setting change.
5667  */
5668 static struct drm_display_mode *
5669 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5670 			  bool use_probed_modes)
5671 {
5672 	struct drm_display_mode *m, *m_pref = NULL;
5673 	u16 current_refresh, highest_refresh;
5674 	struct list_head *list_head = use_probed_modes ?
5675 						    &aconnector->base.probed_modes :
5676 						    &aconnector->base.modes;
5677 
5678 	if (aconnector->freesync_vid_base.clock != 0)
5679 		return &aconnector->freesync_vid_base;
5680 
5681 	/* Find the preferred mode */
5682 	list_for_each_entry (m, list_head, head) {
5683 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5684 			m_pref = m;
5685 			break;
5686 		}
5687 	}
5688 
5689 	if (!m_pref) {
5690 		/* Probably an EDID with no preferred mode. Fallback to first entry */
5691 		m_pref = list_first_entry_or_null(
5692 			&aconnector->base.modes, struct drm_display_mode, head);
5693 		if (!m_pref) {
5694 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5695 			return NULL;
5696 		}
5697 	}
5698 
5699 	highest_refresh = drm_mode_vrefresh(m_pref);
5700 
5701 	/*
5702 	 * Find the mode with highest refresh rate with same resolution.
5703 	 * For some monitors, preferred mode is not the mode with highest
5704 	 * supported refresh rate.
5705 	 */
5706 	list_for_each_entry (m, list_head, head) {
5707 		current_refresh  = drm_mode_vrefresh(m);
5708 
5709 		if (m->hdisplay == m_pref->hdisplay &&
5710 		    m->vdisplay == m_pref->vdisplay &&
5711 		    highest_refresh < current_refresh) {
5712 			highest_refresh = current_refresh;
5713 			m_pref = m;
5714 		}
5715 	}
5716 
5717 	aconnector->freesync_vid_base = *m_pref;
5718 	return m_pref;
5719 }
5720 
5721 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5722 				   struct amdgpu_dm_connector *aconnector)
5723 {
5724 	struct drm_display_mode *high_mode;
5725 	int timing_diff;
5726 
5727 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5728 	if (!high_mode || !mode)
5729 		return false;
5730 
5731 	timing_diff = high_mode->vtotal - mode->vtotal;
5732 
5733 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5734 	    high_mode->hdisplay != mode->hdisplay ||
5735 	    high_mode->vdisplay != mode->vdisplay ||
5736 	    high_mode->hsync_start != mode->hsync_start ||
5737 	    high_mode->hsync_end != mode->hsync_end ||
5738 	    high_mode->htotal != mode->htotal ||
5739 	    high_mode->hskew != mode->hskew ||
5740 	    high_mode->vscan != mode->vscan ||
5741 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5742 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5743 		return false;
5744 	else
5745 		return true;
5746 }
5747 
5748 static struct dc_stream_state *
5749 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5750 		       const struct drm_display_mode *drm_mode,
5751 		       const struct dm_connector_state *dm_state,
5752 		       const struct dc_stream_state *old_stream,
5753 		       int requested_bpc)
5754 {
5755 	struct drm_display_mode *preferred_mode = NULL;
5756 	struct drm_connector *drm_connector;
5757 	const struct drm_connector_state *con_state =
5758 		dm_state ? &dm_state->base : NULL;
5759 	struct dc_stream_state *stream = NULL;
5760 	struct drm_display_mode mode = *drm_mode;
5761 	struct drm_display_mode saved_mode;
5762 	struct drm_display_mode *freesync_mode = NULL;
5763 	bool native_mode_found = false;
5764 	bool recalculate_timing = false;
5765 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5766 	int mode_refresh;
5767 	int preferred_refresh = 0;
5768 #if defined(CONFIG_DRM_AMD_DC_DCN)
5769 	struct dsc_dec_dpcd_caps dsc_caps;
5770 #endif
5771 	struct dc_sink *sink = NULL;
5772 
5773 	memset(&saved_mode, 0, sizeof(saved_mode));
5774 
5775 	if (aconnector == NULL) {
5776 		DRM_ERROR("aconnector is NULL!\n");
5777 		return stream;
5778 	}
5779 
5780 	drm_connector = &aconnector->base;
5781 
5782 	if (!aconnector->dc_sink) {
5783 		sink = create_fake_sink(aconnector);
5784 		if (!sink)
5785 			return stream;
5786 	} else {
5787 		sink = aconnector->dc_sink;
5788 		dc_sink_retain(sink);
5789 	}
5790 
5791 	stream = dc_create_stream_for_sink(sink);
5792 
5793 	if (stream == NULL) {
5794 		DRM_ERROR("Failed to create stream for sink!\n");
5795 		goto finish;
5796 	}
5797 
5798 	stream->dm_stream_context = aconnector;
5799 
5800 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5801 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5802 
5803 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5804 		/* Search for preferred mode */
5805 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5806 			native_mode_found = true;
5807 			break;
5808 		}
5809 	}
5810 	if (!native_mode_found)
5811 		preferred_mode = list_first_entry_or_null(
5812 				&aconnector->base.modes,
5813 				struct drm_display_mode,
5814 				head);
5815 
5816 	mode_refresh = drm_mode_vrefresh(&mode);
5817 
5818 	if (preferred_mode == NULL) {
5819 		/*
5820 		 * This may not be an error, the use case is when we have no
5821 		 * usermode calls to reset and set mode upon hotplug. In this
5822 		 * case, we call set mode ourselves to restore the previous mode
5823 		 * and the modelist may not be filled in in time.
5824 		 */
5825 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5826 	} else {
5827 		recalculate_timing = amdgpu_freesync_vid_mode &&
5828 				 is_freesync_video_mode(&mode, aconnector);
5829 		if (recalculate_timing) {
5830 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5831 			saved_mode = mode;
5832 			mode = *freesync_mode;
5833 		} else {
5834 			decide_crtc_timing_for_drm_display_mode(
5835 				&mode, preferred_mode, scale);
5836 
5837 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
5838 		}
5839 	}
5840 
5841 	if (recalculate_timing)
5842 		drm_mode_set_crtcinfo(&saved_mode, 0);
5843 	else if (!dm_state)
5844 		drm_mode_set_crtcinfo(&mode, 0);
5845 
5846        /*
5847 	* If scaling is enabled and refresh rate didn't change
5848 	* we copy the vic and polarities of the old timings
5849 	*/
5850 	if (!scale || mode_refresh != preferred_refresh)
5851 		fill_stream_properties_from_drm_display_mode(
5852 			stream, &mode, &aconnector->base, con_state, NULL,
5853 			requested_bpc);
5854 	else
5855 		fill_stream_properties_from_drm_display_mode(
5856 			stream, &mode, &aconnector->base, con_state, old_stream,
5857 			requested_bpc);
5858 
5859 #if defined(CONFIG_DRM_AMD_DC_DCN)
5860 	/* SST DSC determination policy */
5861 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5862 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5863 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5864 #endif
5865 
5866 	update_stream_scaling_settings(&mode, dm_state, stream);
5867 
5868 	fill_audio_info(
5869 		&stream->audio_info,
5870 		drm_connector,
5871 		sink);
5872 
5873 	update_stream_signal(stream, sink);
5874 
5875 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5876 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5877 
5878 	if (stream->link->psr_settings.psr_feature_enabled) {
5879 		//
5880 		// should decide stream support vsc sdp colorimetry capability
5881 		// before building vsc info packet
5882 		//
5883 		stream->use_vsc_sdp_for_colorimetry = false;
5884 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5885 			stream->use_vsc_sdp_for_colorimetry =
5886 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5887 		} else {
5888 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5889 				stream->use_vsc_sdp_for_colorimetry = true;
5890 		}
5891 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5892 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
5893 
5894 	}
5895 finish:
5896 	dc_sink_release(sink);
5897 
5898 	return stream;
5899 }
5900 
5901 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5902 {
5903 	drm_crtc_cleanup(crtc);
5904 	kfree(crtc);
5905 }
5906 
5907 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5908 				  struct drm_crtc_state *state)
5909 {
5910 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5911 
5912 	/* TODO Destroy dc_stream objects are stream object is flattened */
5913 	if (cur->stream)
5914 		dc_stream_release(cur->stream);
5915 
5916 
5917 	__drm_atomic_helper_crtc_destroy_state(state);
5918 
5919 
5920 	kfree(state);
5921 }
5922 
5923 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5924 {
5925 	struct dm_crtc_state *state;
5926 
5927 	if (crtc->state)
5928 		dm_crtc_destroy_state(crtc, crtc->state);
5929 
5930 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5931 	if (WARN_ON(!state))
5932 		return;
5933 
5934 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5935 }
5936 
5937 static struct drm_crtc_state *
5938 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5939 {
5940 	struct dm_crtc_state *state, *cur;
5941 
5942 	cur = to_dm_crtc_state(crtc->state);
5943 
5944 	if (WARN_ON(!crtc->state))
5945 		return NULL;
5946 
5947 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5948 	if (!state)
5949 		return NULL;
5950 
5951 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5952 
5953 	if (cur->stream) {
5954 		state->stream = cur->stream;
5955 		dc_stream_retain(state->stream);
5956 	}
5957 
5958 	state->active_planes = cur->active_planes;
5959 	state->vrr_infopacket = cur->vrr_infopacket;
5960 	state->abm_level = cur->abm_level;
5961 	state->vrr_supported = cur->vrr_supported;
5962 	state->freesync_config = cur->freesync_config;
5963 	state->cm_has_degamma = cur->cm_has_degamma;
5964 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5965 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
5966 
5967 	return &state->base;
5968 }
5969 
5970 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5971 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5972 {
5973 	crtc_debugfs_init(crtc);
5974 
5975 	return 0;
5976 }
5977 #endif
5978 
5979 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5980 {
5981 	enum dc_irq_source irq_source;
5982 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5983 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5984 	int rc;
5985 
5986 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5987 
5988 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5989 
5990 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5991 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
5992 	return rc;
5993 }
5994 
5995 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5996 {
5997 	enum dc_irq_source irq_source;
5998 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5999 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6000 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6001 #if defined(CONFIG_DRM_AMD_DC_DCN)
6002 	struct amdgpu_display_manager *dm = &adev->dm;
6003 	unsigned long flags;
6004 #endif
6005 	int rc = 0;
6006 
6007 	if (enable) {
6008 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6009 		if (amdgpu_dm_vrr_active(acrtc_state))
6010 			rc = dm_set_vupdate_irq(crtc, true);
6011 	} else {
6012 		/* vblank irq off -> vupdate irq off */
6013 		rc = dm_set_vupdate_irq(crtc, false);
6014 	}
6015 
6016 	if (rc)
6017 		return rc;
6018 
6019 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6020 
6021 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6022 		return -EBUSY;
6023 
6024 	if (amdgpu_in_reset(adev))
6025 		return 0;
6026 
6027 #if defined(CONFIG_DRM_AMD_DC_DCN)
6028 	spin_lock_irqsave(&dm->vblank_lock, flags);
6029 	dm->vblank_workqueue->dm = dm;
6030 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
6031 	dm->vblank_workqueue->enable = enable;
6032 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
6033 	schedule_work(&dm->vblank_workqueue->mall_work);
6034 #endif
6035 
6036 	return 0;
6037 }
6038 
6039 static int dm_enable_vblank(struct drm_crtc *crtc)
6040 {
6041 	return dm_set_vblank(crtc, true);
6042 }
6043 
6044 static void dm_disable_vblank(struct drm_crtc *crtc)
6045 {
6046 	dm_set_vblank(crtc, false);
6047 }
6048 
6049 /* Implemented only the options currently availible for the driver */
6050 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6051 	.reset = dm_crtc_reset_state,
6052 	.destroy = amdgpu_dm_crtc_destroy,
6053 	.set_config = drm_atomic_helper_set_config,
6054 	.page_flip = drm_atomic_helper_page_flip,
6055 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6056 	.atomic_destroy_state = dm_crtc_destroy_state,
6057 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6058 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6059 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6060 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6061 	.enable_vblank = dm_enable_vblank,
6062 	.disable_vblank = dm_disable_vblank,
6063 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6064 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6065 	.late_register = amdgpu_dm_crtc_late_register,
6066 #endif
6067 };
6068 
6069 static enum drm_connector_status
6070 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6071 {
6072 	bool connected;
6073 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6074 
6075 	/*
6076 	 * Notes:
6077 	 * 1. This interface is NOT called in context of HPD irq.
6078 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6079 	 * makes it a bad place for *any* MST-related activity.
6080 	 */
6081 
6082 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6083 	    !aconnector->fake_enable)
6084 		connected = (aconnector->dc_sink != NULL);
6085 	else
6086 		connected = (aconnector->base.force == DRM_FORCE_ON);
6087 
6088 	update_subconnector_property(aconnector);
6089 
6090 	return (connected ? connector_status_connected :
6091 			connector_status_disconnected);
6092 }
6093 
6094 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6095 					    struct drm_connector_state *connector_state,
6096 					    struct drm_property *property,
6097 					    uint64_t val)
6098 {
6099 	struct drm_device *dev = connector->dev;
6100 	struct amdgpu_device *adev = drm_to_adev(dev);
6101 	struct dm_connector_state *dm_old_state =
6102 		to_dm_connector_state(connector->state);
6103 	struct dm_connector_state *dm_new_state =
6104 		to_dm_connector_state(connector_state);
6105 
6106 	int ret = -EINVAL;
6107 
6108 	if (property == dev->mode_config.scaling_mode_property) {
6109 		enum amdgpu_rmx_type rmx_type;
6110 
6111 		switch (val) {
6112 		case DRM_MODE_SCALE_CENTER:
6113 			rmx_type = RMX_CENTER;
6114 			break;
6115 		case DRM_MODE_SCALE_ASPECT:
6116 			rmx_type = RMX_ASPECT;
6117 			break;
6118 		case DRM_MODE_SCALE_FULLSCREEN:
6119 			rmx_type = RMX_FULL;
6120 			break;
6121 		case DRM_MODE_SCALE_NONE:
6122 		default:
6123 			rmx_type = RMX_OFF;
6124 			break;
6125 		}
6126 
6127 		if (dm_old_state->scaling == rmx_type)
6128 			return 0;
6129 
6130 		dm_new_state->scaling = rmx_type;
6131 		ret = 0;
6132 	} else if (property == adev->mode_info.underscan_hborder_property) {
6133 		dm_new_state->underscan_hborder = val;
6134 		ret = 0;
6135 	} else if (property == adev->mode_info.underscan_vborder_property) {
6136 		dm_new_state->underscan_vborder = val;
6137 		ret = 0;
6138 	} else if (property == adev->mode_info.underscan_property) {
6139 		dm_new_state->underscan_enable = val;
6140 		ret = 0;
6141 	} else if (property == adev->mode_info.abm_level_property) {
6142 		dm_new_state->abm_level = val;
6143 		ret = 0;
6144 	}
6145 
6146 	return ret;
6147 }
6148 
6149 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6150 					    const struct drm_connector_state *state,
6151 					    struct drm_property *property,
6152 					    uint64_t *val)
6153 {
6154 	struct drm_device *dev = connector->dev;
6155 	struct amdgpu_device *adev = drm_to_adev(dev);
6156 	struct dm_connector_state *dm_state =
6157 		to_dm_connector_state(state);
6158 	int ret = -EINVAL;
6159 
6160 	if (property == dev->mode_config.scaling_mode_property) {
6161 		switch (dm_state->scaling) {
6162 		case RMX_CENTER:
6163 			*val = DRM_MODE_SCALE_CENTER;
6164 			break;
6165 		case RMX_ASPECT:
6166 			*val = DRM_MODE_SCALE_ASPECT;
6167 			break;
6168 		case RMX_FULL:
6169 			*val = DRM_MODE_SCALE_FULLSCREEN;
6170 			break;
6171 		case RMX_OFF:
6172 		default:
6173 			*val = DRM_MODE_SCALE_NONE;
6174 			break;
6175 		}
6176 		ret = 0;
6177 	} else if (property == adev->mode_info.underscan_hborder_property) {
6178 		*val = dm_state->underscan_hborder;
6179 		ret = 0;
6180 	} else if (property == adev->mode_info.underscan_vborder_property) {
6181 		*val = dm_state->underscan_vborder;
6182 		ret = 0;
6183 	} else if (property == adev->mode_info.underscan_property) {
6184 		*val = dm_state->underscan_enable;
6185 		ret = 0;
6186 	} else if (property == adev->mode_info.abm_level_property) {
6187 		*val = dm_state->abm_level;
6188 		ret = 0;
6189 	}
6190 
6191 	return ret;
6192 }
6193 
6194 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6195 {
6196 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6197 
6198 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6199 }
6200 
6201 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6202 {
6203 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6204 	const struct dc_link *link = aconnector->dc_link;
6205 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6206 	struct amdgpu_display_manager *dm = &adev->dm;
6207 	int i;
6208 
6209 	/*
6210 	 * Call only if mst_mgr was iniitalized before since it's not done
6211 	 * for all connector types.
6212 	 */
6213 	if (aconnector->mst_mgr.dev)
6214 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6215 
6216 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6217 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6218 	for (i = 0; i < dm->num_of_edps; i++) {
6219 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6220 			backlight_device_unregister(dm->backlight_dev[i]);
6221 			dm->backlight_dev[i] = NULL;
6222 		}
6223 	}
6224 #endif
6225 
6226 	if (aconnector->dc_em_sink)
6227 		dc_sink_release(aconnector->dc_em_sink);
6228 	aconnector->dc_em_sink = NULL;
6229 	if (aconnector->dc_sink)
6230 		dc_sink_release(aconnector->dc_sink);
6231 	aconnector->dc_sink = NULL;
6232 
6233 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6234 	drm_connector_unregister(connector);
6235 	drm_connector_cleanup(connector);
6236 	if (aconnector->i2c) {
6237 		i2c_del_adapter(&aconnector->i2c->base);
6238 		kfree(aconnector->i2c);
6239 	}
6240 	kfree(aconnector->dm_dp_aux.aux.name);
6241 
6242 	kfree(connector);
6243 }
6244 
6245 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6246 {
6247 	struct dm_connector_state *state =
6248 		to_dm_connector_state(connector->state);
6249 
6250 	if (connector->state)
6251 		__drm_atomic_helper_connector_destroy_state(connector->state);
6252 
6253 	kfree(state);
6254 
6255 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6256 
6257 	if (state) {
6258 		state->scaling = RMX_OFF;
6259 		state->underscan_enable = false;
6260 		state->underscan_hborder = 0;
6261 		state->underscan_vborder = 0;
6262 		state->base.max_requested_bpc = 8;
6263 		state->vcpi_slots = 0;
6264 		state->pbn = 0;
6265 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6266 			state->abm_level = amdgpu_dm_abm_level;
6267 
6268 		__drm_atomic_helper_connector_reset(connector, &state->base);
6269 	}
6270 }
6271 
6272 struct drm_connector_state *
6273 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6274 {
6275 	struct dm_connector_state *state =
6276 		to_dm_connector_state(connector->state);
6277 
6278 	struct dm_connector_state *new_state =
6279 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6280 
6281 	if (!new_state)
6282 		return NULL;
6283 
6284 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6285 
6286 	new_state->freesync_capable = state->freesync_capable;
6287 	new_state->abm_level = state->abm_level;
6288 	new_state->scaling = state->scaling;
6289 	new_state->underscan_enable = state->underscan_enable;
6290 	new_state->underscan_hborder = state->underscan_hborder;
6291 	new_state->underscan_vborder = state->underscan_vborder;
6292 	new_state->vcpi_slots = state->vcpi_slots;
6293 	new_state->pbn = state->pbn;
6294 	return &new_state->base;
6295 }
6296 
6297 static int
6298 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6299 {
6300 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6301 		to_amdgpu_dm_connector(connector);
6302 	int r;
6303 
6304 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6305 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6306 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6307 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6308 		if (r)
6309 			return r;
6310 	}
6311 
6312 #if defined(CONFIG_DEBUG_FS)
6313 	connector_debugfs_init(amdgpu_dm_connector);
6314 #endif
6315 
6316 	return 0;
6317 }
6318 
6319 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6320 	.reset = amdgpu_dm_connector_funcs_reset,
6321 	.detect = amdgpu_dm_connector_detect,
6322 	.fill_modes = drm_helper_probe_single_connector_modes,
6323 	.destroy = amdgpu_dm_connector_destroy,
6324 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6325 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6326 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6327 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6328 	.late_register = amdgpu_dm_connector_late_register,
6329 	.early_unregister = amdgpu_dm_connector_unregister
6330 };
6331 
6332 static int get_modes(struct drm_connector *connector)
6333 {
6334 	return amdgpu_dm_connector_get_modes(connector);
6335 }
6336 
6337 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6338 {
6339 	struct dc_sink_init_data init_params = {
6340 			.link = aconnector->dc_link,
6341 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6342 	};
6343 	struct edid *edid;
6344 
6345 	if (!aconnector->base.edid_blob_ptr) {
6346 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6347 				aconnector->base.name);
6348 
6349 		aconnector->base.force = DRM_FORCE_OFF;
6350 		aconnector->base.override_edid = false;
6351 		return;
6352 	}
6353 
6354 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6355 
6356 	aconnector->edid = edid;
6357 
6358 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6359 		aconnector->dc_link,
6360 		(uint8_t *)edid,
6361 		(edid->extensions + 1) * EDID_LENGTH,
6362 		&init_params);
6363 
6364 	if (aconnector->base.force == DRM_FORCE_ON) {
6365 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6366 		aconnector->dc_link->local_sink :
6367 		aconnector->dc_em_sink;
6368 		dc_sink_retain(aconnector->dc_sink);
6369 	}
6370 }
6371 
6372 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6373 {
6374 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6375 
6376 	/*
6377 	 * In case of headless boot with force on for DP managed connector
6378 	 * Those settings have to be != 0 to get initial modeset
6379 	 */
6380 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6381 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6382 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6383 	}
6384 
6385 
6386 	aconnector->base.override_edid = true;
6387 	create_eml_sink(aconnector);
6388 }
6389 
6390 static struct dc_stream_state *
6391 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6392 				const struct drm_display_mode *drm_mode,
6393 				const struct dm_connector_state *dm_state,
6394 				const struct dc_stream_state *old_stream)
6395 {
6396 	struct drm_connector *connector = &aconnector->base;
6397 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6398 	struct dc_stream_state *stream;
6399 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6400 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6401 	enum dc_status dc_result = DC_OK;
6402 
6403 	do {
6404 		stream = create_stream_for_sink(aconnector, drm_mode,
6405 						dm_state, old_stream,
6406 						requested_bpc);
6407 		if (stream == NULL) {
6408 			DRM_ERROR("Failed to create stream for sink!\n");
6409 			break;
6410 		}
6411 
6412 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6413 
6414 		if (dc_result != DC_OK) {
6415 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6416 				      drm_mode->hdisplay,
6417 				      drm_mode->vdisplay,
6418 				      drm_mode->clock,
6419 				      dc_result,
6420 				      dc_status_to_str(dc_result));
6421 
6422 			dc_stream_release(stream);
6423 			stream = NULL;
6424 			requested_bpc -= 2; /* lower bpc to retry validation */
6425 		}
6426 
6427 	} while (stream == NULL && requested_bpc >= 6);
6428 
6429 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6430 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6431 
6432 		aconnector->force_yuv420_output = true;
6433 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6434 						dm_state, old_stream);
6435 		aconnector->force_yuv420_output = false;
6436 	}
6437 
6438 	return stream;
6439 }
6440 
6441 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6442 				   struct drm_display_mode *mode)
6443 {
6444 	int result = MODE_ERROR;
6445 	struct dc_sink *dc_sink;
6446 	/* TODO: Unhardcode stream count */
6447 	struct dc_stream_state *stream;
6448 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6449 
6450 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6451 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6452 		return result;
6453 
6454 	/*
6455 	 * Only run this the first time mode_valid is called to initilialize
6456 	 * EDID mgmt
6457 	 */
6458 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6459 		!aconnector->dc_em_sink)
6460 		handle_edid_mgmt(aconnector);
6461 
6462 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6463 
6464 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6465 				aconnector->base.force != DRM_FORCE_ON) {
6466 		DRM_ERROR("dc_sink is NULL!\n");
6467 		goto fail;
6468 	}
6469 
6470 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6471 	if (stream) {
6472 		dc_stream_release(stream);
6473 		result = MODE_OK;
6474 	}
6475 
6476 fail:
6477 	/* TODO: error handling*/
6478 	return result;
6479 }
6480 
6481 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6482 				struct dc_info_packet *out)
6483 {
6484 	struct hdmi_drm_infoframe frame;
6485 	unsigned char buf[30]; /* 26 + 4 */
6486 	ssize_t len;
6487 	int ret, i;
6488 
6489 	memset(out, 0, sizeof(*out));
6490 
6491 	if (!state->hdr_output_metadata)
6492 		return 0;
6493 
6494 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6495 	if (ret)
6496 		return ret;
6497 
6498 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6499 	if (len < 0)
6500 		return (int)len;
6501 
6502 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6503 	if (len != 30)
6504 		return -EINVAL;
6505 
6506 	/* Prepare the infopacket for DC. */
6507 	switch (state->connector->connector_type) {
6508 	case DRM_MODE_CONNECTOR_HDMIA:
6509 		out->hb0 = 0x87; /* type */
6510 		out->hb1 = 0x01; /* version */
6511 		out->hb2 = 0x1A; /* length */
6512 		out->sb[0] = buf[3]; /* checksum */
6513 		i = 1;
6514 		break;
6515 
6516 	case DRM_MODE_CONNECTOR_DisplayPort:
6517 	case DRM_MODE_CONNECTOR_eDP:
6518 		out->hb0 = 0x00; /* sdp id, zero */
6519 		out->hb1 = 0x87; /* type */
6520 		out->hb2 = 0x1D; /* payload len - 1 */
6521 		out->hb3 = (0x13 << 2); /* sdp version */
6522 		out->sb[0] = 0x01; /* version */
6523 		out->sb[1] = 0x1A; /* length */
6524 		i = 2;
6525 		break;
6526 
6527 	default:
6528 		return -EINVAL;
6529 	}
6530 
6531 	memcpy(&out->sb[i], &buf[4], 26);
6532 	out->valid = true;
6533 
6534 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6535 		       sizeof(out->sb), false);
6536 
6537 	return 0;
6538 }
6539 
6540 static int
6541 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6542 				 struct drm_atomic_state *state)
6543 {
6544 	struct drm_connector_state *new_con_state =
6545 		drm_atomic_get_new_connector_state(state, conn);
6546 	struct drm_connector_state *old_con_state =
6547 		drm_atomic_get_old_connector_state(state, conn);
6548 	struct drm_crtc *crtc = new_con_state->crtc;
6549 	struct drm_crtc_state *new_crtc_state;
6550 	int ret;
6551 
6552 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6553 
6554 	if (!crtc)
6555 		return 0;
6556 
6557 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6558 		struct dc_info_packet hdr_infopacket;
6559 
6560 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6561 		if (ret)
6562 			return ret;
6563 
6564 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6565 		if (IS_ERR(new_crtc_state))
6566 			return PTR_ERR(new_crtc_state);
6567 
6568 		/*
6569 		 * DC considers the stream backends changed if the
6570 		 * static metadata changes. Forcing the modeset also
6571 		 * gives a simple way for userspace to switch from
6572 		 * 8bpc to 10bpc when setting the metadata to enter
6573 		 * or exit HDR.
6574 		 *
6575 		 * Changing the static metadata after it's been
6576 		 * set is permissible, however. So only force a
6577 		 * modeset if we're entering or exiting HDR.
6578 		 */
6579 		new_crtc_state->mode_changed =
6580 			!old_con_state->hdr_output_metadata ||
6581 			!new_con_state->hdr_output_metadata;
6582 	}
6583 
6584 	return 0;
6585 }
6586 
6587 static const struct drm_connector_helper_funcs
6588 amdgpu_dm_connector_helper_funcs = {
6589 	/*
6590 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6591 	 * modes will be filtered by drm_mode_validate_size(), and those modes
6592 	 * are missing after user start lightdm. So we need to renew modes list.
6593 	 * in get_modes call back, not just return the modes count
6594 	 */
6595 	.get_modes = get_modes,
6596 	.mode_valid = amdgpu_dm_connector_mode_valid,
6597 	.atomic_check = amdgpu_dm_connector_atomic_check,
6598 };
6599 
6600 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6601 {
6602 }
6603 
6604 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6605 {
6606 	struct drm_atomic_state *state = new_crtc_state->state;
6607 	struct drm_plane *plane;
6608 	int num_active = 0;
6609 
6610 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6611 		struct drm_plane_state *new_plane_state;
6612 
6613 		/* Cursor planes are "fake". */
6614 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6615 			continue;
6616 
6617 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6618 
6619 		if (!new_plane_state) {
6620 			/*
6621 			 * The plane is enable on the CRTC and hasn't changed
6622 			 * state. This means that it previously passed
6623 			 * validation and is therefore enabled.
6624 			 */
6625 			num_active += 1;
6626 			continue;
6627 		}
6628 
6629 		/* We need a framebuffer to be considered enabled. */
6630 		num_active += (new_plane_state->fb != NULL);
6631 	}
6632 
6633 	return num_active;
6634 }
6635 
6636 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6637 					 struct drm_crtc_state *new_crtc_state)
6638 {
6639 	struct dm_crtc_state *dm_new_crtc_state =
6640 		to_dm_crtc_state(new_crtc_state);
6641 
6642 	dm_new_crtc_state->active_planes = 0;
6643 
6644 	if (!dm_new_crtc_state->stream)
6645 		return;
6646 
6647 	dm_new_crtc_state->active_planes =
6648 		count_crtc_active_planes(new_crtc_state);
6649 }
6650 
6651 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6652 				       struct drm_atomic_state *state)
6653 {
6654 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6655 									  crtc);
6656 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6657 	struct dc *dc = adev->dm.dc;
6658 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6659 	int ret = -EINVAL;
6660 
6661 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6662 
6663 	dm_update_crtc_active_planes(crtc, crtc_state);
6664 
6665 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6666 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
6667 		return ret;
6668 	}
6669 
6670 	/*
6671 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6672 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6673 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6674 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6675 	 */
6676 	if (crtc_state->enable &&
6677 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6678 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6679 		return -EINVAL;
6680 	}
6681 
6682 	/* In some use cases, like reset, no stream is attached */
6683 	if (!dm_crtc_state->stream)
6684 		return 0;
6685 
6686 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6687 		return 0;
6688 
6689 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6690 	return ret;
6691 }
6692 
6693 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6694 				      const struct drm_display_mode *mode,
6695 				      struct drm_display_mode *adjusted_mode)
6696 {
6697 	return true;
6698 }
6699 
6700 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6701 	.disable = dm_crtc_helper_disable,
6702 	.atomic_check = dm_crtc_helper_atomic_check,
6703 	.mode_fixup = dm_crtc_helper_mode_fixup,
6704 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6705 };
6706 
6707 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6708 {
6709 
6710 }
6711 
6712 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6713 {
6714 	switch (display_color_depth) {
6715 		case COLOR_DEPTH_666:
6716 			return 6;
6717 		case COLOR_DEPTH_888:
6718 			return 8;
6719 		case COLOR_DEPTH_101010:
6720 			return 10;
6721 		case COLOR_DEPTH_121212:
6722 			return 12;
6723 		case COLOR_DEPTH_141414:
6724 			return 14;
6725 		case COLOR_DEPTH_161616:
6726 			return 16;
6727 		default:
6728 			break;
6729 		}
6730 	return 0;
6731 }
6732 
6733 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6734 					  struct drm_crtc_state *crtc_state,
6735 					  struct drm_connector_state *conn_state)
6736 {
6737 	struct drm_atomic_state *state = crtc_state->state;
6738 	struct drm_connector *connector = conn_state->connector;
6739 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6740 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6741 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6742 	struct drm_dp_mst_topology_mgr *mst_mgr;
6743 	struct drm_dp_mst_port *mst_port;
6744 	enum dc_color_depth color_depth;
6745 	int clock, bpp = 0;
6746 	bool is_y420 = false;
6747 
6748 	if (!aconnector->port || !aconnector->dc_sink)
6749 		return 0;
6750 
6751 	mst_port = aconnector->port;
6752 	mst_mgr = &aconnector->mst_port->mst_mgr;
6753 
6754 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6755 		return 0;
6756 
6757 	if (!state->duplicated) {
6758 		int max_bpc = conn_state->max_requested_bpc;
6759 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6760 				aconnector->force_yuv420_output;
6761 		color_depth = convert_color_depth_from_display_info(connector,
6762 								    is_y420,
6763 								    max_bpc);
6764 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6765 		clock = adjusted_mode->clock;
6766 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6767 	}
6768 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6769 									   mst_mgr,
6770 									   mst_port,
6771 									   dm_new_connector_state->pbn,
6772 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6773 	if (dm_new_connector_state->vcpi_slots < 0) {
6774 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6775 		return dm_new_connector_state->vcpi_slots;
6776 	}
6777 	return 0;
6778 }
6779 
6780 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6781 	.disable = dm_encoder_helper_disable,
6782 	.atomic_check = dm_encoder_helper_atomic_check
6783 };
6784 
6785 #if defined(CONFIG_DRM_AMD_DC_DCN)
6786 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6787 					    struct dc_state *dc_state)
6788 {
6789 	struct dc_stream_state *stream = NULL;
6790 	struct drm_connector *connector;
6791 	struct drm_connector_state *new_con_state;
6792 	struct amdgpu_dm_connector *aconnector;
6793 	struct dm_connector_state *dm_conn_state;
6794 	int i, j, clock, bpp;
6795 	int vcpi, pbn_div, pbn = 0;
6796 
6797 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6798 
6799 		aconnector = to_amdgpu_dm_connector(connector);
6800 
6801 		if (!aconnector->port)
6802 			continue;
6803 
6804 		if (!new_con_state || !new_con_state->crtc)
6805 			continue;
6806 
6807 		dm_conn_state = to_dm_connector_state(new_con_state);
6808 
6809 		for (j = 0; j < dc_state->stream_count; j++) {
6810 			stream = dc_state->streams[j];
6811 			if (!stream)
6812 				continue;
6813 
6814 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6815 				break;
6816 
6817 			stream = NULL;
6818 		}
6819 
6820 		if (!stream)
6821 			continue;
6822 
6823 		if (stream->timing.flags.DSC != 1) {
6824 			drm_dp_mst_atomic_enable_dsc(state,
6825 						     aconnector->port,
6826 						     dm_conn_state->pbn,
6827 						     0,
6828 						     false);
6829 			continue;
6830 		}
6831 
6832 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6833 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6834 		clock = stream->timing.pix_clk_100hz / 10;
6835 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6836 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6837 						    aconnector->port,
6838 						    pbn, pbn_div,
6839 						    true);
6840 		if (vcpi < 0)
6841 			return vcpi;
6842 
6843 		dm_conn_state->pbn = pbn;
6844 		dm_conn_state->vcpi_slots = vcpi;
6845 	}
6846 	return 0;
6847 }
6848 #endif
6849 
6850 static void dm_drm_plane_reset(struct drm_plane *plane)
6851 {
6852 	struct dm_plane_state *amdgpu_state = NULL;
6853 
6854 	if (plane->state)
6855 		plane->funcs->atomic_destroy_state(plane, plane->state);
6856 
6857 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6858 	WARN_ON(amdgpu_state == NULL);
6859 
6860 	if (amdgpu_state)
6861 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6862 }
6863 
6864 static struct drm_plane_state *
6865 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6866 {
6867 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6868 
6869 	old_dm_plane_state = to_dm_plane_state(plane->state);
6870 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6871 	if (!dm_plane_state)
6872 		return NULL;
6873 
6874 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6875 
6876 	if (old_dm_plane_state->dc_state) {
6877 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6878 		dc_plane_state_retain(dm_plane_state->dc_state);
6879 	}
6880 
6881 	return &dm_plane_state->base;
6882 }
6883 
6884 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6885 				struct drm_plane_state *state)
6886 {
6887 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6888 
6889 	if (dm_plane_state->dc_state)
6890 		dc_plane_state_release(dm_plane_state->dc_state);
6891 
6892 	drm_atomic_helper_plane_destroy_state(plane, state);
6893 }
6894 
6895 static const struct drm_plane_funcs dm_plane_funcs = {
6896 	.update_plane	= drm_atomic_helper_update_plane,
6897 	.disable_plane	= drm_atomic_helper_disable_plane,
6898 	.destroy	= drm_primary_helper_destroy,
6899 	.reset = dm_drm_plane_reset,
6900 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6901 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6902 	.format_mod_supported = dm_plane_format_mod_supported,
6903 };
6904 
6905 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6906 				      struct drm_plane_state *new_state)
6907 {
6908 	struct amdgpu_framebuffer *afb;
6909 	struct drm_gem_object *obj;
6910 	struct amdgpu_device *adev;
6911 	struct amdgpu_bo *rbo;
6912 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6913 	struct list_head list;
6914 	struct ttm_validate_buffer tv;
6915 	struct ww_acquire_ctx ticket;
6916 	uint32_t domain;
6917 	int r;
6918 
6919 	if (!new_state->fb) {
6920 		DRM_DEBUG_KMS("No FB bound\n");
6921 		return 0;
6922 	}
6923 
6924 	afb = to_amdgpu_framebuffer(new_state->fb);
6925 	obj = new_state->fb->obj[0];
6926 	rbo = gem_to_amdgpu_bo(obj);
6927 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6928 	INIT_LIST_HEAD(&list);
6929 
6930 	tv.bo = &rbo->tbo;
6931 	tv.num_shared = 1;
6932 	list_add(&tv.head, &list);
6933 
6934 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6935 	if (r) {
6936 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6937 		return r;
6938 	}
6939 
6940 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6941 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6942 	else
6943 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6944 
6945 	r = amdgpu_bo_pin(rbo, domain);
6946 	if (unlikely(r != 0)) {
6947 		if (r != -ERESTARTSYS)
6948 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6949 		ttm_eu_backoff_reservation(&ticket, &list);
6950 		return r;
6951 	}
6952 
6953 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6954 	if (unlikely(r != 0)) {
6955 		amdgpu_bo_unpin(rbo);
6956 		ttm_eu_backoff_reservation(&ticket, &list);
6957 		DRM_ERROR("%p bind failed\n", rbo);
6958 		return r;
6959 	}
6960 
6961 	ttm_eu_backoff_reservation(&ticket, &list);
6962 
6963 	afb->address = amdgpu_bo_gpu_offset(rbo);
6964 
6965 	amdgpu_bo_ref(rbo);
6966 
6967 	/**
6968 	 * We don't do surface updates on planes that have been newly created,
6969 	 * but we also don't have the afb->address during atomic check.
6970 	 *
6971 	 * Fill in buffer attributes depending on the address here, but only on
6972 	 * newly created planes since they're not being used by DC yet and this
6973 	 * won't modify global state.
6974 	 */
6975 	dm_plane_state_old = to_dm_plane_state(plane->state);
6976 	dm_plane_state_new = to_dm_plane_state(new_state);
6977 
6978 	if (dm_plane_state_new->dc_state &&
6979 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6980 		struct dc_plane_state *plane_state =
6981 			dm_plane_state_new->dc_state;
6982 		bool force_disable_dcc = !plane_state->dcc.enable;
6983 
6984 		fill_plane_buffer_attributes(
6985 			adev, afb, plane_state->format, plane_state->rotation,
6986 			afb->tiling_flags,
6987 			&plane_state->tiling_info, &plane_state->plane_size,
6988 			&plane_state->dcc, &plane_state->address,
6989 			afb->tmz_surface, force_disable_dcc);
6990 	}
6991 
6992 	return 0;
6993 }
6994 
6995 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6996 				       struct drm_plane_state *old_state)
6997 {
6998 	struct amdgpu_bo *rbo;
6999 	int r;
7000 
7001 	if (!old_state->fb)
7002 		return;
7003 
7004 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7005 	r = amdgpu_bo_reserve(rbo, false);
7006 	if (unlikely(r)) {
7007 		DRM_ERROR("failed to reserve rbo before unpin\n");
7008 		return;
7009 	}
7010 
7011 	amdgpu_bo_unpin(rbo);
7012 	amdgpu_bo_unreserve(rbo);
7013 	amdgpu_bo_unref(&rbo);
7014 }
7015 
7016 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7017 				       struct drm_crtc_state *new_crtc_state)
7018 {
7019 	struct drm_framebuffer *fb = state->fb;
7020 	int min_downscale, max_upscale;
7021 	int min_scale = 0;
7022 	int max_scale = INT_MAX;
7023 
7024 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7025 	if (fb && state->crtc) {
7026 		/* Validate viewport to cover the case when only the position changes */
7027 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7028 			int viewport_width = state->crtc_w;
7029 			int viewport_height = state->crtc_h;
7030 
7031 			if (state->crtc_x < 0)
7032 				viewport_width += state->crtc_x;
7033 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7034 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7035 
7036 			if (state->crtc_y < 0)
7037 				viewport_height += state->crtc_y;
7038 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7039 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7040 
7041 			if (viewport_width < 0 || viewport_height < 0) {
7042 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7043 				return -EINVAL;
7044 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7045 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7046 				return -EINVAL;
7047 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7048 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7049 				return -EINVAL;
7050 			}
7051 
7052 		}
7053 
7054 		/* Get min/max allowed scaling factors from plane caps. */
7055 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7056 					     &min_downscale, &max_upscale);
7057 		/*
7058 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7059 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7060 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7061 		 */
7062 		min_scale = (1000 << 16) / max_upscale;
7063 		max_scale = (1000 << 16) / min_downscale;
7064 	}
7065 
7066 	return drm_atomic_helper_check_plane_state(
7067 		state, new_crtc_state, min_scale, max_scale, true, true);
7068 }
7069 
7070 static int dm_plane_atomic_check(struct drm_plane *plane,
7071 				 struct drm_atomic_state *state)
7072 {
7073 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7074 										 plane);
7075 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7076 	struct dc *dc = adev->dm.dc;
7077 	struct dm_plane_state *dm_plane_state;
7078 	struct dc_scaling_info scaling_info;
7079 	struct drm_crtc_state *new_crtc_state;
7080 	int ret;
7081 
7082 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7083 
7084 	dm_plane_state = to_dm_plane_state(new_plane_state);
7085 
7086 	if (!dm_plane_state->dc_state)
7087 		return 0;
7088 
7089 	new_crtc_state =
7090 		drm_atomic_get_new_crtc_state(state,
7091 					      new_plane_state->crtc);
7092 	if (!new_crtc_state)
7093 		return -EINVAL;
7094 
7095 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7096 	if (ret)
7097 		return ret;
7098 
7099 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7100 	if (ret)
7101 		return ret;
7102 
7103 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7104 		return 0;
7105 
7106 	return -EINVAL;
7107 }
7108 
7109 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7110 				       struct drm_atomic_state *state)
7111 {
7112 	/* Only support async updates on cursor planes. */
7113 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7114 		return -EINVAL;
7115 
7116 	return 0;
7117 }
7118 
7119 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7120 					 struct drm_atomic_state *state)
7121 {
7122 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7123 									   plane);
7124 	struct drm_plane_state *old_state =
7125 		drm_atomic_get_old_plane_state(state, plane);
7126 
7127 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7128 
7129 	swap(plane->state->fb, new_state->fb);
7130 
7131 	plane->state->src_x = new_state->src_x;
7132 	plane->state->src_y = new_state->src_y;
7133 	plane->state->src_w = new_state->src_w;
7134 	plane->state->src_h = new_state->src_h;
7135 	plane->state->crtc_x = new_state->crtc_x;
7136 	plane->state->crtc_y = new_state->crtc_y;
7137 	plane->state->crtc_w = new_state->crtc_w;
7138 	plane->state->crtc_h = new_state->crtc_h;
7139 
7140 	handle_cursor_update(plane, old_state);
7141 }
7142 
7143 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7144 	.prepare_fb = dm_plane_helper_prepare_fb,
7145 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7146 	.atomic_check = dm_plane_atomic_check,
7147 	.atomic_async_check = dm_plane_atomic_async_check,
7148 	.atomic_async_update = dm_plane_atomic_async_update
7149 };
7150 
7151 /*
7152  * TODO: these are currently initialized to rgb formats only.
7153  * For future use cases we should either initialize them dynamically based on
7154  * plane capabilities, or initialize this array to all formats, so internal drm
7155  * check will succeed, and let DC implement proper check
7156  */
7157 static const uint32_t rgb_formats[] = {
7158 	DRM_FORMAT_XRGB8888,
7159 	DRM_FORMAT_ARGB8888,
7160 	DRM_FORMAT_RGBA8888,
7161 	DRM_FORMAT_XRGB2101010,
7162 	DRM_FORMAT_XBGR2101010,
7163 	DRM_FORMAT_ARGB2101010,
7164 	DRM_FORMAT_ABGR2101010,
7165 	DRM_FORMAT_XRGB16161616,
7166 	DRM_FORMAT_XBGR16161616,
7167 	DRM_FORMAT_ARGB16161616,
7168 	DRM_FORMAT_ABGR16161616,
7169 	DRM_FORMAT_XBGR8888,
7170 	DRM_FORMAT_ABGR8888,
7171 	DRM_FORMAT_RGB565,
7172 };
7173 
7174 static const uint32_t overlay_formats[] = {
7175 	DRM_FORMAT_XRGB8888,
7176 	DRM_FORMAT_ARGB8888,
7177 	DRM_FORMAT_RGBA8888,
7178 	DRM_FORMAT_XBGR8888,
7179 	DRM_FORMAT_ABGR8888,
7180 	DRM_FORMAT_RGB565
7181 };
7182 
7183 static const u32 cursor_formats[] = {
7184 	DRM_FORMAT_ARGB8888
7185 };
7186 
7187 static int get_plane_formats(const struct drm_plane *plane,
7188 			     const struct dc_plane_cap *plane_cap,
7189 			     uint32_t *formats, int max_formats)
7190 {
7191 	int i, num_formats = 0;
7192 
7193 	/*
7194 	 * TODO: Query support for each group of formats directly from
7195 	 * DC plane caps. This will require adding more formats to the
7196 	 * caps list.
7197 	 */
7198 
7199 	switch (plane->type) {
7200 	case DRM_PLANE_TYPE_PRIMARY:
7201 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7202 			if (num_formats >= max_formats)
7203 				break;
7204 
7205 			formats[num_formats++] = rgb_formats[i];
7206 		}
7207 
7208 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7209 			formats[num_formats++] = DRM_FORMAT_NV12;
7210 		if (plane_cap && plane_cap->pixel_format_support.p010)
7211 			formats[num_formats++] = DRM_FORMAT_P010;
7212 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7213 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7214 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7215 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7216 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7217 		}
7218 		break;
7219 
7220 	case DRM_PLANE_TYPE_OVERLAY:
7221 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7222 			if (num_formats >= max_formats)
7223 				break;
7224 
7225 			formats[num_formats++] = overlay_formats[i];
7226 		}
7227 		break;
7228 
7229 	case DRM_PLANE_TYPE_CURSOR:
7230 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7231 			if (num_formats >= max_formats)
7232 				break;
7233 
7234 			formats[num_formats++] = cursor_formats[i];
7235 		}
7236 		break;
7237 	}
7238 
7239 	return num_formats;
7240 }
7241 
7242 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7243 				struct drm_plane *plane,
7244 				unsigned long possible_crtcs,
7245 				const struct dc_plane_cap *plane_cap)
7246 {
7247 	uint32_t formats[32];
7248 	int num_formats;
7249 	int res = -EPERM;
7250 	unsigned int supported_rotations;
7251 	uint64_t *modifiers = NULL;
7252 
7253 	num_formats = get_plane_formats(plane, plane_cap, formats,
7254 					ARRAY_SIZE(formats));
7255 
7256 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7257 	if (res)
7258 		return res;
7259 
7260 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7261 				       &dm_plane_funcs, formats, num_formats,
7262 				       modifiers, plane->type, NULL);
7263 	kfree(modifiers);
7264 	if (res)
7265 		return res;
7266 
7267 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7268 	    plane_cap && plane_cap->per_pixel_alpha) {
7269 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7270 					  BIT(DRM_MODE_BLEND_PREMULTI);
7271 
7272 		drm_plane_create_alpha_property(plane);
7273 		drm_plane_create_blend_mode_property(plane, blend_caps);
7274 	}
7275 
7276 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7277 	    plane_cap &&
7278 	    (plane_cap->pixel_format_support.nv12 ||
7279 	     plane_cap->pixel_format_support.p010)) {
7280 		/* This only affects YUV formats. */
7281 		drm_plane_create_color_properties(
7282 			plane,
7283 			BIT(DRM_COLOR_YCBCR_BT601) |
7284 			BIT(DRM_COLOR_YCBCR_BT709) |
7285 			BIT(DRM_COLOR_YCBCR_BT2020),
7286 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7287 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7288 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7289 	}
7290 
7291 	supported_rotations =
7292 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7293 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7294 
7295 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7296 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7297 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7298 						   supported_rotations);
7299 
7300 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7301 
7302 	/* Create (reset) the plane state */
7303 	if (plane->funcs->reset)
7304 		plane->funcs->reset(plane);
7305 
7306 	return 0;
7307 }
7308 
7309 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7310 			       struct drm_plane *plane,
7311 			       uint32_t crtc_index)
7312 {
7313 	struct amdgpu_crtc *acrtc = NULL;
7314 	struct drm_plane *cursor_plane;
7315 
7316 	int res = -ENOMEM;
7317 
7318 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7319 	if (!cursor_plane)
7320 		goto fail;
7321 
7322 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7323 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7324 
7325 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7326 	if (!acrtc)
7327 		goto fail;
7328 
7329 	res = drm_crtc_init_with_planes(
7330 			dm->ddev,
7331 			&acrtc->base,
7332 			plane,
7333 			cursor_plane,
7334 			&amdgpu_dm_crtc_funcs, NULL);
7335 
7336 	if (res)
7337 		goto fail;
7338 
7339 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7340 
7341 	/* Create (reset) the plane state */
7342 	if (acrtc->base.funcs->reset)
7343 		acrtc->base.funcs->reset(&acrtc->base);
7344 
7345 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7346 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7347 
7348 	acrtc->crtc_id = crtc_index;
7349 	acrtc->base.enabled = false;
7350 	acrtc->otg_inst = -1;
7351 
7352 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7353 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7354 				   true, MAX_COLOR_LUT_ENTRIES);
7355 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7356 
7357 	return 0;
7358 
7359 fail:
7360 	kfree(acrtc);
7361 	kfree(cursor_plane);
7362 	return res;
7363 }
7364 
7365 
7366 static int to_drm_connector_type(enum signal_type st)
7367 {
7368 	switch (st) {
7369 	case SIGNAL_TYPE_HDMI_TYPE_A:
7370 		return DRM_MODE_CONNECTOR_HDMIA;
7371 	case SIGNAL_TYPE_EDP:
7372 		return DRM_MODE_CONNECTOR_eDP;
7373 	case SIGNAL_TYPE_LVDS:
7374 		return DRM_MODE_CONNECTOR_LVDS;
7375 	case SIGNAL_TYPE_RGB:
7376 		return DRM_MODE_CONNECTOR_VGA;
7377 	case SIGNAL_TYPE_DISPLAY_PORT:
7378 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7379 		return DRM_MODE_CONNECTOR_DisplayPort;
7380 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7381 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7382 		return DRM_MODE_CONNECTOR_DVID;
7383 	case SIGNAL_TYPE_VIRTUAL:
7384 		return DRM_MODE_CONNECTOR_VIRTUAL;
7385 
7386 	default:
7387 		return DRM_MODE_CONNECTOR_Unknown;
7388 	}
7389 }
7390 
7391 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7392 {
7393 	struct drm_encoder *encoder;
7394 
7395 	/* There is only one encoder per connector */
7396 	drm_connector_for_each_possible_encoder(connector, encoder)
7397 		return encoder;
7398 
7399 	return NULL;
7400 }
7401 
7402 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7403 {
7404 	struct drm_encoder *encoder;
7405 	struct amdgpu_encoder *amdgpu_encoder;
7406 
7407 	encoder = amdgpu_dm_connector_to_encoder(connector);
7408 
7409 	if (encoder == NULL)
7410 		return;
7411 
7412 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7413 
7414 	amdgpu_encoder->native_mode.clock = 0;
7415 
7416 	if (!list_empty(&connector->probed_modes)) {
7417 		struct drm_display_mode *preferred_mode = NULL;
7418 
7419 		list_for_each_entry(preferred_mode,
7420 				    &connector->probed_modes,
7421 				    head) {
7422 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7423 				amdgpu_encoder->native_mode = *preferred_mode;
7424 
7425 			break;
7426 		}
7427 
7428 	}
7429 }
7430 
7431 static struct drm_display_mode *
7432 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7433 			     char *name,
7434 			     int hdisplay, int vdisplay)
7435 {
7436 	struct drm_device *dev = encoder->dev;
7437 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7438 	struct drm_display_mode *mode = NULL;
7439 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7440 
7441 	mode = drm_mode_duplicate(dev, native_mode);
7442 
7443 	if (mode == NULL)
7444 		return NULL;
7445 
7446 	mode->hdisplay = hdisplay;
7447 	mode->vdisplay = vdisplay;
7448 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7449 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7450 
7451 	return mode;
7452 
7453 }
7454 
7455 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7456 						 struct drm_connector *connector)
7457 {
7458 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7459 	struct drm_display_mode *mode = NULL;
7460 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7461 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7462 				to_amdgpu_dm_connector(connector);
7463 	int i;
7464 	int n;
7465 	struct mode_size {
7466 		char name[DRM_DISPLAY_MODE_LEN];
7467 		int w;
7468 		int h;
7469 	} common_modes[] = {
7470 		{  "640x480",  640,  480},
7471 		{  "800x600",  800,  600},
7472 		{ "1024x768", 1024,  768},
7473 		{ "1280x720", 1280,  720},
7474 		{ "1280x800", 1280,  800},
7475 		{"1280x1024", 1280, 1024},
7476 		{ "1440x900", 1440,  900},
7477 		{"1680x1050", 1680, 1050},
7478 		{"1600x1200", 1600, 1200},
7479 		{"1920x1080", 1920, 1080},
7480 		{"1920x1200", 1920, 1200}
7481 	};
7482 
7483 	n = ARRAY_SIZE(common_modes);
7484 
7485 	for (i = 0; i < n; i++) {
7486 		struct drm_display_mode *curmode = NULL;
7487 		bool mode_existed = false;
7488 
7489 		if (common_modes[i].w > native_mode->hdisplay ||
7490 		    common_modes[i].h > native_mode->vdisplay ||
7491 		   (common_modes[i].w == native_mode->hdisplay &&
7492 		    common_modes[i].h == native_mode->vdisplay))
7493 			continue;
7494 
7495 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7496 			if (common_modes[i].w == curmode->hdisplay &&
7497 			    common_modes[i].h == curmode->vdisplay) {
7498 				mode_existed = true;
7499 				break;
7500 			}
7501 		}
7502 
7503 		if (mode_existed)
7504 			continue;
7505 
7506 		mode = amdgpu_dm_create_common_mode(encoder,
7507 				common_modes[i].name, common_modes[i].w,
7508 				common_modes[i].h);
7509 		drm_mode_probed_add(connector, mode);
7510 		amdgpu_dm_connector->num_modes++;
7511 	}
7512 }
7513 
7514 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7515 					      struct edid *edid)
7516 {
7517 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7518 			to_amdgpu_dm_connector(connector);
7519 
7520 	if (edid) {
7521 		/* empty probed_modes */
7522 		INIT_LIST_HEAD(&connector->probed_modes);
7523 		amdgpu_dm_connector->num_modes =
7524 				drm_add_edid_modes(connector, edid);
7525 
7526 		/* sorting the probed modes before calling function
7527 		 * amdgpu_dm_get_native_mode() since EDID can have
7528 		 * more than one preferred mode. The modes that are
7529 		 * later in the probed mode list could be of higher
7530 		 * and preferred resolution. For example, 3840x2160
7531 		 * resolution in base EDID preferred timing and 4096x2160
7532 		 * preferred resolution in DID extension block later.
7533 		 */
7534 		drm_mode_sort(&connector->probed_modes);
7535 		amdgpu_dm_get_native_mode(connector);
7536 
7537 		/* Freesync capabilities are reset by calling
7538 		 * drm_add_edid_modes() and need to be
7539 		 * restored here.
7540 		 */
7541 		amdgpu_dm_update_freesync_caps(connector, edid);
7542 	} else {
7543 		amdgpu_dm_connector->num_modes = 0;
7544 	}
7545 }
7546 
7547 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7548 			      struct drm_display_mode *mode)
7549 {
7550 	struct drm_display_mode *m;
7551 
7552 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7553 		if (drm_mode_equal(m, mode))
7554 			return true;
7555 	}
7556 
7557 	return false;
7558 }
7559 
7560 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7561 {
7562 	const struct drm_display_mode *m;
7563 	struct drm_display_mode *new_mode;
7564 	uint i;
7565 	uint32_t new_modes_count = 0;
7566 
7567 	/* Standard FPS values
7568 	 *
7569 	 * 23.976   - TV/NTSC
7570 	 * 24 	    - Cinema
7571 	 * 25 	    - TV/PAL
7572 	 * 29.97    - TV/NTSC
7573 	 * 30 	    - TV/NTSC
7574 	 * 48 	    - Cinema HFR
7575 	 * 50 	    - TV/PAL
7576 	 * 60 	    - Commonly used
7577 	 * 48,72,96 - Multiples of 24
7578 	 */
7579 	static const uint32_t common_rates[] = {
7580 		23976, 24000, 25000, 29970, 30000,
7581 		48000, 50000, 60000, 72000, 96000
7582 	};
7583 
7584 	/*
7585 	 * Find mode with highest refresh rate with the same resolution
7586 	 * as the preferred mode. Some monitors report a preferred mode
7587 	 * with lower resolution than the highest refresh rate supported.
7588 	 */
7589 
7590 	m = get_highest_refresh_rate_mode(aconnector, true);
7591 	if (!m)
7592 		return 0;
7593 
7594 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7595 		uint64_t target_vtotal, target_vtotal_diff;
7596 		uint64_t num, den;
7597 
7598 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7599 			continue;
7600 
7601 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7602 		    common_rates[i] > aconnector->max_vfreq * 1000)
7603 			continue;
7604 
7605 		num = (unsigned long long)m->clock * 1000 * 1000;
7606 		den = common_rates[i] * (unsigned long long)m->htotal;
7607 		target_vtotal = div_u64(num, den);
7608 		target_vtotal_diff = target_vtotal - m->vtotal;
7609 
7610 		/* Check for illegal modes */
7611 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7612 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7613 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7614 			continue;
7615 
7616 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7617 		if (!new_mode)
7618 			goto out;
7619 
7620 		new_mode->vtotal += (u16)target_vtotal_diff;
7621 		new_mode->vsync_start += (u16)target_vtotal_diff;
7622 		new_mode->vsync_end += (u16)target_vtotal_diff;
7623 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7624 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7625 
7626 		if (!is_duplicate_mode(aconnector, new_mode)) {
7627 			drm_mode_probed_add(&aconnector->base, new_mode);
7628 			new_modes_count += 1;
7629 		} else
7630 			drm_mode_destroy(aconnector->base.dev, new_mode);
7631 	}
7632  out:
7633 	return new_modes_count;
7634 }
7635 
7636 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7637 						   struct edid *edid)
7638 {
7639 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7640 		to_amdgpu_dm_connector(connector);
7641 
7642 	if (!(amdgpu_freesync_vid_mode && edid))
7643 		return;
7644 
7645 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7646 		amdgpu_dm_connector->num_modes +=
7647 			add_fs_modes(amdgpu_dm_connector);
7648 }
7649 
7650 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7651 {
7652 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7653 			to_amdgpu_dm_connector(connector);
7654 	struct drm_encoder *encoder;
7655 	struct edid *edid = amdgpu_dm_connector->edid;
7656 
7657 	encoder = amdgpu_dm_connector_to_encoder(connector);
7658 
7659 	if (!drm_edid_is_valid(edid)) {
7660 		amdgpu_dm_connector->num_modes =
7661 				drm_add_modes_noedid(connector, 640, 480);
7662 	} else {
7663 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7664 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7665 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7666 	}
7667 	amdgpu_dm_fbc_init(connector);
7668 
7669 	return amdgpu_dm_connector->num_modes;
7670 }
7671 
7672 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7673 				     struct amdgpu_dm_connector *aconnector,
7674 				     int connector_type,
7675 				     struct dc_link *link,
7676 				     int link_index)
7677 {
7678 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7679 
7680 	/*
7681 	 * Some of the properties below require access to state, like bpc.
7682 	 * Allocate some default initial connector state with our reset helper.
7683 	 */
7684 	if (aconnector->base.funcs->reset)
7685 		aconnector->base.funcs->reset(&aconnector->base);
7686 
7687 	aconnector->connector_id = link_index;
7688 	aconnector->dc_link = link;
7689 	aconnector->base.interlace_allowed = false;
7690 	aconnector->base.doublescan_allowed = false;
7691 	aconnector->base.stereo_allowed = false;
7692 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7693 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7694 	aconnector->audio_inst = -1;
7695 	mutex_init(&aconnector->hpd_lock);
7696 
7697 	/*
7698 	 * configure support HPD hot plug connector_>polled default value is 0
7699 	 * which means HPD hot plug not supported
7700 	 */
7701 	switch (connector_type) {
7702 	case DRM_MODE_CONNECTOR_HDMIA:
7703 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7704 		aconnector->base.ycbcr_420_allowed =
7705 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7706 		break;
7707 	case DRM_MODE_CONNECTOR_DisplayPort:
7708 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7709 		aconnector->base.ycbcr_420_allowed =
7710 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
7711 		break;
7712 	case DRM_MODE_CONNECTOR_DVID:
7713 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7714 		break;
7715 	default:
7716 		break;
7717 	}
7718 
7719 	drm_object_attach_property(&aconnector->base.base,
7720 				dm->ddev->mode_config.scaling_mode_property,
7721 				DRM_MODE_SCALE_NONE);
7722 
7723 	drm_object_attach_property(&aconnector->base.base,
7724 				adev->mode_info.underscan_property,
7725 				UNDERSCAN_OFF);
7726 	drm_object_attach_property(&aconnector->base.base,
7727 				adev->mode_info.underscan_hborder_property,
7728 				0);
7729 	drm_object_attach_property(&aconnector->base.base,
7730 				adev->mode_info.underscan_vborder_property,
7731 				0);
7732 
7733 	if (!aconnector->mst_port)
7734 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7735 
7736 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7737 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7738 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7739 
7740 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7741 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7742 		drm_object_attach_property(&aconnector->base.base,
7743 				adev->mode_info.abm_level_property, 0);
7744 	}
7745 
7746 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7747 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7748 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7749 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7750 
7751 		if (!aconnector->mst_port)
7752 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7753 
7754 #ifdef CONFIG_DRM_AMD_DC_HDCP
7755 		if (adev->dm.hdcp_workqueue)
7756 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7757 #endif
7758 	}
7759 }
7760 
7761 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7762 			      struct i2c_msg *msgs, int num)
7763 {
7764 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7765 	struct ddc_service *ddc_service = i2c->ddc_service;
7766 	struct i2c_command cmd;
7767 	int i;
7768 	int result = -EIO;
7769 
7770 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7771 
7772 	if (!cmd.payloads)
7773 		return result;
7774 
7775 	cmd.number_of_payloads = num;
7776 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7777 	cmd.speed = 100;
7778 
7779 	for (i = 0; i < num; i++) {
7780 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7781 		cmd.payloads[i].address = msgs[i].addr;
7782 		cmd.payloads[i].length = msgs[i].len;
7783 		cmd.payloads[i].data = msgs[i].buf;
7784 	}
7785 
7786 	if (dc_submit_i2c(
7787 			ddc_service->ctx->dc,
7788 			ddc_service->ddc_pin->hw_info.ddc_channel,
7789 			&cmd))
7790 		result = num;
7791 
7792 	kfree(cmd.payloads);
7793 	return result;
7794 }
7795 
7796 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7797 {
7798 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7799 }
7800 
7801 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7802 	.master_xfer = amdgpu_dm_i2c_xfer,
7803 	.functionality = amdgpu_dm_i2c_func,
7804 };
7805 
7806 static struct amdgpu_i2c_adapter *
7807 create_i2c(struct ddc_service *ddc_service,
7808 	   int link_index,
7809 	   int *res)
7810 {
7811 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7812 	struct amdgpu_i2c_adapter *i2c;
7813 
7814 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7815 	if (!i2c)
7816 		return NULL;
7817 	i2c->base.owner = THIS_MODULE;
7818 	i2c->base.class = I2C_CLASS_DDC;
7819 	i2c->base.dev.parent = &adev->pdev->dev;
7820 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7821 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7822 	i2c_set_adapdata(&i2c->base, i2c);
7823 	i2c->ddc_service = ddc_service;
7824 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7825 
7826 	return i2c;
7827 }
7828 
7829 
7830 /*
7831  * Note: this function assumes that dc_link_detect() was called for the
7832  * dc_link which will be represented by this aconnector.
7833  */
7834 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7835 				    struct amdgpu_dm_connector *aconnector,
7836 				    uint32_t link_index,
7837 				    struct amdgpu_encoder *aencoder)
7838 {
7839 	int res = 0;
7840 	int connector_type;
7841 	struct dc *dc = dm->dc;
7842 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7843 	struct amdgpu_i2c_adapter *i2c;
7844 
7845 	link->priv = aconnector;
7846 
7847 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7848 
7849 	i2c = create_i2c(link->ddc, link->link_index, &res);
7850 	if (!i2c) {
7851 		DRM_ERROR("Failed to create i2c adapter data\n");
7852 		return -ENOMEM;
7853 	}
7854 
7855 	aconnector->i2c = i2c;
7856 	res = i2c_add_adapter(&i2c->base);
7857 
7858 	if (res) {
7859 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7860 		goto out_free;
7861 	}
7862 
7863 	connector_type = to_drm_connector_type(link->connector_signal);
7864 
7865 	res = drm_connector_init_with_ddc(
7866 			dm->ddev,
7867 			&aconnector->base,
7868 			&amdgpu_dm_connector_funcs,
7869 			connector_type,
7870 			&i2c->base);
7871 
7872 	if (res) {
7873 		DRM_ERROR("connector_init failed\n");
7874 		aconnector->connector_id = -1;
7875 		goto out_free;
7876 	}
7877 
7878 	drm_connector_helper_add(
7879 			&aconnector->base,
7880 			&amdgpu_dm_connector_helper_funcs);
7881 
7882 	amdgpu_dm_connector_init_helper(
7883 		dm,
7884 		aconnector,
7885 		connector_type,
7886 		link,
7887 		link_index);
7888 
7889 	drm_connector_attach_encoder(
7890 		&aconnector->base, &aencoder->base);
7891 
7892 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7893 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7894 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7895 
7896 out_free:
7897 	if (res) {
7898 		kfree(i2c);
7899 		aconnector->i2c = NULL;
7900 	}
7901 	return res;
7902 }
7903 
7904 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7905 {
7906 	switch (adev->mode_info.num_crtc) {
7907 	case 1:
7908 		return 0x1;
7909 	case 2:
7910 		return 0x3;
7911 	case 3:
7912 		return 0x7;
7913 	case 4:
7914 		return 0xf;
7915 	case 5:
7916 		return 0x1f;
7917 	case 6:
7918 	default:
7919 		return 0x3f;
7920 	}
7921 }
7922 
7923 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7924 				  struct amdgpu_encoder *aencoder,
7925 				  uint32_t link_index)
7926 {
7927 	struct amdgpu_device *adev = drm_to_adev(dev);
7928 
7929 	int res = drm_encoder_init(dev,
7930 				   &aencoder->base,
7931 				   &amdgpu_dm_encoder_funcs,
7932 				   DRM_MODE_ENCODER_TMDS,
7933 				   NULL);
7934 
7935 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7936 
7937 	if (!res)
7938 		aencoder->encoder_id = link_index;
7939 	else
7940 		aencoder->encoder_id = -1;
7941 
7942 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7943 
7944 	return res;
7945 }
7946 
7947 static void manage_dm_interrupts(struct amdgpu_device *adev,
7948 				 struct amdgpu_crtc *acrtc,
7949 				 bool enable)
7950 {
7951 	/*
7952 	 * We have no guarantee that the frontend index maps to the same
7953 	 * backend index - some even map to more than one.
7954 	 *
7955 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7956 	 */
7957 	int irq_type =
7958 		amdgpu_display_crtc_idx_to_irq_type(
7959 			adev,
7960 			acrtc->crtc_id);
7961 
7962 	if (enable) {
7963 		drm_crtc_vblank_on(&acrtc->base);
7964 		amdgpu_irq_get(
7965 			adev,
7966 			&adev->pageflip_irq,
7967 			irq_type);
7968 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7969 		amdgpu_irq_get(
7970 			adev,
7971 			&adev->vline0_irq,
7972 			irq_type);
7973 #endif
7974 	} else {
7975 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7976 		amdgpu_irq_put(
7977 			adev,
7978 			&adev->vline0_irq,
7979 			irq_type);
7980 #endif
7981 		amdgpu_irq_put(
7982 			adev,
7983 			&adev->pageflip_irq,
7984 			irq_type);
7985 		drm_crtc_vblank_off(&acrtc->base);
7986 	}
7987 }
7988 
7989 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7990 				      struct amdgpu_crtc *acrtc)
7991 {
7992 	int irq_type =
7993 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7994 
7995 	/**
7996 	 * This reads the current state for the IRQ and force reapplies
7997 	 * the setting to hardware.
7998 	 */
7999 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8000 }
8001 
8002 static bool
8003 is_scaling_state_different(const struct dm_connector_state *dm_state,
8004 			   const struct dm_connector_state *old_dm_state)
8005 {
8006 	if (dm_state->scaling != old_dm_state->scaling)
8007 		return true;
8008 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8009 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8010 			return true;
8011 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8012 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8013 			return true;
8014 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8015 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8016 		return true;
8017 	return false;
8018 }
8019 
8020 #ifdef CONFIG_DRM_AMD_DC_HDCP
8021 static bool is_content_protection_different(struct drm_connector_state *state,
8022 					    const struct drm_connector_state *old_state,
8023 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8024 {
8025 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8026 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8027 
8028 	/* Handle: Type0/1 change */
8029 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8030 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8031 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8032 		return true;
8033 	}
8034 
8035 	/* CP is being re enabled, ignore this
8036 	 *
8037 	 * Handles:	ENABLED -> DESIRED
8038 	 */
8039 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8040 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8041 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8042 		return false;
8043 	}
8044 
8045 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8046 	 *
8047 	 * Handles:	UNDESIRED -> ENABLED
8048 	 */
8049 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8050 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8051 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8052 
8053 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
8054 	 * hot-plug, headless s3, dpms
8055 	 *
8056 	 * Handles:	DESIRED -> DESIRED (Special case)
8057 	 */
8058 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8059 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8060 		dm_con_state->update_hdcp = false;
8061 		return true;
8062 	}
8063 
8064 	/*
8065 	 * Handles:	UNDESIRED -> UNDESIRED
8066 	 *		DESIRED -> DESIRED
8067 	 *		ENABLED -> ENABLED
8068 	 */
8069 	if (old_state->content_protection == state->content_protection)
8070 		return false;
8071 
8072 	/*
8073 	 * Handles:	UNDESIRED -> DESIRED
8074 	 *		DESIRED -> UNDESIRED
8075 	 *		ENABLED -> UNDESIRED
8076 	 */
8077 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8078 		return true;
8079 
8080 	/*
8081 	 * Handles:	DESIRED -> ENABLED
8082 	 */
8083 	return false;
8084 }
8085 
8086 #endif
8087 static void remove_stream(struct amdgpu_device *adev,
8088 			  struct amdgpu_crtc *acrtc,
8089 			  struct dc_stream_state *stream)
8090 {
8091 	/* this is the update mode case */
8092 
8093 	acrtc->otg_inst = -1;
8094 	acrtc->enabled = false;
8095 }
8096 
8097 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8098 			       struct dc_cursor_position *position)
8099 {
8100 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8101 	int x, y;
8102 	int xorigin = 0, yorigin = 0;
8103 
8104 	if (!crtc || !plane->state->fb)
8105 		return 0;
8106 
8107 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8108 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8109 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8110 			  __func__,
8111 			  plane->state->crtc_w,
8112 			  plane->state->crtc_h);
8113 		return -EINVAL;
8114 	}
8115 
8116 	x = plane->state->crtc_x;
8117 	y = plane->state->crtc_y;
8118 
8119 	if (x <= -amdgpu_crtc->max_cursor_width ||
8120 	    y <= -amdgpu_crtc->max_cursor_height)
8121 		return 0;
8122 
8123 	if (x < 0) {
8124 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8125 		x = 0;
8126 	}
8127 	if (y < 0) {
8128 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8129 		y = 0;
8130 	}
8131 	position->enable = true;
8132 	position->translate_by_source = true;
8133 	position->x = x;
8134 	position->y = y;
8135 	position->x_hotspot = xorigin;
8136 	position->y_hotspot = yorigin;
8137 
8138 	return 0;
8139 }
8140 
8141 static void handle_cursor_update(struct drm_plane *plane,
8142 				 struct drm_plane_state *old_plane_state)
8143 {
8144 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8145 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8146 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8147 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8148 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8149 	uint64_t address = afb ? afb->address : 0;
8150 	struct dc_cursor_position position = {0};
8151 	struct dc_cursor_attributes attributes;
8152 	int ret;
8153 
8154 	if (!plane->state->fb && !old_plane_state->fb)
8155 		return;
8156 
8157 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8158 		      __func__,
8159 		      amdgpu_crtc->crtc_id,
8160 		      plane->state->crtc_w,
8161 		      plane->state->crtc_h);
8162 
8163 	ret = get_cursor_position(plane, crtc, &position);
8164 	if (ret)
8165 		return;
8166 
8167 	if (!position.enable) {
8168 		/* turn off cursor */
8169 		if (crtc_state && crtc_state->stream) {
8170 			mutex_lock(&adev->dm.dc_lock);
8171 			dc_stream_set_cursor_position(crtc_state->stream,
8172 						      &position);
8173 			mutex_unlock(&adev->dm.dc_lock);
8174 		}
8175 		return;
8176 	}
8177 
8178 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8179 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8180 
8181 	memset(&attributes, 0, sizeof(attributes));
8182 	attributes.address.high_part = upper_32_bits(address);
8183 	attributes.address.low_part  = lower_32_bits(address);
8184 	attributes.width             = plane->state->crtc_w;
8185 	attributes.height            = plane->state->crtc_h;
8186 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8187 	attributes.rotation_angle    = 0;
8188 	attributes.attribute_flags.value = 0;
8189 
8190 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8191 
8192 	if (crtc_state->stream) {
8193 		mutex_lock(&adev->dm.dc_lock);
8194 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8195 							 &attributes))
8196 			DRM_ERROR("DC failed to set cursor attributes\n");
8197 
8198 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8199 						   &position))
8200 			DRM_ERROR("DC failed to set cursor position\n");
8201 		mutex_unlock(&adev->dm.dc_lock);
8202 	}
8203 }
8204 
8205 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8206 {
8207 
8208 	assert_spin_locked(&acrtc->base.dev->event_lock);
8209 	WARN_ON(acrtc->event);
8210 
8211 	acrtc->event = acrtc->base.state->event;
8212 
8213 	/* Set the flip status */
8214 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8215 
8216 	/* Mark this event as consumed */
8217 	acrtc->base.state->event = NULL;
8218 
8219 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8220 		     acrtc->crtc_id);
8221 }
8222 
8223 static void update_freesync_state_on_stream(
8224 	struct amdgpu_display_manager *dm,
8225 	struct dm_crtc_state *new_crtc_state,
8226 	struct dc_stream_state *new_stream,
8227 	struct dc_plane_state *surface,
8228 	u32 flip_timestamp_in_us)
8229 {
8230 	struct mod_vrr_params vrr_params;
8231 	struct dc_info_packet vrr_infopacket = {0};
8232 	struct amdgpu_device *adev = dm->adev;
8233 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8234 	unsigned long flags;
8235 	bool pack_sdp_v1_3 = false;
8236 
8237 	if (!new_stream)
8238 		return;
8239 
8240 	/*
8241 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8242 	 * For now it's sufficient to just guard against these conditions.
8243 	 */
8244 
8245 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8246 		return;
8247 
8248 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8249         vrr_params = acrtc->dm_irq_params.vrr_params;
8250 
8251 	if (surface) {
8252 		mod_freesync_handle_preflip(
8253 			dm->freesync_module,
8254 			surface,
8255 			new_stream,
8256 			flip_timestamp_in_us,
8257 			&vrr_params);
8258 
8259 		if (adev->family < AMDGPU_FAMILY_AI &&
8260 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8261 			mod_freesync_handle_v_update(dm->freesync_module,
8262 						     new_stream, &vrr_params);
8263 
8264 			/* Need to call this before the frame ends. */
8265 			dc_stream_adjust_vmin_vmax(dm->dc,
8266 						   new_crtc_state->stream,
8267 						   &vrr_params.adjust);
8268 		}
8269 	}
8270 
8271 	mod_freesync_build_vrr_infopacket(
8272 		dm->freesync_module,
8273 		new_stream,
8274 		&vrr_params,
8275 		PACKET_TYPE_VRR,
8276 		TRANSFER_FUNC_UNKNOWN,
8277 		&vrr_infopacket,
8278 		pack_sdp_v1_3);
8279 
8280 	new_crtc_state->freesync_timing_changed |=
8281 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8282 			&vrr_params.adjust,
8283 			sizeof(vrr_params.adjust)) != 0);
8284 
8285 	new_crtc_state->freesync_vrr_info_changed |=
8286 		(memcmp(&new_crtc_state->vrr_infopacket,
8287 			&vrr_infopacket,
8288 			sizeof(vrr_infopacket)) != 0);
8289 
8290 	acrtc->dm_irq_params.vrr_params = vrr_params;
8291 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8292 
8293 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8294 	new_stream->vrr_infopacket = vrr_infopacket;
8295 
8296 	if (new_crtc_state->freesync_vrr_info_changed)
8297 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8298 			      new_crtc_state->base.crtc->base.id,
8299 			      (int)new_crtc_state->base.vrr_enabled,
8300 			      (int)vrr_params.state);
8301 
8302 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8303 }
8304 
8305 static void update_stream_irq_parameters(
8306 	struct amdgpu_display_manager *dm,
8307 	struct dm_crtc_state *new_crtc_state)
8308 {
8309 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8310 	struct mod_vrr_params vrr_params;
8311 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8312 	struct amdgpu_device *adev = dm->adev;
8313 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8314 	unsigned long flags;
8315 
8316 	if (!new_stream)
8317 		return;
8318 
8319 	/*
8320 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8321 	 * For now it's sufficient to just guard against these conditions.
8322 	 */
8323 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8324 		return;
8325 
8326 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8327 	vrr_params = acrtc->dm_irq_params.vrr_params;
8328 
8329 	if (new_crtc_state->vrr_supported &&
8330 	    config.min_refresh_in_uhz &&
8331 	    config.max_refresh_in_uhz) {
8332 		/*
8333 		 * if freesync compatible mode was set, config.state will be set
8334 		 * in atomic check
8335 		 */
8336 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8337 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8338 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8339 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8340 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8341 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8342 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8343 		} else {
8344 			config.state = new_crtc_state->base.vrr_enabled ?
8345 						     VRR_STATE_ACTIVE_VARIABLE :
8346 						     VRR_STATE_INACTIVE;
8347 		}
8348 	} else {
8349 		config.state = VRR_STATE_UNSUPPORTED;
8350 	}
8351 
8352 	mod_freesync_build_vrr_params(dm->freesync_module,
8353 				      new_stream,
8354 				      &config, &vrr_params);
8355 
8356 	new_crtc_state->freesync_timing_changed |=
8357 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8358 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8359 
8360 	new_crtc_state->freesync_config = config;
8361 	/* Copy state for access from DM IRQ handler */
8362 	acrtc->dm_irq_params.freesync_config = config;
8363 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8364 	acrtc->dm_irq_params.vrr_params = vrr_params;
8365 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8366 }
8367 
8368 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8369 					    struct dm_crtc_state *new_state)
8370 {
8371 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8372 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8373 
8374 	if (!old_vrr_active && new_vrr_active) {
8375 		/* Transition VRR inactive -> active:
8376 		 * While VRR is active, we must not disable vblank irq, as a
8377 		 * reenable after disable would compute bogus vblank/pflip
8378 		 * timestamps if it likely happened inside display front-porch.
8379 		 *
8380 		 * We also need vupdate irq for the actual core vblank handling
8381 		 * at end of vblank.
8382 		 */
8383 		dm_set_vupdate_irq(new_state->base.crtc, true);
8384 		drm_crtc_vblank_get(new_state->base.crtc);
8385 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8386 				 __func__, new_state->base.crtc->base.id);
8387 	} else if (old_vrr_active && !new_vrr_active) {
8388 		/* Transition VRR active -> inactive:
8389 		 * Allow vblank irq disable again for fixed refresh rate.
8390 		 */
8391 		dm_set_vupdate_irq(new_state->base.crtc, false);
8392 		drm_crtc_vblank_put(new_state->base.crtc);
8393 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8394 				 __func__, new_state->base.crtc->base.id);
8395 	}
8396 }
8397 
8398 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8399 {
8400 	struct drm_plane *plane;
8401 	struct drm_plane_state *old_plane_state;
8402 	int i;
8403 
8404 	/*
8405 	 * TODO: Make this per-stream so we don't issue redundant updates for
8406 	 * commits with multiple streams.
8407 	 */
8408 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8409 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8410 			handle_cursor_update(plane, old_plane_state);
8411 }
8412 
8413 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8414 				    struct dc_state *dc_state,
8415 				    struct drm_device *dev,
8416 				    struct amdgpu_display_manager *dm,
8417 				    struct drm_crtc *pcrtc,
8418 				    bool wait_for_vblank)
8419 {
8420 	uint32_t i;
8421 	uint64_t timestamp_ns;
8422 	struct drm_plane *plane;
8423 	struct drm_plane_state *old_plane_state, *new_plane_state;
8424 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8425 	struct drm_crtc_state *new_pcrtc_state =
8426 			drm_atomic_get_new_crtc_state(state, pcrtc);
8427 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8428 	struct dm_crtc_state *dm_old_crtc_state =
8429 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8430 	int planes_count = 0, vpos, hpos;
8431 	long r;
8432 	unsigned long flags;
8433 	struct amdgpu_bo *abo;
8434 	uint32_t target_vblank, last_flip_vblank;
8435 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8436 	bool pflip_present = false;
8437 	struct {
8438 		struct dc_surface_update surface_updates[MAX_SURFACES];
8439 		struct dc_plane_info plane_infos[MAX_SURFACES];
8440 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8441 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8442 		struct dc_stream_update stream_update;
8443 	} *bundle;
8444 
8445 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8446 
8447 	if (!bundle) {
8448 		dm_error("Failed to allocate update bundle\n");
8449 		goto cleanup;
8450 	}
8451 
8452 	/*
8453 	 * Disable the cursor first if we're disabling all the planes.
8454 	 * It'll remain on the screen after the planes are re-enabled
8455 	 * if we don't.
8456 	 */
8457 	if (acrtc_state->active_planes == 0)
8458 		amdgpu_dm_commit_cursors(state);
8459 
8460 	/* update planes when needed */
8461 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8462 		struct drm_crtc *crtc = new_plane_state->crtc;
8463 		struct drm_crtc_state *new_crtc_state;
8464 		struct drm_framebuffer *fb = new_plane_state->fb;
8465 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8466 		bool plane_needs_flip;
8467 		struct dc_plane_state *dc_plane;
8468 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8469 
8470 		/* Cursor plane is handled after stream updates */
8471 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8472 			continue;
8473 
8474 		if (!fb || !crtc || pcrtc != crtc)
8475 			continue;
8476 
8477 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8478 		if (!new_crtc_state->active)
8479 			continue;
8480 
8481 		dc_plane = dm_new_plane_state->dc_state;
8482 
8483 		bundle->surface_updates[planes_count].surface = dc_plane;
8484 		if (new_pcrtc_state->color_mgmt_changed) {
8485 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8486 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8487 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8488 		}
8489 
8490 		fill_dc_scaling_info(new_plane_state,
8491 				     &bundle->scaling_infos[planes_count]);
8492 
8493 		bundle->surface_updates[planes_count].scaling_info =
8494 			&bundle->scaling_infos[planes_count];
8495 
8496 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8497 
8498 		pflip_present = pflip_present || plane_needs_flip;
8499 
8500 		if (!plane_needs_flip) {
8501 			planes_count += 1;
8502 			continue;
8503 		}
8504 
8505 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8506 
8507 		/*
8508 		 * Wait for all fences on this FB. Do limited wait to avoid
8509 		 * deadlock during GPU reset when this fence will not signal
8510 		 * but we hold reservation lock for the BO.
8511 		 */
8512 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8513 					  msecs_to_jiffies(5000));
8514 		if (unlikely(r <= 0))
8515 			DRM_ERROR("Waiting for fences timed out!");
8516 
8517 		fill_dc_plane_info_and_addr(
8518 			dm->adev, new_plane_state,
8519 			afb->tiling_flags,
8520 			&bundle->plane_infos[planes_count],
8521 			&bundle->flip_addrs[planes_count].address,
8522 			afb->tmz_surface, false);
8523 
8524 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8525 				 new_plane_state->plane->index,
8526 				 bundle->plane_infos[planes_count].dcc.enable);
8527 
8528 		bundle->surface_updates[planes_count].plane_info =
8529 			&bundle->plane_infos[planes_count];
8530 
8531 		/*
8532 		 * Only allow immediate flips for fast updates that don't
8533 		 * change FB pitch, DCC state, rotation or mirroing.
8534 		 */
8535 		bundle->flip_addrs[planes_count].flip_immediate =
8536 			crtc->state->async_flip &&
8537 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8538 
8539 		timestamp_ns = ktime_get_ns();
8540 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8541 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8542 		bundle->surface_updates[planes_count].surface = dc_plane;
8543 
8544 		if (!bundle->surface_updates[planes_count].surface) {
8545 			DRM_ERROR("No surface for CRTC: id=%d\n",
8546 					acrtc_attach->crtc_id);
8547 			continue;
8548 		}
8549 
8550 		if (plane == pcrtc->primary)
8551 			update_freesync_state_on_stream(
8552 				dm,
8553 				acrtc_state,
8554 				acrtc_state->stream,
8555 				dc_plane,
8556 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8557 
8558 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8559 				 __func__,
8560 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8561 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8562 
8563 		planes_count += 1;
8564 
8565 	}
8566 
8567 	if (pflip_present) {
8568 		if (!vrr_active) {
8569 			/* Use old throttling in non-vrr fixed refresh rate mode
8570 			 * to keep flip scheduling based on target vblank counts
8571 			 * working in a backwards compatible way, e.g., for
8572 			 * clients using the GLX_OML_sync_control extension or
8573 			 * DRI3/Present extension with defined target_msc.
8574 			 */
8575 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8576 		}
8577 		else {
8578 			/* For variable refresh rate mode only:
8579 			 * Get vblank of last completed flip to avoid > 1 vrr
8580 			 * flips per video frame by use of throttling, but allow
8581 			 * flip programming anywhere in the possibly large
8582 			 * variable vrr vblank interval for fine-grained flip
8583 			 * timing control and more opportunity to avoid stutter
8584 			 * on late submission of flips.
8585 			 */
8586 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8587 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8588 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8589 		}
8590 
8591 		target_vblank = last_flip_vblank + wait_for_vblank;
8592 
8593 		/*
8594 		 * Wait until we're out of the vertical blank period before the one
8595 		 * targeted by the flip
8596 		 */
8597 		while ((acrtc_attach->enabled &&
8598 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8599 							    0, &vpos, &hpos, NULL,
8600 							    NULL, &pcrtc->hwmode)
8601 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8602 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8603 			(int)(target_vblank -
8604 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8605 			usleep_range(1000, 1100);
8606 		}
8607 
8608 		/**
8609 		 * Prepare the flip event for the pageflip interrupt to handle.
8610 		 *
8611 		 * This only works in the case where we've already turned on the
8612 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
8613 		 * from 0 -> n planes we have to skip a hardware generated event
8614 		 * and rely on sending it from software.
8615 		 */
8616 		if (acrtc_attach->base.state->event &&
8617 		    acrtc_state->active_planes > 0) {
8618 			drm_crtc_vblank_get(pcrtc);
8619 
8620 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8621 
8622 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8623 			prepare_flip_isr(acrtc_attach);
8624 
8625 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8626 		}
8627 
8628 		if (acrtc_state->stream) {
8629 			if (acrtc_state->freesync_vrr_info_changed)
8630 				bundle->stream_update.vrr_infopacket =
8631 					&acrtc_state->stream->vrr_infopacket;
8632 		}
8633 	}
8634 
8635 	/* Update the planes if changed or disable if we don't have any. */
8636 	if ((planes_count || acrtc_state->active_planes == 0) &&
8637 		acrtc_state->stream) {
8638 		bundle->stream_update.stream = acrtc_state->stream;
8639 		if (new_pcrtc_state->mode_changed) {
8640 			bundle->stream_update.src = acrtc_state->stream->src;
8641 			bundle->stream_update.dst = acrtc_state->stream->dst;
8642 		}
8643 
8644 		if (new_pcrtc_state->color_mgmt_changed) {
8645 			/*
8646 			 * TODO: This isn't fully correct since we've actually
8647 			 * already modified the stream in place.
8648 			 */
8649 			bundle->stream_update.gamut_remap =
8650 				&acrtc_state->stream->gamut_remap_matrix;
8651 			bundle->stream_update.output_csc_transform =
8652 				&acrtc_state->stream->csc_color_matrix;
8653 			bundle->stream_update.out_transfer_func =
8654 				acrtc_state->stream->out_transfer_func;
8655 		}
8656 
8657 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8658 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8659 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8660 
8661 		/*
8662 		 * If FreeSync state on the stream has changed then we need to
8663 		 * re-adjust the min/max bounds now that DC doesn't handle this
8664 		 * as part of commit.
8665 		 */
8666 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8667 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8668 			dc_stream_adjust_vmin_vmax(
8669 				dm->dc, acrtc_state->stream,
8670 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8671 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8672 		}
8673 		mutex_lock(&dm->dc_lock);
8674 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8675 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8676 			amdgpu_dm_psr_disable(acrtc_state->stream);
8677 
8678 		dc_commit_updates_for_stream(dm->dc,
8679 						     bundle->surface_updates,
8680 						     planes_count,
8681 						     acrtc_state->stream,
8682 						     &bundle->stream_update,
8683 						     dc_state);
8684 
8685 		/**
8686 		 * Enable or disable the interrupts on the backend.
8687 		 *
8688 		 * Most pipes are put into power gating when unused.
8689 		 *
8690 		 * When power gating is enabled on a pipe we lose the
8691 		 * interrupt enablement state when power gating is disabled.
8692 		 *
8693 		 * So we need to update the IRQ control state in hardware
8694 		 * whenever the pipe turns on (since it could be previously
8695 		 * power gated) or off (since some pipes can't be power gated
8696 		 * on some ASICs).
8697 		 */
8698 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8699 			dm_update_pflip_irq_state(drm_to_adev(dev),
8700 						  acrtc_attach);
8701 
8702 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8703 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8704 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8705 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8706 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8707 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8708 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8709 			struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
8710 					acrtc_state->stream->dm_stream_context;
8711 
8712 			if (aconn->psr_skip_count > 0)
8713 				aconn->psr_skip_count--;
8714 			else
8715 				amdgpu_dm_psr_enable(acrtc_state->stream);
8716 		}
8717 
8718 		mutex_unlock(&dm->dc_lock);
8719 	}
8720 
8721 	/*
8722 	 * Update cursor state *after* programming all the planes.
8723 	 * This avoids redundant programming in the case where we're going
8724 	 * to be disabling a single plane - those pipes are being disabled.
8725 	 */
8726 	if (acrtc_state->active_planes)
8727 		amdgpu_dm_commit_cursors(state);
8728 
8729 cleanup:
8730 	kfree(bundle);
8731 }
8732 
8733 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8734 				   struct drm_atomic_state *state)
8735 {
8736 	struct amdgpu_device *adev = drm_to_adev(dev);
8737 	struct amdgpu_dm_connector *aconnector;
8738 	struct drm_connector *connector;
8739 	struct drm_connector_state *old_con_state, *new_con_state;
8740 	struct drm_crtc_state *new_crtc_state;
8741 	struct dm_crtc_state *new_dm_crtc_state;
8742 	const struct dc_stream_status *status;
8743 	int i, inst;
8744 
8745 	/* Notify device removals. */
8746 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8747 		if (old_con_state->crtc != new_con_state->crtc) {
8748 			/* CRTC changes require notification. */
8749 			goto notify;
8750 		}
8751 
8752 		if (!new_con_state->crtc)
8753 			continue;
8754 
8755 		new_crtc_state = drm_atomic_get_new_crtc_state(
8756 			state, new_con_state->crtc);
8757 
8758 		if (!new_crtc_state)
8759 			continue;
8760 
8761 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8762 			continue;
8763 
8764 	notify:
8765 		aconnector = to_amdgpu_dm_connector(connector);
8766 
8767 		mutex_lock(&adev->dm.audio_lock);
8768 		inst = aconnector->audio_inst;
8769 		aconnector->audio_inst = -1;
8770 		mutex_unlock(&adev->dm.audio_lock);
8771 
8772 		amdgpu_dm_audio_eld_notify(adev, inst);
8773 	}
8774 
8775 	/* Notify audio device additions. */
8776 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8777 		if (!new_con_state->crtc)
8778 			continue;
8779 
8780 		new_crtc_state = drm_atomic_get_new_crtc_state(
8781 			state, new_con_state->crtc);
8782 
8783 		if (!new_crtc_state)
8784 			continue;
8785 
8786 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8787 			continue;
8788 
8789 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8790 		if (!new_dm_crtc_state->stream)
8791 			continue;
8792 
8793 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8794 		if (!status)
8795 			continue;
8796 
8797 		aconnector = to_amdgpu_dm_connector(connector);
8798 
8799 		mutex_lock(&adev->dm.audio_lock);
8800 		inst = status->audio_inst;
8801 		aconnector->audio_inst = inst;
8802 		mutex_unlock(&adev->dm.audio_lock);
8803 
8804 		amdgpu_dm_audio_eld_notify(adev, inst);
8805 	}
8806 }
8807 
8808 /*
8809  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8810  * @crtc_state: the DRM CRTC state
8811  * @stream_state: the DC stream state.
8812  *
8813  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8814  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8815  */
8816 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8817 						struct dc_stream_state *stream_state)
8818 {
8819 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8820 }
8821 
8822 /**
8823  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8824  * @state: The atomic state to commit
8825  *
8826  * This will tell DC to commit the constructed DC state from atomic_check,
8827  * programming the hardware. Any failures here implies a hardware failure, since
8828  * atomic check should have filtered anything non-kosher.
8829  */
8830 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8831 {
8832 	struct drm_device *dev = state->dev;
8833 	struct amdgpu_device *adev = drm_to_adev(dev);
8834 	struct amdgpu_display_manager *dm = &adev->dm;
8835 	struct dm_atomic_state *dm_state;
8836 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8837 	uint32_t i, j;
8838 	struct drm_crtc *crtc;
8839 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8840 	unsigned long flags;
8841 	bool wait_for_vblank = true;
8842 	struct drm_connector *connector;
8843 	struct drm_connector_state *old_con_state, *new_con_state;
8844 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8845 	int crtc_disable_count = 0;
8846 	bool mode_set_reset_required = false;
8847 
8848 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8849 
8850 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8851 
8852 	dm_state = dm_atomic_get_new_state(state);
8853 	if (dm_state && dm_state->context) {
8854 		dc_state = dm_state->context;
8855 	} else {
8856 		/* No state changes, retain current state. */
8857 		dc_state_temp = dc_create_state(dm->dc);
8858 		ASSERT(dc_state_temp);
8859 		dc_state = dc_state_temp;
8860 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8861 	}
8862 
8863 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8864 				       new_crtc_state, i) {
8865 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8866 
8867 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8868 
8869 		if (old_crtc_state->active &&
8870 		    (!new_crtc_state->active ||
8871 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8872 			manage_dm_interrupts(adev, acrtc, false);
8873 			dc_stream_release(dm_old_crtc_state->stream);
8874 		}
8875 	}
8876 
8877 	drm_atomic_helper_calc_timestamping_constants(state);
8878 
8879 	/* update changed items */
8880 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8881 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8882 
8883 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8884 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8885 
8886 		DRM_DEBUG_ATOMIC(
8887 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8888 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8889 			"connectors_changed:%d\n",
8890 			acrtc->crtc_id,
8891 			new_crtc_state->enable,
8892 			new_crtc_state->active,
8893 			new_crtc_state->planes_changed,
8894 			new_crtc_state->mode_changed,
8895 			new_crtc_state->active_changed,
8896 			new_crtc_state->connectors_changed);
8897 
8898 		/* Disable cursor if disabling crtc */
8899 		if (old_crtc_state->active && !new_crtc_state->active) {
8900 			struct dc_cursor_position position;
8901 
8902 			memset(&position, 0, sizeof(position));
8903 			mutex_lock(&dm->dc_lock);
8904 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8905 			mutex_unlock(&dm->dc_lock);
8906 		}
8907 
8908 		/* Copy all transient state flags into dc state */
8909 		if (dm_new_crtc_state->stream) {
8910 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8911 							    dm_new_crtc_state->stream);
8912 		}
8913 
8914 		/* handles headless hotplug case, updating new_state and
8915 		 * aconnector as needed
8916 		 */
8917 
8918 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8919 
8920 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8921 
8922 			if (!dm_new_crtc_state->stream) {
8923 				/*
8924 				 * this could happen because of issues with
8925 				 * userspace notifications delivery.
8926 				 * In this case userspace tries to set mode on
8927 				 * display which is disconnected in fact.
8928 				 * dc_sink is NULL in this case on aconnector.
8929 				 * We expect reset mode will come soon.
8930 				 *
8931 				 * This can also happen when unplug is done
8932 				 * during resume sequence ended
8933 				 *
8934 				 * In this case, we want to pretend we still
8935 				 * have a sink to keep the pipe running so that
8936 				 * hw state is consistent with the sw state
8937 				 */
8938 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8939 						__func__, acrtc->base.base.id);
8940 				continue;
8941 			}
8942 
8943 			if (dm_old_crtc_state->stream)
8944 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8945 
8946 			pm_runtime_get_noresume(dev->dev);
8947 
8948 			acrtc->enabled = true;
8949 			acrtc->hw_mode = new_crtc_state->mode;
8950 			crtc->hwmode = new_crtc_state->mode;
8951 			mode_set_reset_required = true;
8952 		} else if (modereset_required(new_crtc_state)) {
8953 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8954 			/* i.e. reset mode */
8955 			if (dm_old_crtc_state->stream)
8956 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8957 
8958 			mode_set_reset_required = true;
8959 		}
8960 	} /* for_each_crtc_in_state() */
8961 
8962 	if (dc_state) {
8963 		/* if there mode set or reset, disable eDP PSR */
8964 		if (mode_set_reset_required)
8965 			amdgpu_dm_psr_disable_all(dm);
8966 
8967 		dm_enable_per_frame_crtc_master_sync(dc_state);
8968 		mutex_lock(&dm->dc_lock);
8969 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8970 #if defined(CONFIG_DRM_AMD_DC_DCN)
8971                /* Allow idle optimization when vblank count is 0 for display off */
8972                if (dm->active_vblank_irq_count == 0)
8973                    dc_allow_idle_optimizations(dm->dc,true);
8974 #endif
8975 		mutex_unlock(&dm->dc_lock);
8976 	}
8977 
8978 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8979 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8980 
8981 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8982 
8983 		if (dm_new_crtc_state->stream != NULL) {
8984 			const struct dc_stream_status *status =
8985 					dc_stream_get_status(dm_new_crtc_state->stream);
8986 
8987 			if (!status)
8988 				status = dc_stream_get_status_from_state(dc_state,
8989 									 dm_new_crtc_state->stream);
8990 			if (!status)
8991 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8992 			else
8993 				acrtc->otg_inst = status->primary_otg_inst;
8994 		}
8995 	}
8996 #ifdef CONFIG_DRM_AMD_DC_HDCP
8997 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8998 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8999 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9000 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9001 
9002 		new_crtc_state = NULL;
9003 
9004 		if (acrtc)
9005 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9006 
9007 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9008 
9009 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9010 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9011 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9012 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9013 			dm_new_con_state->update_hdcp = true;
9014 			continue;
9015 		}
9016 
9017 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9018 			hdcp_update_display(
9019 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9020 				new_con_state->hdcp_content_type,
9021 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9022 	}
9023 #endif
9024 
9025 	/* Handle connector state changes */
9026 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9027 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9028 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9029 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9030 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9031 		struct dc_stream_update stream_update;
9032 		struct dc_info_packet hdr_packet;
9033 		struct dc_stream_status *status = NULL;
9034 		bool abm_changed, hdr_changed, scaling_changed;
9035 
9036 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9037 		memset(&stream_update, 0, sizeof(stream_update));
9038 
9039 		if (acrtc) {
9040 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9041 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9042 		}
9043 
9044 		/* Skip any modesets/resets */
9045 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9046 			continue;
9047 
9048 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9049 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9050 
9051 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9052 							     dm_old_con_state);
9053 
9054 		abm_changed = dm_new_crtc_state->abm_level !=
9055 			      dm_old_crtc_state->abm_level;
9056 
9057 		hdr_changed =
9058 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9059 
9060 		if (!scaling_changed && !abm_changed && !hdr_changed)
9061 			continue;
9062 
9063 		stream_update.stream = dm_new_crtc_state->stream;
9064 		if (scaling_changed) {
9065 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9066 					dm_new_con_state, dm_new_crtc_state->stream);
9067 
9068 			stream_update.src = dm_new_crtc_state->stream->src;
9069 			stream_update.dst = dm_new_crtc_state->stream->dst;
9070 		}
9071 
9072 		if (abm_changed) {
9073 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9074 
9075 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9076 		}
9077 
9078 		if (hdr_changed) {
9079 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9080 			stream_update.hdr_static_metadata = &hdr_packet;
9081 		}
9082 
9083 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9084 
9085 		if (WARN_ON(!status))
9086 			continue;
9087 
9088 		WARN_ON(!status->plane_count);
9089 
9090 		/*
9091 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9092 		 * Here we create an empty update on each plane.
9093 		 * To fix this, DC should permit updating only stream properties.
9094 		 */
9095 		for (j = 0; j < status->plane_count; j++)
9096 			dummy_updates[j].surface = status->plane_states[0];
9097 
9098 
9099 		mutex_lock(&dm->dc_lock);
9100 		dc_commit_updates_for_stream(dm->dc,
9101 						     dummy_updates,
9102 						     status->plane_count,
9103 						     dm_new_crtc_state->stream,
9104 						     &stream_update,
9105 						     dc_state);
9106 		mutex_unlock(&dm->dc_lock);
9107 	}
9108 
9109 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9110 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9111 				      new_crtc_state, i) {
9112 		if (old_crtc_state->active && !new_crtc_state->active)
9113 			crtc_disable_count++;
9114 
9115 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9116 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9117 
9118 		/* For freesync config update on crtc state and params for irq */
9119 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9120 
9121 		/* Handle vrr on->off / off->on transitions */
9122 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9123 						dm_new_crtc_state);
9124 	}
9125 
9126 	/**
9127 	 * Enable interrupts for CRTCs that are newly enabled or went through
9128 	 * a modeset. It was intentionally deferred until after the front end
9129 	 * state was modified to wait until the OTG was on and so the IRQ
9130 	 * handlers didn't access stale or invalid state.
9131 	 */
9132 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9133 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9134 #ifdef CONFIG_DEBUG_FS
9135 		bool configure_crc = false;
9136 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9137 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9138 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9139 #endif
9140 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9141 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9142 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9143 #endif
9144 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9145 
9146 		if (new_crtc_state->active &&
9147 		    (!old_crtc_state->active ||
9148 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9149 			dc_stream_retain(dm_new_crtc_state->stream);
9150 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9151 			manage_dm_interrupts(adev, acrtc, true);
9152 
9153 #ifdef CONFIG_DEBUG_FS
9154 			/**
9155 			 * Frontend may have changed so reapply the CRC capture
9156 			 * settings for the stream.
9157 			 */
9158 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9159 
9160 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9161 				configure_crc = true;
9162 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9163 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9164 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9165 					acrtc->dm_irq_params.crc_window.update_win = true;
9166 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9167 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9168 					crc_rd_wrk->crtc = crtc;
9169 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9170 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9171 				}
9172 #endif
9173 			}
9174 
9175 			if (configure_crc)
9176 				if (amdgpu_dm_crtc_configure_crc_source(
9177 					crtc, dm_new_crtc_state, cur_crc_src))
9178 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9179 #endif
9180 		}
9181 	}
9182 
9183 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9184 		if (new_crtc_state->async_flip)
9185 			wait_for_vblank = false;
9186 
9187 	/* update planes when needed per crtc*/
9188 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9189 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9190 
9191 		if (dm_new_crtc_state->stream)
9192 			amdgpu_dm_commit_planes(state, dc_state, dev,
9193 						dm, crtc, wait_for_vblank);
9194 	}
9195 
9196 	/* Update audio instances for each connector. */
9197 	amdgpu_dm_commit_audio(dev, state);
9198 
9199 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9200 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9201 	/* restore the backlight level */
9202 	for (i = 0; i < dm->num_of_edps; i++) {
9203 		if (dm->backlight_dev[i] &&
9204 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9205 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9206 	}
9207 #endif
9208 	/*
9209 	 * send vblank event on all events not handled in flip and
9210 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9211 	 */
9212 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9213 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9214 
9215 		if (new_crtc_state->event)
9216 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9217 
9218 		new_crtc_state->event = NULL;
9219 	}
9220 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9221 
9222 	/* Signal HW programming completion */
9223 	drm_atomic_helper_commit_hw_done(state);
9224 
9225 	if (wait_for_vblank)
9226 		drm_atomic_helper_wait_for_flip_done(dev, state);
9227 
9228 	drm_atomic_helper_cleanup_planes(dev, state);
9229 
9230 	/* return the stolen vga memory back to VRAM */
9231 	if (!adev->mman.keep_stolen_vga_memory)
9232 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9233 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9234 
9235 	/*
9236 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9237 	 * so we can put the GPU into runtime suspend if we're not driving any
9238 	 * displays anymore
9239 	 */
9240 	for (i = 0; i < crtc_disable_count; i++)
9241 		pm_runtime_put_autosuspend(dev->dev);
9242 	pm_runtime_mark_last_busy(dev->dev);
9243 
9244 	if (dc_state_temp)
9245 		dc_release_state(dc_state_temp);
9246 }
9247 
9248 
9249 static int dm_force_atomic_commit(struct drm_connector *connector)
9250 {
9251 	int ret = 0;
9252 	struct drm_device *ddev = connector->dev;
9253 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9254 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9255 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9256 	struct drm_connector_state *conn_state;
9257 	struct drm_crtc_state *crtc_state;
9258 	struct drm_plane_state *plane_state;
9259 
9260 	if (!state)
9261 		return -ENOMEM;
9262 
9263 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9264 
9265 	/* Construct an atomic state to restore previous display setting */
9266 
9267 	/*
9268 	 * Attach connectors to drm_atomic_state
9269 	 */
9270 	conn_state = drm_atomic_get_connector_state(state, connector);
9271 
9272 	ret = PTR_ERR_OR_ZERO(conn_state);
9273 	if (ret)
9274 		goto out;
9275 
9276 	/* Attach crtc to drm_atomic_state*/
9277 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9278 
9279 	ret = PTR_ERR_OR_ZERO(crtc_state);
9280 	if (ret)
9281 		goto out;
9282 
9283 	/* force a restore */
9284 	crtc_state->mode_changed = true;
9285 
9286 	/* Attach plane to drm_atomic_state */
9287 	plane_state = drm_atomic_get_plane_state(state, plane);
9288 
9289 	ret = PTR_ERR_OR_ZERO(plane_state);
9290 	if (ret)
9291 		goto out;
9292 
9293 	/* Call commit internally with the state we just constructed */
9294 	ret = drm_atomic_commit(state);
9295 
9296 out:
9297 	drm_atomic_state_put(state);
9298 	if (ret)
9299 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9300 
9301 	return ret;
9302 }
9303 
9304 /*
9305  * This function handles all cases when set mode does not come upon hotplug.
9306  * This includes when a display is unplugged then plugged back into the
9307  * same port and when running without usermode desktop manager supprot
9308  */
9309 void dm_restore_drm_connector_state(struct drm_device *dev,
9310 				    struct drm_connector *connector)
9311 {
9312 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9313 	struct amdgpu_crtc *disconnected_acrtc;
9314 	struct dm_crtc_state *acrtc_state;
9315 
9316 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9317 		return;
9318 
9319 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9320 	if (!disconnected_acrtc)
9321 		return;
9322 
9323 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9324 	if (!acrtc_state->stream)
9325 		return;
9326 
9327 	/*
9328 	 * If the previous sink is not released and different from the current,
9329 	 * we deduce we are in a state where we can not rely on usermode call
9330 	 * to turn on the display, so we do it here
9331 	 */
9332 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9333 		dm_force_atomic_commit(&aconnector->base);
9334 }
9335 
9336 /*
9337  * Grabs all modesetting locks to serialize against any blocking commits,
9338  * Waits for completion of all non blocking commits.
9339  */
9340 static int do_aquire_global_lock(struct drm_device *dev,
9341 				 struct drm_atomic_state *state)
9342 {
9343 	struct drm_crtc *crtc;
9344 	struct drm_crtc_commit *commit;
9345 	long ret;
9346 
9347 	/*
9348 	 * Adding all modeset locks to aquire_ctx will
9349 	 * ensure that when the framework release it the
9350 	 * extra locks we are locking here will get released to
9351 	 */
9352 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9353 	if (ret)
9354 		return ret;
9355 
9356 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9357 		spin_lock(&crtc->commit_lock);
9358 		commit = list_first_entry_or_null(&crtc->commit_list,
9359 				struct drm_crtc_commit, commit_entry);
9360 		if (commit)
9361 			drm_crtc_commit_get(commit);
9362 		spin_unlock(&crtc->commit_lock);
9363 
9364 		if (!commit)
9365 			continue;
9366 
9367 		/*
9368 		 * Make sure all pending HW programming completed and
9369 		 * page flips done
9370 		 */
9371 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9372 
9373 		if (ret > 0)
9374 			ret = wait_for_completion_interruptible_timeout(
9375 					&commit->flip_done, 10*HZ);
9376 
9377 		if (ret == 0)
9378 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9379 				  "timed out\n", crtc->base.id, crtc->name);
9380 
9381 		drm_crtc_commit_put(commit);
9382 	}
9383 
9384 	return ret < 0 ? ret : 0;
9385 }
9386 
9387 static void get_freesync_config_for_crtc(
9388 	struct dm_crtc_state *new_crtc_state,
9389 	struct dm_connector_state *new_con_state)
9390 {
9391 	struct mod_freesync_config config = {0};
9392 	struct amdgpu_dm_connector *aconnector =
9393 			to_amdgpu_dm_connector(new_con_state->base.connector);
9394 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9395 	int vrefresh = drm_mode_vrefresh(mode);
9396 	bool fs_vid_mode = false;
9397 
9398 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9399 					vrefresh >= aconnector->min_vfreq &&
9400 					vrefresh <= aconnector->max_vfreq;
9401 
9402 	if (new_crtc_state->vrr_supported) {
9403 		new_crtc_state->stream->ignore_msa_timing_param = true;
9404 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9405 
9406 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9407 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9408 		config.vsif_supported = true;
9409 		config.btr = true;
9410 
9411 		if (fs_vid_mode) {
9412 			config.state = VRR_STATE_ACTIVE_FIXED;
9413 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9414 			goto out;
9415 		} else if (new_crtc_state->base.vrr_enabled) {
9416 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9417 		} else {
9418 			config.state = VRR_STATE_INACTIVE;
9419 		}
9420 	}
9421 out:
9422 	new_crtc_state->freesync_config = config;
9423 }
9424 
9425 static void reset_freesync_config_for_crtc(
9426 	struct dm_crtc_state *new_crtc_state)
9427 {
9428 	new_crtc_state->vrr_supported = false;
9429 
9430 	memset(&new_crtc_state->vrr_infopacket, 0,
9431 	       sizeof(new_crtc_state->vrr_infopacket));
9432 }
9433 
9434 static bool
9435 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9436 				 struct drm_crtc_state *new_crtc_state)
9437 {
9438 	struct drm_display_mode old_mode, new_mode;
9439 
9440 	if (!old_crtc_state || !new_crtc_state)
9441 		return false;
9442 
9443 	old_mode = old_crtc_state->mode;
9444 	new_mode = new_crtc_state->mode;
9445 
9446 	if (old_mode.clock       == new_mode.clock &&
9447 	    old_mode.hdisplay    == new_mode.hdisplay &&
9448 	    old_mode.vdisplay    == new_mode.vdisplay &&
9449 	    old_mode.htotal      == new_mode.htotal &&
9450 	    old_mode.vtotal      != new_mode.vtotal &&
9451 	    old_mode.hsync_start == new_mode.hsync_start &&
9452 	    old_mode.vsync_start != new_mode.vsync_start &&
9453 	    old_mode.hsync_end   == new_mode.hsync_end &&
9454 	    old_mode.vsync_end   != new_mode.vsync_end &&
9455 	    old_mode.hskew       == new_mode.hskew &&
9456 	    old_mode.vscan       == new_mode.vscan &&
9457 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9458 	    (new_mode.vsync_end - new_mode.vsync_start))
9459 		return true;
9460 
9461 	return false;
9462 }
9463 
9464 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9465 	uint64_t num, den, res;
9466 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9467 
9468 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9469 
9470 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9471 	den = (unsigned long long)new_crtc_state->mode.htotal *
9472 	      (unsigned long long)new_crtc_state->mode.vtotal;
9473 
9474 	res = div_u64(num, den);
9475 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9476 }
9477 
9478 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9479 				struct drm_atomic_state *state,
9480 				struct drm_crtc *crtc,
9481 				struct drm_crtc_state *old_crtc_state,
9482 				struct drm_crtc_state *new_crtc_state,
9483 				bool enable,
9484 				bool *lock_and_validation_needed)
9485 {
9486 	struct dm_atomic_state *dm_state = NULL;
9487 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9488 	struct dc_stream_state *new_stream;
9489 	int ret = 0;
9490 
9491 	/*
9492 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9493 	 * update changed items
9494 	 */
9495 	struct amdgpu_crtc *acrtc = NULL;
9496 	struct amdgpu_dm_connector *aconnector = NULL;
9497 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9498 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9499 
9500 	new_stream = NULL;
9501 
9502 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9503 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9504 	acrtc = to_amdgpu_crtc(crtc);
9505 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9506 
9507 	/* TODO This hack should go away */
9508 	if (aconnector && enable) {
9509 		/* Make sure fake sink is created in plug-in scenario */
9510 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9511 							    &aconnector->base);
9512 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9513 							    &aconnector->base);
9514 
9515 		if (IS_ERR(drm_new_conn_state)) {
9516 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9517 			goto fail;
9518 		}
9519 
9520 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9521 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9522 
9523 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9524 			goto skip_modeset;
9525 
9526 		new_stream = create_validate_stream_for_sink(aconnector,
9527 							     &new_crtc_state->mode,
9528 							     dm_new_conn_state,
9529 							     dm_old_crtc_state->stream);
9530 
9531 		/*
9532 		 * we can have no stream on ACTION_SET if a display
9533 		 * was disconnected during S3, in this case it is not an
9534 		 * error, the OS will be updated after detection, and
9535 		 * will do the right thing on next atomic commit
9536 		 */
9537 
9538 		if (!new_stream) {
9539 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9540 					__func__, acrtc->base.base.id);
9541 			ret = -ENOMEM;
9542 			goto fail;
9543 		}
9544 
9545 		/*
9546 		 * TODO: Check VSDB bits to decide whether this should
9547 		 * be enabled or not.
9548 		 */
9549 		new_stream->triggered_crtc_reset.enabled =
9550 			dm->force_timing_sync;
9551 
9552 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9553 
9554 		ret = fill_hdr_info_packet(drm_new_conn_state,
9555 					   &new_stream->hdr_static_metadata);
9556 		if (ret)
9557 			goto fail;
9558 
9559 		/*
9560 		 * If we already removed the old stream from the context
9561 		 * (and set the new stream to NULL) then we can't reuse
9562 		 * the old stream even if the stream and scaling are unchanged.
9563 		 * We'll hit the BUG_ON and black screen.
9564 		 *
9565 		 * TODO: Refactor this function to allow this check to work
9566 		 * in all conditions.
9567 		 */
9568 		if (amdgpu_freesync_vid_mode &&
9569 		    dm_new_crtc_state->stream &&
9570 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9571 			goto skip_modeset;
9572 
9573 		if (dm_new_crtc_state->stream &&
9574 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9575 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9576 			new_crtc_state->mode_changed = false;
9577 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9578 					 new_crtc_state->mode_changed);
9579 		}
9580 	}
9581 
9582 	/* mode_changed flag may get updated above, need to check again */
9583 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9584 		goto skip_modeset;
9585 
9586 	DRM_DEBUG_ATOMIC(
9587 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9588 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9589 		"connectors_changed:%d\n",
9590 		acrtc->crtc_id,
9591 		new_crtc_state->enable,
9592 		new_crtc_state->active,
9593 		new_crtc_state->planes_changed,
9594 		new_crtc_state->mode_changed,
9595 		new_crtc_state->active_changed,
9596 		new_crtc_state->connectors_changed);
9597 
9598 	/* Remove stream for any changed/disabled CRTC */
9599 	if (!enable) {
9600 
9601 		if (!dm_old_crtc_state->stream)
9602 			goto skip_modeset;
9603 
9604 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9605 		    is_timing_unchanged_for_freesync(new_crtc_state,
9606 						     old_crtc_state)) {
9607 			new_crtc_state->mode_changed = false;
9608 			DRM_DEBUG_DRIVER(
9609 				"Mode change not required for front porch change, "
9610 				"setting mode_changed to %d",
9611 				new_crtc_state->mode_changed);
9612 
9613 			set_freesync_fixed_config(dm_new_crtc_state);
9614 
9615 			goto skip_modeset;
9616 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9617 			   is_freesync_video_mode(&new_crtc_state->mode,
9618 						  aconnector)) {
9619 			set_freesync_fixed_config(dm_new_crtc_state);
9620 		}
9621 
9622 		ret = dm_atomic_get_state(state, &dm_state);
9623 		if (ret)
9624 			goto fail;
9625 
9626 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9627 				crtc->base.id);
9628 
9629 		/* i.e. reset mode */
9630 		if (dc_remove_stream_from_ctx(
9631 				dm->dc,
9632 				dm_state->context,
9633 				dm_old_crtc_state->stream) != DC_OK) {
9634 			ret = -EINVAL;
9635 			goto fail;
9636 		}
9637 
9638 		dc_stream_release(dm_old_crtc_state->stream);
9639 		dm_new_crtc_state->stream = NULL;
9640 
9641 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9642 
9643 		*lock_and_validation_needed = true;
9644 
9645 	} else {/* Add stream for any updated/enabled CRTC */
9646 		/*
9647 		 * Quick fix to prevent NULL pointer on new_stream when
9648 		 * added MST connectors not found in existing crtc_state in the chained mode
9649 		 * TODO: need to dig out the root cause of that
9650 		 */
9651 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9652 			goto skip_modeset;
9653 
9654 		if (modereset_required(new_crtc_state))
9655 			goto skip_modeset;
9656 
9657 		if (modeset_required(new_crtc_state, new_stream,
9658 				     dm_old_crtc_state->stream)) {
9659 
9660 			WARN_ON(dm_new_crtc_state->stream);
9661 
9662 			ret = dm_atomic_get_state(state, &dm_state);
9663 			if (ret)
9664 				goto fail;
9665 
9666 			dm_new_crtc_state->stream = new_stream;
9667 
9668 			dc_stream_retain(new_stream);
9669 
9670 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9671 					 crtc->base.id);
9672 
9673 			if (dc_add_stream_to_ctx(
9674 					dm->dc,
9675 					dm_state->context,
9676 					dm_new_crtc_state->stream) != DC_OK) {
9677 				ret = -EINVAL;
9678 				goto fail;
9679 			}
9680 
9681 			*lock_and_validation_needed = true;
9682 		}
9683 	}
9684 
9685 skip_modeset:
9686 	/* Release extra reference */
9687 	if (new_stream)
9688 		 dc_stream_release(new_stream);
9689 
9690 	/*
9691 	 * We want to do dc stream updates that do not require a
9692 	 * full modeset below.
9693 	 */
9694 	if (!(enable && aconnector && new_crtc_state->active))
9695 		return 0;
9696 	/*
9697 	 * Given above conditions, the dc state cannot be NULL because:
9698 	 * 1. We're in the process of enabling CRTCs (just been added
9699 	 *    to the dc context, or already is on the context)
9700 	 * 2. Has a valid connector attached, and
9701 	 * 3. Is currently active and enabled.
9702 	 * => The dc stream state currently exists.
9703 	 */
9704 	BUG_ON(dm_new_crtc_state->stream == NULL);
9705 
9706 	/* Scaling or underscan settings */
9707 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9708 				drm_atomic_crtc_needs_modeset(new_crtc_state))
9709 		update_stream_scaling_settings(
9710 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9711 
9712 	/* ABM settings */
9713 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9714 
9715 	/*
9716 	 * Color management settings. We also update color properties
9717 	 * when a modeset is needed, to ensure it gets reprogrammed.
9718 	 */
9719 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9720 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9721 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9722 		if (ret)
9723 			goto fail;
9724 	}
9725 
9726 	/* Update Freesync settings. */
9727 	get_freesync_config_for_crtc(dm_new_crtc_state,
9728 				     dm_new_conn_state);
9729 
9730 	return ret;
9731 
9732 fail:
9733 	if (new_stream)
9734 		dc_stream_release(new_stream);
9735 	return ret;
9736 }
9737 
9738 static bool should_reset_plane(struct drm_atomic_state *state,
9739 			       struct drm_plane *plane,
9740 			       struct drm_plane_state *old_plane_state,
9741 			       struct drm_plane_state *new_plane_state)
9742 {
9743 	struct drm_plane *other;
9744 	struct drm_plane_state *old_other_state, *new_other_state;
9745 	struct drm_crtc_state *new_crtc_state;
9746 	int i;
9747 
9748 	/*
9749 	 * TODO: Remove this hack once the checks below are sufficient
9750 	 * enough to determine when we need to reset all the planes on
9751 	 * the stream.
9752 	 */
9753 	if (state->allow_modeset)
9754 		return true;
9755 
9756 	/* Exit early if we know that we're adding or removing the plane. */
9757 	if (old_plane_state->crtc != new_plane_state->crtc)
9758 		return true;
9759 
9760 	/* old crtc == new_crtc == NULL, plane not in context. */
9761 	if (!new_plane_state->crtc)
9762 		return false;
9763 
9764 	new_crtc_state =
9765 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9766 
9767 	if (!new_crtc_state)
9768 		return true;
9769 
9770 	/* CRTC Degamma changes currently require us to recreate planes. */
9771 	if (new_crtc_state->color_mgmt_changed)
9772 		return true;
9773 
9774 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9775 		return true;
9776 
9777 	/*
9778 	 * If there are any new primary or overlay planes being added or
9779 	 * removed then the z-order can potentially change. To ensure
9780 	 * correct z-order and pipe acquisition the current DC architecture
9781 	 * requires us to remove and recreate all existing planes.
9782 	 *
9783 	 * TODO: Come up with a more elegant solution for this.
9784 	 */
9785 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9786 		struct amdgpu_framebuffer *old_afb, *new_afb;
9787 		if (other->type == DRM_PLANE_TYPE_CURSOR)
9788 			continue;
9789 
9790 		if (old_other_state->crtc != new_plane_state->crtc &&
9791 		    new_other_state->crtc != new_plane_state->crtc)
9792 			continue;
9793 
9794 		if (old_other_state->crtc != new_other_state->crtc)
9795 			return true;
9796 
9797 		/* Src/dst size and scaling updates. */
9798 		if (old_other_state->src_w != new_other_state->src_w ||
9799 		    old_other_state->src_h != new_other_state->src_h ||
9800 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9801 		    old_other_state->crtc_h != new_other_state->crtc_h)
9802 			return true;
9803 
9804 		/* Rotation / mirroring updates. */
9805 		if (old_other_state->rotation != new_other_state->rotation)
9806 			return true;
9807 
9808 		/* Blending updates. */
9809 		if (old_other_state->pixel_blend_mode !=
9810 		    new_other_state->pixel_blend_mode)
9811 			return true;
9812 
9813 		/* Alpha updates. */
9814 		if (old_other_state->alpha != new_other_state->alpha)
9815 			return true;
9816 
9817 		/* Colorspace changes. */
9818 		if (old_other_state->color_range != new_other_state->color_range ||
9819 		    old_other_state->color_encoding != new_other_state->color_encoding)
9820 			return true;
9821 
9822 		/* Framebuffer checks fall at the end. */
9823 		if (!old_other_state->fb || !new_other_state->fb)
9824 			continue;
9825 
9826 		/* Pixel format changes can require bandwidth updates. */
9827 		if (old_other_state->fb->format != new_other_state->fb->format)
9828 			return true;
9829 
9830 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9831 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9832 
9833 		/* Tiling and DCC changes also require bandwidth updates. */
9834 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9835 		    old_afb->base.modifier != new_afb->base.modifier)
9836 			return true;
9837 	}
9838 
9839 	return false;
9840 }
9841 
9842 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9843 			      struct drm_plane_state *new_plane_state,
9844 			      struct drm_framebuffer *fb)
9845 {
9846 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9847 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9848 	unsigned int pitch;
9849 	bool linear;
9850 
9851 	if (fb->width > new_acrtc->max_cursor_width ||
9852 	    fb->height > new_acrtc->max_cursor_height) {
9853 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9854 				 new_plane_state->fb->width,
9855 				 new_plane_state->fb->height);
9856 		return -EINVAL;
9857 	}
9858 	if (new_plane_state->src_w != fb->width << 16 ||
9859 	    new_plane_state->src_h != fb->height << 16) {
9860 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9861 		return -EINVAL;
9862 	}
9863 
9864 	/* Pitch in pixels */
9865 	pitch = fb->pitches[0] / fb->format->cpp[0];
9866 
9867 	if (fb->width != pitch) {
9868 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9869 				 fb->width, pitch);
9870 		return -EINVAL;
9871 	}
9872 
9873 	switch (pitch) {
9874 	case 64:
9875 	case 128:
9876 	case 256:
9877 		/* FB pitch is supported by cursor plane */
9878 		break;
9879 	default:
9880 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9881 		return -EINVAL;
9882 	}
9883 
9884 	/* Core DRM takes care of checking FB modifiers, so we only need to
9885 	 * check tiling flags when the FB doesn't have a modifier. */
9886 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9887 		if (adev->family < AMDGPU_FAMILY_AI) {
9888 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9889 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9890 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9891 		} else {
9892 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9893 		}
9894 		if (!linear) {
9895 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9896 			return -EINVAL;
9897 		}
9898 	}
9899 
9900 	return 0;
9901 }
9902 
9903 static int dm_update_plane_state(struct dc *dc,
9904 				 struct drm_atomic_state *state,
9905 				 struct drm_plane *plane,
9906 				 struct drm_plane_state *old_plane_state,
9907 				 struct drm_plane_state *new_plane_state,
9908 				 bool enable,
9909 				 bool *lock_and_validation_needed)
9910 {
9911 
9912 	struct dm_atomic_state *dm_state = NULL;
9913 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9914 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9915 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9916 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9917 	struct amdgpu_crtc *new_acrtc;
9918 	bool needs_reset;
9919 	int ret = 0;
9920 
9921 
9922 	new_plane_crtc = new_plane_state->crtc;
9923 	old_plane_crtc = old_plane_state->crtc;
9924 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9925 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9926 
9927 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9928 		if (!enable || !new_plane_crtc ||
9929 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9930 			return 0;
9931 
9932 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9933 
9934 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9935 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9936 			return -EINVAL;
9937 		}
9938 
9939 		if (new_plane_state->fb) {
9940 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9941 						 new_plane_state->fb);
9942 			if (ret)
9943 				return ret;
9944 		}
9945 
9946 		return 0;
9947 	}
9948 
9949 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9950 					 new_plane_state);
9951 
9952 	/* Remove any changed/removed planes */
9953 	if (!enable) {
9954 		if (!needs_reset)
9955 			return 0;
9956 
9957 		if (!old_plane_crtc)
9958 			return 0;
9959 
9960 		old_crtc_state = drm_atomic_get_old_crtc_state(
9961 				state, old_plane_crtc);
9962 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9963 
9964 		if (!dm_old_crtc_state->stream)
9965 			return 0;
9966 
9967 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9968 				plane->base.id, old_plane_crtc->base.id);
9969 
9970 		ret = dm_atomic_get_state(state, &dm_state);
9971 		if (ret)
9972 			return ret;
9973 
9974 		if (!dc_remove_plane_from_context(
9975 				dc,
9976 				dm_old_crtc_state->stream,
9977 				dm_old_plane_state->dc_state,
9978 				dm_state->context)) {
9979 
9980 			return -EINVAL;
9981 		}
9982 
9983 
9984 		dc_plane_state_release(dm_old_plane_state->dc_state);
9985 		dm_new_plane_state->dc_state = NULL;
9986 
9987 		*lock_and_validation_needed = true;
9988 
9989 	} else { /* Add new planes */
9990 		struct dc_plane_state *dc_new_plane_state;
9991 
9992 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9993 			return 0;
9994 
9995 		if (!new_plane_crtc)
9996 			return 0;
9997 
9998 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9999 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10000 
10001 		if (!dm_new_crtc_state->stream)
10002 			return 0;
10003 
10004 		if (!needs_reset)
10005 			return 0;
10006 
10007 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10008 		if (ret)
10009 			return ret;
10010 
10011 		WARN_ON(dm_new_plane_state->dc_state);
10012 
10013 		dc_new_plane_state = dc_create_plane_state(dc);
10014 		if (!dc_new_plane_state)
10015 			return -ENOMEM;
10016 
10017 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10018 				 plane->base.id, new_plane_crtc->base.id);
10019 
10020 		ret = fill_dc_plane_attributes(
10021 			drm_to_adev(new_plane_crtc->dev),
10022 			dc_new_plane_state,
10023 			new_plane_state,
10024 			new_crtc_state);
10025 		if (ret) {
10026 			dc_plane_state_release(dc_new_plane_state);
10027 			return ret;
10028 		}
10029 
10030 		ret = dm_atomic_get_state(state, &dm_state);
10031 		if (ret) {
10032 			dc_plane_state_release(dc_new_plane_state);
10033 			return ret;
10034 		}
10035 
10036 		/*
10037 		 * Any atomic check errors that occur after this will
10038 		 * not need a release. The plane state will be attached
10039 		 * to the stream, and therefore part of the atomic
10040 		 * state. It'll be released when the atomic state is
10041 		 * cleaned.
10042 		 */
10043 		if (!dc_add_plane_to_context(
10044 				dc,
10045 				dm_new_crtc_state->stream,
10046 				dc_new_plane_state,
10047 				dm_state->context)) {
10048 
10049 			dc_plane_state_release(dc_new_plane_state);
10050 			return -EINVAL;
10051 		}
10052 
10053 		dm_new_plane_state->dc_state = dc_new_plane_state;
10054 
10055 		/* Tell DC to do a full surface update every time there
10056 		 * is a plane change. Inefficient, but works for now.
10057 		 */
10058 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10059 
10060 		*lock_and_validation_needed = true;
10061 	}
10062 
10063 
10064 	return ret;
10065 }
10066 
10067 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10068 				struct drm_crtc *crtc,
10069 				struct drm_crtc_state *new_crtc_state)
10070 {
10071 	struct drm_plane_state *new_cursor_state, *new_primary_state;
10072 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10073 
10074 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10075 	 * cursor per pipe but it's going to inherit the scaling and
10076 	 * positioning from the underlying pipe. Check the cursor plane's
10077 	 * blending properties match the primary plane's. */
10078 
10079 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10080 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10081 	if (!new_cursor_state || !new_primary_state ||
10082 	    !new_cursor_state->fb || !new_primary_state->fb) {
10083 		return 0;
10084 	}
10085 
10086 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10087 			 (new_cursor_state->src_w >> 16);
10088 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10089 			 (new_cursor_state->src_h >> 16);
10090 
10091 	primary_scale_w = new_primary_state->crtc_w * 1000 /
10092 			 (new_primary_state->src_w >> 16);
10093 	primary_scale_h = new_primary_state->crtc_h * 1000 /
10094 			 (new_primary_state->src_h >> 16);
10095 
10096 	if (cursor_scale_w != primary_scale_w ||
10097 	    cursor_scale_h != primary_scale_h) {
10098 		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10099 		return -EINVAL;
10100 	}
10101 
10102 	return 0;
10103 }
10104 
10105 #if defined(CONFIG_DRM_AMD_DC_DCN)
10106 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10107 {
10108 	struct drm_connector *connector;
10109 	struct drm_connector_state *conn_state;
10110 	struct amdgpu_dm_connector *aconnector = NULL;
10111 	int i;
10112 	for_each_new_connector_in_state(state, connector, conn_state, i) {
10113 		if (conn_state->crtc != crtc)
10114 			continue;
10115 
10116 		aconnector = to_amdgpu_dm_connector(connector);
10117 		if (!aconnector->port || !aconnector->mst_port)
10118 			aconnector = NULL;
10119 		else
10120 			break;
10121 	}
10122 
10123 	if (!aconnector)
10124 		return 0;
10125 
10126 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10127 }
10128 #endif
10129 
10130 static int validate_overlay(struct drm_atomic_state *state)
10131 {
10132 	int i;
10133 	struct drm_plane *plane;
10134 	struct drm_plane_state *new_plane_state;
10135 	struct drm_plane_state *primary_state, *overlay_state = NULL;
10136 
10137 	/* Check if primary plane is contained inside overlay */
10138 	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10139 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10140 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10141 				return 0;
10142 
10143 			overlay_state = new_plane_state;
10144 			continue;
10145 		}
10146 	}
10147 
10148 	/* check if we're making changes to the overlay plane */
10149 	if (!overlay_state)
10150 		return 0;
10151 
10152 	/* check if overlay plane is enabled */
10153 	if (!overlay_state->crtc)
10154 		return 0;
10155 
10156 	/* find the primary plane for the CRTC that the overlay is enabled on */
10157 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10158 	if (IS_ERR(primary_state))
10159 		return PTR_ERR(primary_state);
10160 
10161 	/* check if primary plane is enabled */
10162 	if (!primary_state->crtc)
10163 		return 0;
10164 
10165 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10166 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10167 	    primary_state->crtc_y < overlay_state->crtc_y ||
10168 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10169 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10170 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10171 		return -EINVAL;
10172 	}
10173 
10174 	return 0;
10175 }
10176 
10177 /**
10178  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10179  * @dev: The DRM device
10180  * @state: The atomic state to commit
10181  *
10182  * Validate that the given atomic state is programmable by DC into hardware.
10183  * This involves constructing a &struct dc_state reflecting the new hardware
10184  * state we wish to commit, then querying DC to see if it is programmable. It's
10185  * important not to modify the existing DC state. Otherwise, atomic_check
10186  * may unexpectedly commit hardware changes.
10187  *
10188  * When validating the DC state, it's important that the right locks are
10189  * acquired. For full updates case which removes/adds/updates streams on one
10190  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10191  * that any such full update commit will wait for completion of any outstanding
10192  * flip using DRMs synchronization events.
10193  *
10194  * Note that DM adds the affected connectors for all CRTCs in state, when that
10195  * might not seem necessary. This is because DC stream creation requires the
10196  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10197  * be possible but non-trivial - a possible TODO item.
10198  *
10199  * Return: -Error code if validation failed.
10200  */
10201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10202 				  struct drm_atomic_state *state)
10203 {
10204 	struct amdgpu_device *adev = drm_to_adev(dev);
10205 	struct dm_atomic_state *dm_state = NULL;
10206 	struct dc *dc = adev->dm.dc;
10207 	struct drm_connector *connector;
10208 	struct drm_connector_state *old_con_state, *new_con_state;
10209 	struct drm_crtc *crtc;
10210 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10211 	struct drm_plane *plane;
10212 	struct drm_plane_state *old_plane_state, *new_plane_state;
10213 	enum dc_status status;
10214 	int ret, i;
10215 	bool lock_and_validation_needed = false;
10216 	struct dm_crtc_state *dm_old_crtc_state;
10217 
10218 	trace_amdgpu_dm_atomic_check_begin(state);
10219 
10220 	ret = drm_atomic_helper_check_modeset(dev, state);
10221 	if (ret)
10222 		goto fail;
10223 
10224 	/* Check connector changes */
10225 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10226 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10227 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10228 
10229 		/* Skip connectors that are disabled or part of modeset already. */
10230 		if (!old_con_state->crtc && !new_con_state->crtc)
10231 			continue;
10232 
10233 		if (!new_con_state->crtc)
10234 			continue;
10235 
10236 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10237 		if (IS_ERR(new_crtc_state)) {
10238 			ret = PTR_ERR(new_crtc_state);
10239 			goto fail;
10240 		}
10241 
10242 		if (dm_old_con_state->abm_level !=
10243 		    dm_new_con_state->abm_level)
10244 			new_crtc_state->connectors_changed = true;
10245 	}
10246 
10247 #if defined(CONFIG_DRM_AMD_DC_DCN)
10248 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10249 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10250 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10251 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10252 				if (ret)
10253 					goto fail;
10254 			}
10255 		}
10256 	}
10257 #endif
10258 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10259 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10260 
10261 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10262 		    !new_crtc_state->color_mgmt_changed &&
10263 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10264 			dm_old_crtc_state->dsc_force_changed == false)
10265 			continue;
10266 
10267 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10268 		if (ret)
10269 			goto fail;
10270 
10271 		if (!new_crtc_state->enable)
10272 			continue;
10273 
10274 		ret = drm_atomic_add_affected_connectors(state, crtc);
10275 		if (ret)
10276 			return ret;
10277 
10278 		ret = drm_atomic_add_affected_planes(state, crtc);
10279 		if (ret)
10280 			goto fail;
10281 
10282 		if (dm_old_crtc_state->dsc_force_changed)
10283 			new_crtc_state->mode_changed = true;
10284 	}
10285 
10286 	/*
10287 	 * Add all primary and overlay planes on the CRTC to the state
10288 	 * whenever a plane is enabled to maintain correct z-ordering
10289 	 * and to enable fast surface updates.
10290 	 */
10291 	drm_for_each_crtc(crtc, dev) {
10292 		bool modified = false;
10293 
10294 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10295 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10296 				continue;
10297 
10298 			if (new_plane_state->crtc == crtc ||
10299 			    old_plane_state->crtc == crtc) {
10300 				modified = true;
10301 				break;
10302 			}
10303 		}
10304 
10305 		if (!modified)
10306 			continue;
10307 
10308 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10309 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10310 				continue;
10311 
10312 			new_plane_state =
10313 				drm_atomic_get_plane_state(state, plane);
10314 
10315 			if (IS_ERR(new_plane_state)) {
10316 				ret = PTR_ERR(new_plane_state);
10317 				goto fail;
10318 			}
10319 		}
10320 	}
10321 
10322 	/* Remove exiting planes if they are modified */
10323 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10324 		ret = dm_update_plane_state(dc, state, plane,
10325 					    old_plane_state,
10326 					    new_plane_state,
10327 					    false,
10328 					    &lock_and_validation_needed);
10329 		if (ret)
10330 			goto fail;
10331 	}
10332 
10333 	/* Disable all crtcs which require disable */
10334 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10335 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10336 					   old_crtc_state,
10337 					   new_crtc_state,
10338 					   false,
10339 					   &lock_and_validation_needed);
10340 		if (ret)
10341 			goto fail;
10342 	}
10343 
10344 	/* Enable all crtcs which require enable */
10345 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10346 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10347 					   old_crtc_state,
10348 					   new_crtc_state,
10349 					   true,
10350 					   &lock_and_validation_needed);
10351 		if (ret)
10352 			goto fail;
10353 	}
10354 
10355 	ret = validate_overlay(state);
10356 	if (ret)
10357 		goto fail;
10358 
10359 	/* Add new/modified planes */
10360 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10361 		ret = dm_update_plane_state(dc, state, plane,
10362 					    old_plane_state,
10363 					    new_plane_state,
10364 					    true,
10365 					    &lock_and_validation_needed);
10366 		if (ret)
10367 			goto fail;
10368 	}
10369 
10370 	/* Run this here since we want to validate the streams we created */
10371 	ret = drm_atomic_helper_check_planes(dev, state);
10372 	if (ret)
10373 		goto fail;
10374 
10375 	/* Check cursor planes scaling */
10376 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10377 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10378 		if (ret)
10379 			goto fail;
10380 	}
10381 
10382 	if (state->legacy_cursor_update) {
10383 		/*
10384 		 * This is a fast cursor update coming from the plane update
10385 		 * helper, check if it can be done asynchronously for better
10386 		 * performance.
10387 		 */
10388 		state->async_update =
10389 			!drm_atomic_helper_async_check(dev, state);
10390 
10391 		/*
10392 		 * Skip the remaining global validation if this is an async
10393 		 * update. Cursor updates can be done without affecting
10394 		 * state or bandwidth calcs and this avoids the performance
10395 		 * penalty of locking the private state object and
10396 		 * allocating a new dc_state.
10397 		 */
10398 		if (state->async_update)
10399 			return 0;
10400 	}
10401 
10402 	/* Check scaling and underscan changes*/
10403 	/* TODO Removed scaling changes validation due to inability to commit
10404 	 * new stream into context w\o causing full reset. Need to
10405 	 * decide how to handle.
10406 	 */
10407 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10408 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10409 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10410 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10411 
10412 		/* Skip any modesets/resets */
10413 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10414 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10415 			continue;
10416 
10417 		/* Skip any thing not scale or underscan changes */
10418 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10419 			continue;
10420 
10421 		lock_and_validation_needed = true;
10422 	}
10423 
10424 	/**
10425 	 * Streams and planes are reset when there are changes that affect
10426 	 * bandwidth. Anything that affects bandwidth needs to go through
10427 	 * DC global validation to ensure that the configuration can be applied
10428 	 * to hardware.
10429 	 *
10430 	 * We have to currently stall out here in atomic_check for outstanding
10431 	 * commits to finish in this case because our IRQ handlers reference
10432 	 * DRM state directly - we can end up disabling interrupts too early
10433 	 * if we don't.
10434 	 *
10435 	 * TODO: Remove this stall and drop DM state private objects.
10436 	 */
10437 	if (lock_and_validation_needed) {
10438 		ret = dm_atomic_get_state(state, &dm_state);
10439 		if (ret)
10440 			goto fail;
10441 
10442 		ret = do_aquire_global_lock(dev, state);
10443 		if (ret)
10444 			goto fail;
10445 
10446 #if defined(CONFIG_DRM_AMD_DC_DCN)
10447 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10448 			goto fail;
10449 
10450 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10451 		if (ret)
10452 			goto fail;
10453 #endif
10454 
10455 		/*
10456 		 * Perform validation of MST topology in the state:
10457 		 * We need to perform MST atomic check before calling
10458 		 * dc_validate_global_state(), or there is a chance
10459 		 * to get stuck in an infinite loop and hang eventually.
10460 		 */
10461 		ret = drm_dp_mst_atomic_check(state);
10462 		if (ret)
10463 			goto fail;
10464 		status = dc_validate_global_state(dc, dm_state->context, false);
10465 		if (status != DC_OK) {
10466 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10467 				       dc_status_to_str(status), status);
10468 			ret = -EINVAL;
10469 			goto fail;
10470 		}
10471 	} else {
10472 		/*
10473 		 * The commit is a fast update. Fast updates shouldn't change
10474 		 * the DC context, affect global validation, and can have their
10475 		 * commit work done in parallel with other commits not touching
10476 		 * the same resource. If we have a new DC context as part of
10477 		 * the DM atomic state from validation we need to free it and
10478 		 * retain the existing one instead.
10479 		 *
10480 		 * Furthermore, since the DM atomic state only contains the DC
10481 		 * context and can safely be annulled, we can free the state
10482 		 * and clear the associated private object now to free
10483 		 * some memory and avoid a possible use-after-free later.
10484 		 */
10485 
10486 		for (i = 0; i < state->num_private_objs; i++) {
10487 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10488 
10489 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
10490 				int j = state->num_private_objs-1;
10491 
10492 				dm_atomic_destroy_state(obj,
10493 						state->private_objs[i].state);
10494 
10495 				/* If i is not at the end of the array then the
10496 				 * last element needs to be moved to where i was
10497 				 * before the array can safely be truncated.
10498 				 */
10499 				if (i != j)
10500 					state->private_objs[i] =
10501 						state->private_objs[j];
10502 
10503 				state->private_objs[j].ptr = NULL;
10504 				state->private_objs[j].state = NULL;
10505 				state->private_objs[j].old_state = NULL;
10506 				state->private_objs[j].new_state = NULL;
10507 
10508 				state->num_private_objs = j;
10509 				break;
10510 			}
10511 		}
10512 	}
10513 
10514 	/* Store the overall update type for use later in atomic check. */
10515 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10516 		struct dm_crtc_state *dm_new_crtc_state =
10517 			to_dm_crtc_state(new_crtc_state);
10518 
10519 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10520 							 UPDATE_TYPE_FULL :
10521 							 UPDATE_TYPE_FAST;
10522 	}
10523 
10524 	/* Must be success */
10525 	WARN_ON(ret);
10526 
10527 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10528 
10529 	return ret;
10530 
10531 fail:
10532 	if (ret == -EDEADLK)
10533 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10534 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10535 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10536 	else
10537 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10538 
10539 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10540 
10541 	return ret;
10542 }
10543 
10544 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10545 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10546 {
10547 	uint8_t dpcd_data;
10548 	bool capable = false;
10549 
10550 	if (amdgpu_dm_connector->dc_link &&
10551 		dm_helpers_dp_read_dpcd(
10552 				NULL,
10553 				amdgpu_dm_connector->dc_link,
10554 				DP_DOWN_STREAM_PORT_COUNT,
10555 				&dpcd_data,
10556 				sizeof(dpcd_data))) {
10557 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
10558 	}
10559 
10560 	return capable;
10561 }
10562 
10563 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10564 		unsigned int offset,
10565 		unsigned int total_length,
10566 		uint8_t *data,
10567 		unsigned int length,
10568 		struct amdgpu_hdmi_vsdb_info *vsdb)
10569 {
10570 	bool res;
10571 	union dmub_rb_cmd cmd;
10572 	struct dmub_cmd_send_edid_cea *input;
10573 	struct dmub_cmd_edid_cea_output *output;
10574 
10575 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10576 		return false;
10577 
10578 	memset(&cmd, 0, sizeof(cmd));
10579 
10580 	input = &cmd.edid_cea.data.input;
10581 
10582 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10583 	cmd.edid_cea.header.sub_type = 0;
10584 	cmd.edid_cea.header.payload_bytes =
10585 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10586 	input->offset = offset;
10587 	input->length = length;
10588 	input->total_length = total_length;
10589 	memcpy(input->payload, data, length);
10590 
10591 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
10592 	if (!res) {
10593 		DRM_ERROR("EDID CEA parser failed\n");
10594 		return false;
10595 	}
10596 
10597 	output = &cmd.edid_cea.data.output;
10598 
10599 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10600 		if (!output->ack.success) {
10601 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
10602 					output->ack.offset);
10603 		}
10604 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10605 		if (!output->amd_vsdb.vsdb_found)
10606 			return false;
10607 
10608 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10609 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10610 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10611 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10612 	} else {
10613 		DRM_WARN("Unknown EDID CEA parser results\n");
10614 		return false;
10615 	}
10616 
10617 	return true;
10618 }
10619 
10620 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
10621 		uint8_t *edid_ext, int len,
10622 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10623 {
10624 	int i;
10625 
10626 	/* send extension block to DMCU for parsing */
10627 	for (i = 0; i < len; i += 8) {
10628 		bool res;
10629 		int offset;
10630 
10631 		/* send 8 bytes a time */
10632 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
10633 			return false;
10634 
10635 		if (i+8 == len) {
10636 			/* EDID block sent completed, expect result */
10637 			int version, min_rate, max_rate;
10638 
10639 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
10640 			if (res) {
10641 				/* amd vsdb found */
10642 				vsdb_info->freesync_supported = 1;
10643 				vsdb_info->amd_vsdb_version = version;
10644 				vsdb_info->min_refresh_rate_hz = min_rate;
10645 				vsdb_info->max_refresh_rate_hz = max_rate;
10646 				return true;
10647 			}
10648 			/* not amd vsdb */
10649 			return false;
10650 		}
10651 
10652 		/* check for ack*/
10653 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
10654 		if (!res)
10655 			return false;
10656 	}
10657 
10658 	return false;
10659 }
10660 
10661 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10662 		uint8_t *edid_ext, int len,
10663 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10664 {
10665 	int i;
10666 
10667 	/* send extension block to DMCU for parsing */
10668 	for (i = 0; i < len; i += 8) {
10669 		/* send 8 bytes a time */
10670 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10671 			return false;
10672 	}
10673 
10674 	return vsdb_info->freesync_supported;
10675 }
10676 
10677 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10678 		uint8_t *edid_ext, int len,
10679 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10680 {
10681 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10682 
10683 	if (adev->dm.dmub_srv)
10684 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10685 	else
10686 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10687 }
10688 
10689 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10690 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10691 {
10692 	uint8_t *edid_ext = NULL;
10693 	int i;
10694 	bool valid_vsdb_found = false;
10695 
10696 	/*----- drm_find_cea_extension() -----*/
10697 	/* No EDID or EDID extensions */
10698 	if (edid == NULL || edid->extensions == 0)
10699 		return -ENODEV;
10700 
10701 	/* Find CEA extension */
10702 	for (i = 0; i < edid->extensions; i++) {
10703 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10704 		if (edid_ext[0] == CEA_EXT)
10705 			break;
10706 	}
10707 
10708 	if (i == edid->extensions)
10709 		return -ENODEV;
10710 
10711 	/*----- cea_db_offsets() -----*/
10712 	if (edid_ext[0] != CEA_EXT)
10713 		return -ENODEV;
10714 
10715 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10716 
10717 	return valid_vsdb_found ? i : -ENODEV;
10718 }
10719 
10720 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10721 					struct edid *edid)
10722 {
10723 	int i = 0;
10724 	struct detailed_timing *timing;
10725 	struct detailed_non_pixel *data;
10726 	struct detailed_data_monitor_range *range;
10727 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10728 			to_amdgpu_dm_connector(connector);
10729 	struct dm_connector_state *dm_con_state = NULL;
10730 
10731 	struct drm_device *dev = connector->dev;
10732 	struct amdgpu_device *adev = drm_to_adev(dev);
10733 	bool freesync_capable = false;
10734 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10735 
10736 	if (!connector->state) {
10737 		DRM_ERROR("%s - Connector has no state", __func__);
10738 		goto update;
10739 	}
10740 
10741 	if (!edid) {
10742 		dm_con_state = to_dm_connector_state(connector->state);
10743 
10744 		amdgpu_dm_connector->min_vfreq = 0;
10745 		amdgpu_dm_connector->max_vfreq = 0;
10746 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10747 
10748 		goto update;
10749 	}
10750 
10751 	dm_con_state = to_dm_connector_state(connector->state);
10752 
10753 	if (!amdgpu_dm_connector->dc_sink) {
10754 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10755 		goto update;
10756 	}
10757 	if (!adev->dm.freesync_module)
10758 		goto update;
10759 
10760 
10761 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10762 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10763 		bool edid_check_required = false;
10764 
10765 		if (edid) {
10766 			edid_check_required = is_dp_capable_without_timing_msa(
10767 						adev->dm.dc,
10768 						amdgpu_dm_connector);
10769 		}
10770 
10771 		if (edid_check_required == true && (edid->version > 1 ||
10772 		   (edid->version == 1 && edid->revision > 1))) {
10773 			for (i = 0; i < 4; i++) {
10774 
10775 				timing	= &edid->detailed_timings[i];
10776 				data	= &timing->data.other_data;
10777 				range	= &data->data.range;
10778 				/*
10779 				 * Check if monitor has continuous frequency mode
10780 				 */
10781 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10782 					continue;
10783 				/*
10784 				 * Check for flag range limits only. If flag == 1 then
10785 				 * no additional timing information provided.
10786 				 * Default GTF, GTF Secondary curve and CVT are not
10787 				 * supported
10788 				 */
10789 				if (range->flags != 1)
10790 					continue;
10791 
10792 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10793 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10794 				amdgpu_dm_connector->pixel_clock_mhz =
10795 					range->pixel_clock_mhz * 10;
10796 
10797 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10798 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10799 
10800 				break;
10801 			}
10802 
10803 			if (amdgpu_dm_connector->max_vfreq -
10804 			    amdgpu_dm_connector->min_vfreq > 10) {
10805 
10806 				freesync_capable = true;
10807 			}
10808 		}
10809 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10810 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10811 		if (i >= 0 && vsdb_info.freesync_supported) {
10812 			timing  = &edid->detailed_timings[i];
10813 			data    = &timing->data.other_data;
10814 
10815 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10816 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10817 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10818 				freesync_capable = true;
10819 
10820 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10821 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10822 		}
10823 	}
10824 
10825 update:
10826 	if (dm_con_state)
10827 		dm_con_state->freesync_capable = freesync_capable;
10828 
10829 	if (connector->vrr_capable_property)
10830 		drm_connector_set_vrr_capable_property(connector,
10831 						       freesync_capable);
10832 }
10833 
10834 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10835 {
10836 	struct amdgpu_device *adev = drm_to_adev(dev);
10837 	struct dc *dc = adev->dm.dc;
10838 	int i;
10839 
10840 	mutex_lock(&adev->dm.dc_lock);
10841 	if (dc->current_state) {
10842 		for (i = 0; i < dc->current_state->stream_count; ++i)
10843 			dc->current_state->streams[i]
10844 				->triggered_crtc_reset.enabled =
10845 				adev->dm.force_timing_sync;
10846 
10847 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10848 		dc_trigger_sync(dc, dc->current_state);
10849 	}
10850 	mutex_unlock(&adev->dm.dc_lock);
10851 }
10852 
10853 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10854 		       uint32_t value, const char *func_name)
10855 {
10856 #ifdef DM_CHECK_ADDR_0
10857 	if (address == 0) {
10858 		DC_ERR("invalid register write. address = 0");
10859 		return;
10860 	}
10861 #endif
10862 	cgs_write_register(ctx->cgs_device, address, value);
10863 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10864 }
10865 
10866 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10867 			  const char *func_name)
10868 {
10869 	uint32_t value;
10870 #ifdef DM_CHECK_ADDR_0
10871 	if (address == 0) {
10872 		DC_ERR("invalid register read; address = 0\n");
10873 		return 0;
10874 	}
10875 #endif
10876 
10877 	if (ctx->dmub_srv &&
10878 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10879 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10880 		ASSERT(false);
10881 		return 0;
10882 	}
10883 
10884 	value = cgs_read_register(ctx->cgs_device, address);
10885 
10886 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10887 
10888 	return value;
10889 }
10890 
10891 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10892 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
10893 {
10894 	struct amdgpu_device *adev = ctx->driver_context;
10895 	int ret = 0;
10896 
10897 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10898 	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10899 	if (ret == 0) {
10900 		*operation_result = AUX_RET_ERROR_TIMEOUT;
10901 		return -1;
10902 	}
10903 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10904 
10905 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10906 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10907 
10908 		// For read case, Copy data to payload
10909 		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10910 		(*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10911 			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10912 			adev->dm.dmub_notify->aux_reply.length);
10913 	}
10914 
10915 	return adev->dm.dmub_notify->aux_reply.length;
10916 }
10917