1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41 
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53 
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61 #include "amdgpu_dm_psr.h"
62 
63 #include "ivsrcid/ivsrcid_vislands30.h"
64 
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
73 
74 #include <drm/drm_atomic.h>
75 #include <drm/drm_atomic_uapi.h>
76 #include <drm/drm_atomic_helper.h>
77 #include <drm/drm_dp_mst_helper.h>
78 #include <drm/drm_fb_helper.h>
79 #include <drm/drm_fourcc.h>
80 #include <drm/drm_edid.h>
81 #include <drm/drm_vblank.h>
82 #include <drm/drm_audio_component.h>
83 
84 #if defined(CONFIG_DRM_AMD_DC_DCN)
85 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
86 
87 #include "dcn/dcn_1_0_offset.h"
88 #include "dcn/dcn_1_0_sh_mask.h"
89 #include "soc15_hw_ip.h"
90 #include "vega10_ip_offset.h"
91 
92 #include "soc15_common.h"
93 #endif
94 
95 #include "modules/inc/mod_freesync.h"
96 #include "modules/power/power_helpers.h"
97 #include "modules/inc/mod_info_packet.h"
98 
99 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
101 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
103 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
105 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
107 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
109 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
111 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
113 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
115 
116 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
117 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
118 
119 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
120 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
121 
122 /* Number of bytes in PSP header for firmware. */
123 #define PSP_HEADER_BYTES 0x100
124 
125 /* Number of bytes in PSP footer for firmware. */
126 #define PSP_FOOTER_BYTES 0x100
127 
128 /**
129  * DOC: overview
130  *
131  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
132  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
133  * requests into DC requests, and DC responses into DRM responses.
134  *
135  * The root control structure is &struct amdgpu_display_manager.
136  */
137 
138 /* basic init/fini API */
139 static int amdgpu_dm_init(struct amdgpu_device *adev);
140 static void amdgpu_dm_fini(struct amdgpu_device *adev);
141 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
142 
143 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
144 {
145 	switch (link->dpcd_caps.dongle_type) {
146 	case DISPLAY_DONGLE_NONE:
147 		return DRM_MODE_SUBCONNECTOR_Native;
148 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
149 		return DRM_MODE_SUBCONNECTOR_VGA;
150 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
151 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
152 		return DRM_MODE_SUBCONNECTOR_DVID;
153 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
154 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
155 		return DRM_MODE_SUBCONNECTOR_HDMIA;
156 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
157 	default:
158 		return DRM_MODE_SUBCONNECTOR_Unknown;
159 	}
160 }
161 
162 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
163 {
164 	struct dc_link *link = aconnector->dc_link;
165 	struct drm_connector *connector = &aconnector->base;
166 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
167 
168 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
169 		return;
170 
171 	if (aconnector->dc_sink)
172 		subconnector = get_subconnector_type(link);
173 
174 	drm_object_property_set_value(&connector->base,
175 			connector->dev->mode_config.dp_subconnector_property,
176 			subconnector);
177 }
178 
179 /*
180  * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
182  * drm_encoder, drm_mode_config
183  *
184  * Returns 0 on success
185  */
186 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
187 /* removes and deallocates the drm structures, created by the above function */
188 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
189 
190 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
191 				struct drm_plane *plane,
192 				unsigned long possible_crtcs,
193 				const struct dc_plane_cap *plane_cap);
194 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
195 			       struct drm_plane *plane,
196 			       uint32_t link_index);
197 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
198 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
199 				    uint32_t link_index,
200 				    struct amdgpu_encoder *amdgpu_encoder);
201 static int amdgpu_dm_encoder_init(struct drm_device *dev,
202 				  struct amdgpu_encoder *aencoder,
203 				  uint32_t link_index);
204 
205 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
206 
207 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
208 
209 static int amdgpu_dm_atomic_check(struct drm_device *dev,
210 				  struct drm_atomic_state *state);
211 
212 static void handle_cursor_update(struct drm_plane *plane,
213 				 struct drm_plane_state *old_plane_state);
214 
215 static const struct drm_format_info *
216 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 
218 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
219 
220 static bool
221 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
222 				 struct drm_crtc_state *new_crtc_state);
223 /*
224  * dm_vblank_get_counter
225  *
226  * @brief
227  * Get counter for number of vertical blanks
228  *
229  * @param
230  * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
232  *
233  * @return
234  * Counter for vertical blanks
235  */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
252 
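/*
 * dm_crtc_get_scanoutpos() - Return the current scanout position for a CRTC.
 *
 * Queries DC for the vblank start/end and the current horizontal/vertical
 * position of the CRTC's stream, and packs them into the register-style
 * format expected by the base driver.
 */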
253 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
254 				  u32 *vbl, u32 *position)
255 {
256 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
257 
258 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
259 		return -EINVAL;
260 	else {
261 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
262 
		if (acrtc->dm_irq_params.stream == NULL) {
264 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
265 				  crtc);
266 			return 0;
267 		}
268 
269 		/*
270 		 * TODO rework base driver to use values directly.
271 		 * for now parse it back into reg-format
272 		 */
273 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
274 					 &v_blank_start,
275 					 &v_blank_end,
276 					 &h_position,
277 					 &v_position);
278 
279 		*position = v_position | (h_position << 16);
280 		*vbl = v_blank_start | (v_blank_end << 16);
281 	}
282 
283 	return 0;
284 }
285 
286 static bool dm_is_idle(void *handle)
287 {
288 	/* XXX todo */
289 	return true;
290 }
291 
292 static int dm_wait_for_idle(void *handle)
293 {
294 	/* XXX todo */
295 	return 0;
296 }
297 
298 static bool dm_check_soft_reset(void *handle)
299 {
300 	return false;
301 }
302 
303 static int dm_soft_reset(void *handle)
304 {
305 	/* XXX todo */
306 	return 0;
307 }
308 
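/*
 * get_crtc_by_otg_inst() - Look up the amdgpu_crtc driven by a given OTG
 * instance, or return the first CRTC if the instance is unknown (-1).
 */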
309 static struct amdgpu_crtc *
310 get_crtc_by_otg_inst(struct amdgpu_device *adev,
311 		     int otg_inst)
312 {
313 	struct drm_device *dev = adev_to_drm(adev);
314 	struct drm_crtc *crtc;
315 	struct amdgpu_crtc *amdgpu_crtc;
316 
317 	if (WARN_ON(otg_inst == -1))
318 		return adev->mode_info.crtcs[0];
319 
320 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
321 		amdgpu_crtc = to_amdgpu_crtc(crtc);
322 
323 		if (amdgpu_crtc->otg_inst == otg_inst)
324 			return amdgpu_crtc;
325 	}
326 
327 	return NULL;
328 }
329 
330 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
331 {
332 	return acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_VARIABLE ||
334 	       acrtc->dm_irq_params.freesync_config.state ==
335 		       VRR_STATE_ACTIVE_FIXED;
336 }
337 
338 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
339 {
340 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
341 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
342 }
343 
344 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
345 					      struct dm_crtc_state *new_state)
346 {
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
348 		return true;
349 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
350 		return true;
351 	else
352 		return false;
353 }
354 
355 /**
356  * dm_pflip_high_irq() - Handle pageflip interrupt
357  * @interrupt_params: ignored
358  *
359  * Handles the pageflip interrupt by notifying all interested parties
360  * that the pageflip has been completed.
361  */
362 static void dm_pflip_high_irq(void *interrupt_params)
363 {
364 	struct amdgpu_crtc *amdgpu_crtc;
365 	struct common_irq_params *irq_params = interrupt_params;
366 	struct amdgpu_device *adev = irq_params->adev;
367 	unsigned long flags;
368 	struct drm_pending_vblank_event *e;
369 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
370 	bool vrr_active;
371 
372 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
373 
374 	/* IRQ could occur when in initial stage */
375 	/* TODO work and BO cleanup */
376 	if (amdgpu_crtc == NULL) {
377 		DC_LOG_PFLIP("CRTC is null, returning.\n");
378 		return;
379 	}
380 
381 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
382 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
385 						 amdgpu_crtc->pflip_status,
386 						 AMDGPU_FLIP_SUBMITTED,
387 						 amdgpu_crtc->crtc_id,
388 						 amdgpu_crtc);
389 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
390 		return;
391 	}
392 
393 	/* page flip completed. */
394 	e = amdgpu_crtc->event;
395 	amdgpu_crtc->event = NULL;
396 
397 	WARN_ON(!e);
398 
399 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
400 
401 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
402 	if (!vrr_active ||
403 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
404 				      &v_blank_end, &hpos, &vpos) ||
405 	    (vpos < v_blank_start)) {
406 		/* Update to correct count and vblank timestamp if racing with
407 		 * vblank irq. This also updates to the correct vblank timestamp
408 		 * even in VRR mode, as scanout is past the front-porch atm.
409 		 */
410 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
411 
412 		/* Wake up userspace by sending the pageflip event with proper
413 		 * count and timestamp of vblank of flip completion.
414 		 */
415 		if (e) {
416 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
417 
418 			/* Event sent, so done with vblank for this flip */
419 			drm_crtc_vblank_put(&amdgpu_crtc->base);
420 		}
421 	} else if (e) {
422 		/* VRR active and inside front-porch: vblank count and
423 		 * timestamp for pageflip event will only be up to date after
424 		 * drm_crtc_handle_vblank() has been executed from late vblank
425 		 * irq handler after start of back-porch (vline 0). We queue the
426 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
427 		 * updated timestamp and count, once it runs after us.
428 		 *
429 		 * We need to open-code this instead of using the helper
430 		 * drm_crtc_arm_vblank_event(), as that helper would
431 		 * call drm_crtc_accurate_vblank_count(), which we must
432 		 * not call in VRR mode while we are in front-porch!
433 		 */
434 
435 		/* sequence will be replaced by real count during send-out. */
436 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
437 		e->pipe = amdgpu_crtc->crtc_id;
438 
439 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
440 		e = NULL;
441 	}
442 
443 	/* Keep track of vblank of this flip for flip throttling. We use the
444 	 * cooked hw counter, as that one incremented at start of this vblank
445 	 * of pageflip completion, so last_flip_vblank is the forbidden count
446 	 * for queueing new pageflips if vsync + VRR is enabled.
447 	 */
448 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
449 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
450 
451 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
452 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
453 
454 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
455 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
456 		     vrr_active, (int) !e);
457 }
458 
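/**
 * dm_vupdate_high_irq() - Handles the VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Traces the measured refresh rate and, when VRR is active, performs core
 * vblank handling after the end of the front-porch, including BTR processing
 * on pre-DCE12 ASICs.
 */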
459 static void dm_vupdate_high_irq(void *interrupt_params)
460 {
461 	struct common_irq_params *irq_params = interrupt_params;
462 	struct amdgpu_device *adev = irq_params->adev;
463 	struct amdgpu_crtc *acrtc;
464 	struct drm_device *drm_dev;
465 	struct drm_vblank_crtc *vblank;
466 	ktime_t frame_duration_ns, previous_timestamp;
467 	unsigned long flags;
468 	int vrr_active;
469 
470 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
471 
472 	if (acrtc) {
473 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
474 		drm_dev = acrtc->base.dev;
475 		vblank = &drm_dev->vblank[acrtc->base.index];
476 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
477 		frame_duration_ns = vblank->time - previous_timestamp;
478 
479 		if (frame_duration_ns > 0) {
480 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
481 						frame_duration_ns,
482 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
483 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
484 		}
485 
486 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
487 			      acrtc->crtc_id,
488 			      vrr_active);
489 
490 		/* Core vblank handling is done here after end of front-porch in
491 		 * vrr mode, as vblank timestamping will give valid results
492 		 * while now done after front-porch. This will also deliver
493 		 * page-flip completion events that have been queued to us
494 		 * if a pageflip happened inside front-porch.
495 		 */
496 		if (vrr_active) {
497 			drm_crtc_handle_vblank(&acrtc->base);
498 
499 			/* BTR processing for pre-DCE12 ASICs */
500 			if (acrtc->dm_irq_params.stream &&
501 			    adev->family < AMDGPU_FAMILY_AI) {
502 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
503 				mod_freesync_handle_v_update(
504 				    adev->dm.freesync_module,
505 				    acrtc->dm_irq_params.stream,
506 				    &acrtc->dm_irq_params.vrr_params);
507 
508 				dc_stream_adjust_vmin_vmax(
509 				    adev->dm.dc,
510 				    acrtc->dm_irq_params.stream,
511 				    &acrtc->dm_irq_params.vrr_params.adjust);
512 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
513 			}
514 		}
515 	}
516 }
517 
518 /**
519  * dm_crtc_high_irq() - Handles CRTC interrupt
520  * @interrupt_params: used for determining the CRTC instance
521  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
523  * event handler.
524  */
525 static void dm_crtc_high_irq(void *interrupt_params)
526 {
527 	struct common_irq_params *irq_params = interrupt_params;
528 	struct amdgpu_device *adev = irq_params->adev;
529 	struct amdgpu_crtc *acrtc;
530 	unsigned long flags;
531 	int vrr_active;
532 
533 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
534 	if (!acrtc)
535 		return;
536 
537 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
538 
539 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
540 		      vrr_active, acrtc->dm_irq_params.active_planes);
541 
542 	/**
543 	 * Core vblank handling at start of front-porch is only possible
544 	 * in non-vrr mode, as only there vblank timestamping will give
545 	 * valid results while done in front-porch. Otherwise defer it
546 	 * to dm_vupdate_high_irq after end of front-porch.
547 	 */
548 	if (!vrr_active)
549 		drm_crtc_handle_vblank(&acrtc->base);
550 
551 	/**
552 	 * Following stuff must happen at start of vblank, for crc
553 	 * computation and below-the-range btr support in vrr mode.
554 	 */
555 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
556 
557 	/* BTR updates need to happen before VUPDATE on Vega and above. */
558 	if (adev->family < AMDGPU_FAMILY_AI)
559 		return;
560 
561 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
562 
563 	if (acrtc->dm_irq_params.stream &&
564 	    acrtc->dm_irq_params.vrr_params.supported &&
565 	    acrtc->dm_irq_params.freesync_config.state ==
566 		    VRR_STATE_ACTIVE_VARIABLE) {
567 		mod_freesync_handle_v_update(adev->dm.freesync_module,
568 					     acrtc->dm_irq_params.stream,
569 					     &acrtc->dm_irq_params.vrr_params);
570 
571 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
572 					   &acrtc->dm_irq_params.vrr_params.adjust);
573 	}
574 
575 	/*
576 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
577 	 * In that case, pageflip completion interrupts won't fire and pageflip
578 	 * completion events won't get delivered. Prevent this by sending
579 	 * pending pageflip events from here if a flip is still pending.
580 	 *
581 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
582 	 * avoid race conditions between flip programming and completion,
583 	 * which could cause too early flip completion events.
584 	 */
585 	if (adev->family >= AMDGPU_FAMILY_RV &&
586 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
587 	    acrtc->dm_irq_params.active_planes == 0) {
588 		if (acrtc->event) {
589 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
590 			acrtc->event = NULL;
591 			drm_crtc_vblank_put(&acrtc->base);
592 		}
593 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
594 	}
595 
596 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
597 }
598 
599 #if defined(CONFIG_DRM_AMD_DC_DCN)
600 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
601 /**
602  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
603  * DCN generation ASICs
604  * @interrupt_params: interrupt parameters
605  *
606  * Used to set crc window/read out crc value at vertical line 0 position
607  */
608 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
609 {
610 	struct common_irq_params *irq_params = interrupt_params;
611 	struct amdgpu_device *adev = irq_params->adev;
612 	struct amdgpu_crtc *acrtc;
613 
614 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
615 
616 	if (!acrtc)
617 		return;
618 
619 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
620 }
621 #endif
622 
623 /**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it is read by the AUX command
 * issuing thread, and signals the completion event to wake that thread up.
631  */
632 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
633 {
634 	if (adev->dm.dmub_notify)
635 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
636 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
637 		complete(&adev->dm.dmub_aux_transfer_done);
638 }
639 
640 /**
641  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
642  * @adev: amdgpu_device pointer
643  * @notify: dmub notification structure
644  *
 * Dmub Hpd interrupt processing callback. Looks up the connector through
 * the link index and calls a helper to do the processing.
647  */
648 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
649 {
650 	struct amdgpu_dm_connector *aconnector;
651 	struct drm_connector *connector;
652 	struct drm_connector_list_iter iter;
653 	struct dc_link *link;
654 	uint8_t link_index = 0;
655 	struct drm_device *dev = adev->dm.ddev;
656 
657 	if (adev == NULL)
658 		return;
659 
660 	if (notify == NULL) {
661 		DRM_ERROR("DMUB HPD callback notification was NULL");
662 		return;
663 	}
664 
	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
667 		return;
668 	}
669 
670 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
671 
672 	link_index = notify->link_index;
673 
674 	link = adev->dm.dc->links[link_index];
675 
676 	drm_connector_list_iter_begin(dev, &iter);
677 	drm_for_each_connector_iter(connector, &iter) {
678 		aconnector = to_amdgpu_dm_connector(connector);
679 		if (link && aconnector->dc_link == link) {
680 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
681 			handle_hpd_irq_helper(aconnector);
682 			break;
683 		}
684 	}
685 	drm_connector_list_iter_end(&iter);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
689 
690 /**
691  * register_dmub_notify_callback - Sets callback for DMUB notify
692  * @adev: amdgpu_device pointer
693  * @type: Type of dmub notification
694  * @callback: Dmub interrupt callback function
695  * @dmub_int_thread_offload: offload indicator
696  *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if the callback is NULL or
 * the notification type is out of range
701  */
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
				   dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
	if (callback == NULL || type >= ARRAY_SIZE(adev->dm.dmub_thread_offload))
		return false;

	adev->dm.dmub_callback[type] = callback;
	adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;

	return true;
}
713 
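/*
 * dm_handle_hpd_work() - Deferred handler for offloaded DMUB HPD notifications.
 *
 * Runs on the delayed HPD workqueue and dispatches the queued DMUB
 * notification to the callback registered for its type.
 */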
714 static void dm_handle_hpd_work(struct work_struct *work)
715 {
716 	struct dmub_hpd_work *dmub_hpd_wrk;
717 
718 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
719 
720 	if (!dmub_hpd_wrk->dmub_notify) {
721 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
722 		return;
723 	}
724 
725 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
726 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
727 		dmub_hpd_wrk->dmub_notify);
728 	}
	kfree(dmub_hpd_wrk);
}
732 
733 #define DMUB_TRACE_MAX_READ 64
734 /**
735  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
736  * @interrupt_params: used for determining the Outbox instance
737  *
 * Handles the Outbox interrupt by processing pending DMUB notifications
 * and draining the DMUB trace buffer.
740  */
741 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
742 {
743 	struct dmub_notification notify;
744 	struct common_irq_params *irq_params = interrupt_params;
745 	struct amdgpu_device *adev = irq_params->adev;
746 	struct amdgpu_display_manager *dm = &adev->dm;
747 	struct dmcub_trace_buf_entry entry = { 0 };
748 	uint32_t count = 0;
749 	struct dmub_hpd_work *dmub_hpd_wrk;
750 
751 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
752 		dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
753 		if (!dmub_hpd_wrk) {
754 			DRM_ERROR("Failed to allocate dmub_hpd_wrk");
755 			return;
756 		}
757 		INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
758 
759 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
760 			do {
761 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
				if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
					DRM_ERROR("DM: notify type %d larger than the array size %zu!", notify.type,
					ARRAY_SIZE(dm->dmub_thread_offload));
765 					continue;
766 				}
767 				if (dm->dmub_thread_offload[notify.type] == true) {
768 					dmub_hpd_wrk->dmub_notify = &notify;
769 					dmub_hpd_wrk->adev = adev;
770 					queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
771 				} else {
772 					dm->dmub_callback[notify.type](adev, &notify);
773 				}
774 
775 			} while (notify.pending_notification);
776 
777 		} else {
			DRM_ERROR("DM: Failed to receive correct outbox IRQ!");
779 		}
780 	}
781 
782 
783 	do {
784 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
785 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
786 							entry.param0, entry.param1);
787 
788 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
789 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else {
			break;
		}
792 
793 		count++;
794 
795 	} while (count <= DMUB_TRACE_MAX_READ);
796 
797 	ASSERT(count <= DMUB_TRACE_MAX_READ);
798 }
799 #endif
800 
801 static int dm_set_clockgating_state(void *handle,
802 		  enum amd_clockgating_state state)
803 {
804 	return 0;
805 }
806 
807 static int dm_set_powergating_state(void *handle,
808 		  enum amd_powergating_state state)
809 {
810 	return 0;
811 }
812 
813 /* Prototypes of private functions */
static int dm_early_init(void *handle);
815 
/* Allocate memory for FBC compressed data */
817 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
818 {
819 	struct drm_device *dev = connector->dev;
820 	struct amdgpu_device *adev = drm_to_adev(dev);
821 	struct dm_compressor_info *compressor = &adev->dm.compressor;
822 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
823 	struct drm_display_mode *mode;
824 	unsigned long max_size = 0;
825 
826 	if (adev->dm.dc->fbc_compressor == NULL)
827 		return;
828 
829 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
830 		return;
831 
832 	if (compressor->bo_ptr)
		return;

836 	list_for_each_entry(mode, &connector->modes, head) {
837 		if (max_size < mode->htotal * mode->vtotal)
838 			max_size = mode->htotal * mode->vtotal;
839 	}
840 
841 	if (max_size) {
842 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
843 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
844 			    &compressor->gpu_addr, &compressor->cpu_addr);
845 
		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
856 
857 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
858 					  int pipe, bool *enabled,
859 					  unsigned char *buf, int max_bytes)
860 {
861 	struct drm_device *dev = dev_get_drvdata(kdev);
862 	struct amdgpu_device *adev = drm_to_adev(dev);
863 	struct drm_connector *connector;
864 	struct drm_connector_list_iter conn_iter;
865 	struct amdgpu_dm_connector *aconnector;
866 	int ret = 0;
867 
868 	*enabled = false;
869 
870 	mutex_lock(&adev->dm.audio_lock);
871 
872 	drm_connector_list_iter_begin(dev, &conn_iter);
873 	drm_for_each_connector_iter(connector, &conn_iter) {
874 		aconnector = to_amdgpu_dm_connector(connector);
875 		if (aconnector->audio_inst != port)
876 			continue;
877 
878 		*enabled = true;
879 		ret = drm_eld_size(connector->eld);
880 		memcpy(buf, connector->eld, min(max_bytes, ret));
881 
882 		break;
883 	}
884 	drm_connector_list_iter_end(&conn_iter);
885 
886 	mutex_unlock(&adev->dm.audio_lock);
887 
888 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
889 
890 	return ret;
891 }
892 
893 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
894 	.get_eld = amdgpu_dm_audio_component_get_eld,
895 };
896 
897 static int amdgpu_dm_audio_component_bind(struct device *kdev,
898 				       struct device *hda_kdev, void *data)
899 {
900 	struct drm_device *dev = dev_get_drvdata(kdev);
901 	struct amdgpu_device *adev = drm_to_adev(dev);
902 	struct drm_audio_component *acomp = data;
903 
904 	acomp->ops = &amdgpu_dm_audio_component_ops;
905 	acomp->dev = kdev;
906 	adev->dm.audio_component = acomp;
907 
908 	return 0;
909 }
910 
911 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
912 					  struct device *hda_kdev, void *data)
913 {
914 	struct drm_device *dev = dev_get_drvdata(kdev);
915 	struct amdgpu_device *adev = drm_to_adev(dev);
916 	struct drm_audio_component *acomp = data;
917 
918 	acomp->ops = NULL;
919 	acomp->dev = NULL;
920 	adev->dm.audio_component = NULL;
921 }
922 
923 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
924 	.bind	= amdgpu_dm_audio_component_bind,
925 	.unbind	= amdgpu_dm_audio_component_unbind,
926 };
927 
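/*
 * amdgpu_dm_audio_init() - Set up the audio pins exposed by DC and register
 * the DRM audio component so the HDA driver can query ELDs.
 */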
928 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
929 {
930 	int i, ret;
931 
932 	if (!amdgpu_audio)
933 		return 0;
934 
935 	adev->mode_info.audio.enabled = true;
936 
937 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
938 
939 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
940 		adev->mode_info.audio.pin[i].channels = -1;
941 		adev->mode_info.audio.pin[i].rate = -1;
942 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
943 		adev->mode_info.audio.pin[i].status_bits = 0;
944 		adev->mode_info.audio.pin[i].category_code = 0;
945 		adev->mode_info.audio.pin[i].connected = false;
946 		adev->mode_info.audio.pin[i].id =
947 			adev->dm.dc->res_pool->audios[i]->inst;
948 		adev->mode_info.audio.pin[i].offset = 0;
949 	}
950 
951 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
952 	if (ret < 0)
953 		return ret;
954 
955 	adev->dm.audio_registered = true;
956 
957 	return 0;
958 }
959 
960 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
961 {
962 	if (!amdgpu_audio)
963 		return;
964 
965 	if (!adev->mode_info.audio.enabled)
966 		return;
967 
968 	if (adev->dm.audio_registered) {
969 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
970 		adev->dm.audio_registered = false;
971 	}
972 
973 	/* TODO: Disable audio? */
974 
975 	adev->mode_info.audio.enabled = false;
976 }
977 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
979 {
980 	struct drm_audio_component *acomp = adev->dm.audio_component;
981 
982 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
983 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
984 
985 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
986 						 pin, -1);
987 	}
988 }
989 
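/*
 * dm_dmub_hw_init() - Initialize DMUB hardware for ASICs that support it.
 *
 * Copies the DMUB firmware and VBIOS into the reserved framebuffer windows,
 * programs the hardware parameters, waits for the firmware auto-load to
 * finish and creates the DC-side DMUB service.
 */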
990 static int dm_dmub_hw_init(struct amdgpu_device *adev)
991 {
992 	const struct dmcub_firmware_header_v1_0 *hdr;
993 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
994 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
995 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
996 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
997 	struct abm *abm = adev->dm.dc->res_pool->abm;
998 	struct dmub_srv_hw_params hw_params;
999 	enum dmub_status status;
1000 	const unsigned char *fw_inst_const, *fw_bss_data;
1001 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1002 	bool has_hw_support;
1003 
1004 	if (!dmub_srv)
1005 		/* DMUB isn't supported on the ASIC. */
1006 		return 0;
1007 
1008 	if (!fb_info) {
1009 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1010 		return -EINVAL;
1011 	}
1012 
1013 	if (!dmub_fw) {
1014 		/* Firmware required for DMUB support. */
1015 		DRM_ERROR("No firmware provided for DMUB.\n");
1016 		return -EINVAL;
1017 	}
1018 
1019 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1020 	if (status != DMUB_STATUS_OK) {
1021 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1022 		return -EINVAL;
1023 	}
1024 
1025 	if (!has_hw_support) {
1026 		DRM_INFO("DMUB unsupported on ASIC\n");
1027 		return 0;
1028 	}
1029 
1030 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1031 
1032 	fw_inst_const = dmub_fw->data +
1033 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1034 			PSP_HEADER_BYTES;
1035 
1036 	fw_bss_data = dmub_fw->data +
1037 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1038 		      le32_to_cpu(hdr->inst_const_bytes);
1039 
1040 	/* Copy firmware and bios info into FB memory. */
1041 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1042 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1043 
1044 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1045 
1046 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1047 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1048 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1049 	 * will be done by dm_dmub_hw_init
1050 	 */
1051 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1052 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1053 				fw_inst_const_size);
1054 	}
1055 
1056 	if (fw_bss_data_size)
1057 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1058 		       fw_bss_data, fw_bss_data_size);
1059 
1060 	/* Copy firmware bios info into FB memory. */
1061 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1062 	       adev->bios_size);
1063 
1064 	/* Reset regions that need to be reset. */
1065 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1066 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1067 
1068 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1069 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1070 
1071 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1072 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1073 
1074 	/* Initialize hardware. */
1075 	memset(&hw_params, 0, sizeof(hw_params));
1076 	hw_params.fb_base = adev->gmc.fb_start;
1077 	hw_params.fb_offset = adev->gmc.aper_base;
1078 
1079 	/* backdoor load firmware and trigger dmub running */
1080 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1081 		hw_params.load_inst_const = true;
1082 
1083 	if (dmcu)
1084 		hw_params.psp_version = dmcu->psp_version;
1085 
1086 	for (i = 0; i < fb_info->num_fb; ++i)
1087 		hw_params.fb[i] = &fb_info->fb[i];
1088 
1089 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1090 	if (status != DMUB_STATUS_OK) {
1091 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1092 		return -EINVAL;
1093 	}
1094 
1095 	/* Wait for firmware load to finish. */
1096 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1097 	if (status != DMUB_STATUS_OK)
1098 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1099 
1100 	/* Init DMCU and ABM if available. */
1101 	if (dmcu && abm) {
1102 		dmcu->funcs->dmcu_init(dmcu);
1103 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1104 	}
1105 
1106 	if (!adev->dm.dc->ctx->dmub_srv)
1107 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1108 	if (!adev->dm.dc->ctx->dmub_srv) {
1109 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1110 		return -ENOMEM;
1111 	}
1112 
1113 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1114 		 adev->dm.dmcub_fw_version);
1115 
1116 	return 0;
1117 }
1118 
1119 #if defined(CONFIG_DRM_AMD_DC_DCN)
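/*
 * mmhub_read_system_context() - Fill a dc_phy_addr_space_config from GMC
 * state: the system aperture, AGP aperture and GART page table addresses,
 * so DC can program the DCN address space (including the Raven2 aperture
 * workaround).
 */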
1120 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1121 {
1122 	uint64_t pt_base;
1123 	uint32_t logical_addr_low;
1124 	uint32_t logical_addr_high;
1125 	uint32_t agp_base, agp_bot, agp_top;
1126 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1127 
1128 	memset(pa_config, 0, sizeof(*pa_config));
1129 
1130 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1131 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1132 
1133 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1134 		/*
1135 		 * Raven2 has a HW issue that it is unable to use the vram which
1136 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1137 		 * workaround that increase system aperture high address (add 1)
1138 		 * to get rid of the VM fault and hardware hang.
1139 		 */
1140 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1141 	else
1142 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1143 
1144 	agp_base = 0;
1145 	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

1149 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1150 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1151 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1152 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1153 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1154 	page_table_base.low_part = lower_32_bits(pt_base);
1155 
1156 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1157 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1158 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1160 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1161 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1162 
1163 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1164 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1165 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1166 
1167 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1168 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1169 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1170 
	pa_config->is_hvm_enabled = 0;
}
1174 #endif
1175 #if defined(CONFIG_DRM_AMD_DC_DCN)
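/*
 * vblank_control_worker() - Deferred work for vblank enable/disable requests.
 *
 * Tracks the number of CRTCs with vblank interrupts enabled, allows idle
 * optimizations (MALL) only when none are active, and enables or disables
 * PSR on the affected stream accordingly.
 */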
1176 static void vblank_control_worker(struct work_struct *work)
1177 {
1178 	struct vblank_control_work *vblank_work =
1179 		container_of(work, struct vblank_control_work, work);
1180 	struct amdgpu_display_manager *dm = vblank_work->dm;
1181 
1182 	mutex_lock(&dm->dc_lock);
1183 
1184 	if (vblank_work->enable)
1185 		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
1187 		dm->active_vblank_irq_count--;
1188 
1189 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1190 
1191 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1192 
1193 	/* Control PSR based on vblank requirements from OS */
1194 	if (vblank_work->stream && vblank_work->stream->link) {
1195 		if (vblank_work->enable) {
1196 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1197 				amdgpu_dm_psr_disable(vblank_work->stream);
1198 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1199 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1200 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1201 			amdgpu_dm_psr_enable(vblank_work->stream);
1202 		}
1203 	}
1204 
1205 	mutex_unlock(&dm->dc_lock);
1206 
1207 	dc_stream_release(vblank_work->stream);
1208 
1209 	kfree(vblank_work);
1210 }
1211 
1212 #endif
1213 
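/*
 * dm_handle_hpd_rx_offload_work() - Deferred handler for HPD RX interrupts.
 *
 * Re-detects the sink and, outside of GPU reset, handles automated DP test
 * requests or link-loss recovery for the affected link.
 */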
1214 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1215 {
1216 	struct hpd_rx_irq_offload_work *offload_work;
1217 	struct amdgpu_dm_connector *aconnector;
1218 	struct dc_link *dc_link;
1219 	struct amdgpu_device *adev;
1220 	enum dc_connection_type new_connection_type = dc_connection_none;
1221 	unsigned long flags;
1222 
1223 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1224 	aconnector = offload_work->offload_wq->aconnector;
1225 
1226 	if (!aconnector) {
1227 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1228 		goto skip;
1229 	}
1230 
1231 	adev = drm_to_adev(aconnector->base.dev);
1232 	dc_link = aconnector->dc_link;
1233 
1234 	mutex_lock(&aconnector->hpd_lock);
1235 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1236 		DRM_ERROR("KMS: Failed to detect connector\n");
1237 	mutex_unlock(&aconnector->hpd_lock);
1238 
1239 	if (new_connection_type == dc_connection_none)
1240 		goto skip;
1241 
1242 	if (amdgpu_in_reset(adev))
1243 		goto skip;
1244 
1245 	mutex_lock(&adev->dm.dc_lock);
1246 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1247 		dc_link_dp_handle_automated_test(dc_link);
1248 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1249 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1250 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1251 		dc_link_dp_handle_link_loss(dc_link);
1252 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1253 		offload_work->offload_wq->is_handling_link_loss = false;
1254 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1255 	}
1256 	mutex_unlock(&adev->dm.dc_lock);
1257 
1258 skip:
	kfree(offload_work);
}
1262 
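/*
 * hpd_rx_irq_create_workqueue() - Allocate one single-threaded HPD RX offload
 * workqueue per link, so link-loss and test-request handling can be deferred
 * out of interrupt context.
 */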
1263 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1264 {
1265 	int max_caps = dc->caps.max_links;
1266 	int i = 0;
1267 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1268 
1269 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1270 
1271 	if (!hpd_rx_offload_wq)
		return NULL;

1275 	for (i = 0; i < max_caps; i++) {
1276 		hpd_rx_offload_wq[i].wq =
1277 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1278 
1279 		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq failed!");
1281 			return NULL;
1282 		}
1283 
1284 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1285 	}
1286 
1287 	return hpd_rx_offload_wq;
1288 }
1289 
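/*
 * amdgpu_dm_init() - Top-level DM initialization.
 *
 * Creates the DC instance, initializes DMUB, FreeSync, HDCP and DMUB
 * notification handling, and registers the DRM display structures.
 */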
1290 static int amdgpu_dm_init(struct amdgpu_device *adev)
1291 {
1292 	struct dc_init_data init_data;
1293 #ifdef CONFIG_DRM_AMD_DC_HDCP
1294 	struct dc_callback_init init_params;
1295 #endif
1296 	int r;
1297 
1298 	adev->dm.ddev = adev_to_drm(adev);
1299 	adev->dm.adev = adev;
1300 
1301 	/* Zero all the fields */
1302 	memset(&init_data, 0, sizeof(init_data));
1303 #ifdef CONFIG_DRM_AMD_DC_HDCP
1304 	memset(&init_params, 0, sizeof(init_params));
1305 #endif
1306 
1307 	mutex_init(&adev->dm.dc_lock);
1308 	mutex_init(&adev->dm.audio_lock);
1309 #if defined(CONFIG_DRM_AMD_DC_DCN)
1310 	spin_lock_init(&adev->dm.vblank_lock);
1311 #endif
1312 
	if (amdgpu_dm_irq_init(adev)) {
1314 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1315 		goto error;
1316 	}
1317 
1318 	init_data.asic_id.chip_family = adev->family;
1319 
1320 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1321 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1322 
1323 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1324 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1325 	init_data.asic_id.atombios_base_address =
1326 		adev->mode_info.atom_context->bios;
1327 
1328 	init_data.driver = adev;
1329 
1330 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1331 
1332 	if (!adev->dm.cgs_device) {
1333 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1334 		goto error;
1335 	}
1336 
1337 	init_data.cgs_device = adev->dm.cgs_device;
1338 
1339 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1340 
1341 	switch (adev->asic_type) {
1342 	case CHIP_CARRIZO:
1343 	case CHIP_STONEY:
1344 	case CHIP_RAVEN:
1345 	case CHIP_RENOIR:
1346 		init_data.flags.gpu_vm_support = true;
1347 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1348 			init_data.flags.disable_dmcu = true;
1349 		break;
1350 	case CHIP_VANGOGH:
1351 	case CHIP_YELLOW_CARP:
1352 		init_data.flags.gpu_vm_support = true;
1353 		break;
1354 	default:
1355 		break;
1356 	}
1357 
1358 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1359 		init_data.flags.fbc_support = true;
1360 
1361 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1362 		init_data.flags.multi_mon_pp_mclk_switch = true;
1363 
1364 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1365 		init_data.flags.disable_fractional_pwm = true;
1366 
1367 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1368 		init_data.flags.edp_no_power_sequencing = true;
1369 
1370 	init_data.flags.power_down_display_on_boot = true;
1371 
1372 	INIT_LIST_HEAD(&adev->dm.da_list);
1373 	/* Display Core create. */
1374 	adev->dm.dc = dc_create(&init_data);
1375 
1376 	if (adev->dm.dc) {
1377 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1378 	} else {
1379 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1380 		goto error;
1381 	}
1382 
1383 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1384 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1385 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1386 	}
1387 
1388 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1389 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1390 
1391 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1392 		adev->dm.dc->debug.disable_stutter = true;
1393 
1394 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1395 		adev->dm.dc->debug.disable_dsc = true;
1396 
1397 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1398 		adev->dm.dc->debug.disable_clock_gate = true;
1399 
1400 	r = dm_dmub_hw_init(adev);
1401 	if (r) {
1402 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1403 		goto error;
1404 	}
1405 
1406 	dc_hardware_init(adev->dm.dc);
1407 
1408 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1409 	if (!adev->dm.hpd_rx_offload_wq) {
1410 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1411 		goto error;
1412 	}
1413 
1414 #if defined(CONFIG_DRM_AMD_DC_DCN)
1415 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1416 		struct dc_phy_addr_space_config pa_config;
1417 
1418 		mmhub_read_system_context(adev, &pa_config);
1419 
1420 		// Call the DC init_memory func
1421 		dc_setup_system_context(adev->dm.dc, &pa_config);
1422 	}
1423 #endif
1424 
1425 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}
1432 
1433 	amdgpu_dm_init_color_mod();
1434 
1435 #if defined(CONFIG_DRM_AMD_DC_DCN)
1436 	if (adev->dm.dc->caps.max_links > 0) {
1437 		adev->dm.vblank_control_workqueue =
1438 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1439 		if (!adev->dm.vblank_control_workqueue)
1440 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1441 	}
1442 #endif
1443 
1444 #ifdef CONFIG_DRM_AMD_DC_HDCP
1445 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1446 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1447 
1448 		if (!adev->dm.hdcp_workqueue)
1449 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1450 		else
1451 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1452 
1453 		dc_init_callbacks(adev->dm.dc, &init_params);
1454 	}
1455 #endif
1456 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1457 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1458 #endif
1459 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1460 		init_completion(&adev->dm.dmub_aux_transfer_done);
1461 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1462 		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
1464 			goto error;
1465 		}
1466 
1467 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1468 		if (!adev->dm.delayed_hpd_wq) {
1469 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1470 			goto error;
1471 		}
1472 
1473 		amdgpu_dm_outbox_init(adev);
1474 #if defined(CONFIG_DRM_AMD_DC_DCN)
1475 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1476 			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1478 			goto error;
1479 		}
1480 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1482 			goto error;
1483 		}
1484 #endif
1485 	}
1486 
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}
1492 
1493 	/* create fake encoders for MST */
1494 	dm_dp_create_fake_mst_encoders(adev);
1495 
1496 	/* TODO: Add_display_info? */
1497 
1498 	/* TODO use dynamic cursor width */
1499 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1500 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1501 
	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}

1509 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1510 
1511 	return 0;
1512 error:
1513 	amdgpu_dm_fini(adev);
1514 
1515 	return -EINVAL;
1516 }
1517 
1518 static int amdgpu_dm_early_fini(void *handle)
1519 {
1520 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1521 
1522 	amdgpu_dm_audio_fini(adev);
1523 
1524 	return 0;
1525 }
1526 
1527 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1528 {
1529 	int i;
1530 
1531 #if defined(CONFIG_DRM_AMD_DC_DCN)
1532 	if (adev->dm.vblank_control_workqueue) {
1533 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1534 		adev->dm.vblank_control_workqueue = NULL;
1535 	}
1536 #endif
1537 
1538 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1539 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1540 	}
1541 
1542 	amdgpu_dm_destroy_drm_device(&adev->dm);
1543 
1544 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1545 	if (adev->dm.crc_rd_wrk) {
1546 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1547 		kfree(adev->dm.crc_rd_wrk);
1548 		adev->dm.crc_rd_wrk = NULL;
1549 	}
1550 #endif
1551 #ifdef CONFIG_DRM_AMD_DC_HDCP
1552 	if (adev->dm.hdcp_workqueue) {
1553 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1554 		adev->dm.hdcp_workqueue = NULL;
1555 	}
1556 
1557 	if (adev->dm.dc)
1558 		dc_deinit_callbacks(adev->dm.dc);
1559 #endif
1560 
1561 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1562 
1563 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1564 		kfree(adev->dm.dmub_notify);
1565 		adev->dm.dmub_notify = NULL;
1566 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1567 		adev->dm.delayed_hpd_wq = NULL;
1568 	}
1569 
1570 	if (adev->dm.dmub_bo)
1571 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1572 				      &adev->dm.dmub_bo_gpu_addr,
1573 				      &adev->dm.dmub_bo_cpu_addr);
1574 
1575 	if (adev->dm.hpd_rx_offload_wq) {
1576 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1577 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1578 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1579 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1580 			}
1581 		}
1582 
1583 		kfree(adev->dm.hpd_rx_offload_wq);
1584 		adev->dm.hpd_rx_offload_wq = NULL;
1585 	}
1586 
1587 	/* DC Destroy TODO: Replace destroy DAL */
1588 	if (adev->dm.dc)
1589 		dc_destroy(&adev->dm.dc);
1590 	/*
	 * TODO: pageflip, vblank interrupt
1592 	 *
1593 	 * amdgpu_dm_irq_fini(adev);
1594 	 */
1595 
1596 	if (adev->dm.cgs_device) {
1597 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1598 		adev->dm.cgs_device = NULL;
1599 	}
1600 	if (adev->dm.freesync_module) {
1601 		mod_freesync_destroy(adev->dm.freesync_module);
1602 		adev->dm.freesync_module = NULL;
1603 	}
1604 
1605 	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
1610 
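/*
 * load_dmcu_fw() - Request the external DMCU firmware where required.
 *
 * Only Raven (Picasso/Raven2) and Navi12 need a separately loaded DMCU image,
 * and only when firmware loading goes through the PSP; all other supported
 * ASICs return early without requesting firmware.
 */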
1611 static int load_dmcu_fw(struct amdgpu_device *adev)
1612 {
1613 	const char *fw_name_dmcu = NULL;
1614 	int r;
1615 	const struct dmcu_firmware_header_v1_0 *hdr;
1616 
	switch (adev->asic_type) {
1618 #if defined(CONFIG_DRM_AMD_DC_SI)
1619 	case CHIP_TAHITI:
1620 	case CHIP_PITCAIRN:
1621 	case CHIP_VERDE:
1622 	case CHIP_OLAND:
1623 #endif
1624 	case CHIP_BONAIRE:
1625 	case CHIP_HAWAII:
1626 	case CHIP_KAVERI:
1627 	case CHIP_KABINI:
1628 	case CHIP_MULLINS:
1629 	case CHIP_TONGA:
1630 	case CHIP_FIJI:
1631 	case CHIP_CARRIZO:
1632 	case CHIP_STONEY:
1633 	case CHIP_POLARIS11:
1634 	case CHIP_POLARIS10:
1635 	case CHIP_POLARIS12:
1636 	case CHIP_VEGAM:
1637 	case CHIP_VEGA10:
1638 	case CHIP_VEGA12:
1639 	case CHIP_VEGA20:
1640 	case CHIP_NAVI10:
1641 	case CHIP_NAVI14:
1642 	case CHIP_RENOIR:
1643 	case CHIP_SIENNA_CICHLID:
1644 	case CHIP_NAVY_FLOUNDER:
1645 	case CHIP_DIMGREY_CAVEFISH:
1646 	case CHIP_BEIGE_GOBY:
1647 	case CHIP_VANGOGH:
1648 	case CHIP_YELLOW_CARP:
1649 		return 0;
1650 	case CHIP_NAVI12:
1651 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1652 		break;
1653 	case CHIP_RAVEN:
1654 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1655 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1656 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1657 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1658 		else
1659 			return 0;
1660 		break;
1661 	default:
1662 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1663 		return -EINVAL;
1664 	}
1665 
1666 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1667 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1668 		return 0;
1669 	}
1670 
1671 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1672 	if (r == -ENOENT) {
1673 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1674 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1675 		adev->dm.fw_dmcu = NULL;
1676 		return 0;
1677 	}
1678 	if (r) {
1679 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1680 			fw_name_dmcu);
1681 		return r;
1682 	}
1683 
1684 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1685 	if (r) {
1686 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1687 			fw_name_dmcu);
1688 		release_firmware(adev->dm.fw_dmcu);
1689 		adev->dm.fw_dmcu = NULL;
1690 		return r;
1691 	}
1692 
1693 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1694 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1695 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1696 	adev->firmware.fw_size +=
1697 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1698 
1699 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1700 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1701 	adev->firmware.fw_size +=
1702 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1703 
1704 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1705 
1706 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1707 
1708 	return 0;
1709 }
1710 
1711 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1712 {
1713 	struct amdgpu_device *adev = ctx;
1714 
1715 	return dm_read_reg(adev->dm.dc->ctx, address);
1716 }
1717 
1718 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1719 				     uint32_t value)
1720 {
1721 	struct amdgpu_device *adev = ctx;
1722 
1723 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1724 }
1725 
1726 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1727 {
1728 	struct dmub_srv_create_params create_params;
1729 	struct dmub_srv_region_params region_params;
1730 	struct dmub_srv_region_info region_info;
1731 	struct dmub_srv_fb_params fb_params;
1732 	struct dmub_srv_fb_info *fb_info;
1733 	struct dmub_srv *dmub_srv;
1734 	const struct dmcub_firmware_header_v1_0 *hdr;
1735 	const char *fw_name_dmub;
1736 	enum dmub_asic dmub_asic;
1737 	enum dmub_status status;
1738 	int r;
1739 
1740 	switch (adev->asic_type) {
1741 	case CHIP_RENOIR:
1742 		dmub_asic = DMUB_ASIC_DCN21;
1743 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1744 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1745 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1746 		break;
1747 	case CHIP_SIENNA_CICHLID:
1748 		dmub_asic = DMUB_ASIC_DCN30;
1749 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1750 		break;
1751 	case CHIP_NAVY_FLOUNDER:
1752 		dmub_asic = DMUB_ASIC_DCN30;
1753 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1754 		break;
1755 	case CHIP_VANGOGH:
1756 		dmub_asic = DMUB_ASIC_DCN301;
1757 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1758 		break;
1759 	case CHIP_DIMGREY_CAVEFISH:
1760 		dmub_asic = DMUB_ASIC_DCN302;
1761 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1762 		break;
1763 	case CHIP_BEIGE_GOBY:
1764 		dmub_asic = DMUB_ASIC_DCN303;
1765 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1766 		break;
1767 	case CHIP_YELLOW_CARP:
1768 		dmub_asic = DMUB_ASIC_DCN31;
1769 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1770 		break;
1771 
1772 	default:
1773 		/* ASIC doesn't support DMUB. */
1774 		return 0;
1775 	}
1776 
1777 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1778 	if (r) {
1779 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1780 		return 0;
1781 	}
1782 
1783 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1784 	if (r) {
1785 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1786 		return 0;
1787 	}
1788 
1789 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1790 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1791 
1792 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1793 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1794 			AMDGPU_UCODE_ID_DMCUB;
1795 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1796 			adev->dm.dmub_fw;
1797 		adev->firmware.fw_size +=
1798 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1799 
1800 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1801 			 adev->dm.dmcub_fw_version);
1802 	}
1803 
1805 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1806 	dmub_srv = adev->dm.dmub_srv;
1807 
1808 	if (!dmub_srv) {
1809 		DRM_ERROR("Failed to allocate DMUB service!\n");
1810 		return -ENOMEM;
1811 	}
1812 
1813 	memset(&create_params, 0, sizeof(create_params));
1814 	create_params.user_ctx = adev;
1815 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1816 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1817 	create_params.asic = dmub_asic;
1818 
1819 	/* Create the DMUB service. */
1820 	status = dmub_srv_create(dmub_srv, &create_params);
1821 	if (status != DMUB_STATUS_OK) {
1822 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1823 		return -EINVAL;
1824 	}
1825 
1826 	/* Calculate the size of all the regions for the DMUB service. */
1827 	memset(&region_params, 0, sizeof(region_params));
1828 
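	/*
	 * Within the firmware blob, the instruction/constant data is wrapped
	 * by a PSP header and footer and followed by the BSS/data section;
	 * point DMUB past the PSP wrapper when describing the in-blob regions.
	 */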
1829 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1830 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1831 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1832 	region_params.vbios_size = adev->bios_size;
1833 	region_params.fw_bss_data = region_params.bss_data_size ?
1834 		adev->dm.dmub_fw->data +
1835 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1836 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1837 	region_params.fw_inst_const =
1838 		adev->dm.dmub_fw->data +
1839 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1840 		PSP_HEADER_BYTES;
1841 
1842 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1843 					   &region_info);
1844 
1845 	if (status != DMUB_STATUS_OK) {
1846 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1847 		return -EINVAL;
1848 	}
1849 
1850 	/*
1851 	 * Allocate a framebuffer based on the total size of all the regions.
1852 	 * TODO: Move this into GART.
1853 	 */
1854 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1855 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1856 				    &adev->dm.dmub_bo_gpu_addr,
1857 				    &adev->dm.dmub_bo_cpu_addr);
1858 	if (r)
1859 		return r;
1860 
1861 	/* Rebase the regions on the framebuffer address. */
1862 	memset(&fb_params, 0, sizeof(fb_params));
1863 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1864 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1865 	fb_params.region_info = &region_info;
1866 
1867 	adev->dm.dmub_fb_info =
1868 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1869 	fb_info = adev->dm.dmub_fb_info;
1870 
1871 	if (!fb_info) {
1872 		DRM_ERROR(
1873 			"Failed to allocate framebuffer info for DMUB service!\n");
1874 		return -ENOMEM;
1875 	}
1876 
1877 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1878 	if (status != DMUB_STATUS_OK) {
1879 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1880 		return -EINVAL;
1881 	}
1882 
1883 	return 0;
1884 }
1885 
1886 static int dm_sw_init(void *handle)
1887 {
1888 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1889 	int r;
1890 
1891 	r = dm_dmub_sw_init(adev);
1892 	if (r)
1893 		return r;
1894 
1895 	return load_dmcu_fw(adev);
1896 }
1897 
1898 static int dm_sw_fini(void *handle)
1899 {
1900 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1901 
1902 	kfree(adev->dm.dmub_fb_info);
1903 	adev->dm.dmub_fb_info = NULL;
1904 
1905 	if (adev->dm.dmub_srv) {
1906 		dmub_srv_destroy(adev->dm.dmub_srv);
1907 		adev->dm.dmub_srv = NULL;
1908 	}
1909 
1910 	release_firmware(adev->dm.dmub_fw);
1911 	adev->dm.dmub_fw = NULL;
1912 
1913 	release_firmware(adev->dm.fw_dmcu);
1914 	adev->dm.fw_dmcu = NULL;
1915 
1916 	return 0;
1917 }
1918 
1919 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1920 {
1921 	struct amdgpu_dm_connector *aconnector;
1922 	struct drm_connector *connector;
1923 	struct drm_connector_list_iter iter;
1924 	int ret = 0;
1925 
1926 	drm_connector_list_iter_begin(dev, &iter);
1927 	drm_for_each_connector_iter(connector, &iter) {
1928 		aconnector = to_amdgpu_dm_connector(connector);
1929 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1930 		    aconnector->mst_mgr.aux) {
1931 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1932 					 aconnector,
1933 					 aconnector->base.base.id);
1934 
1935 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1936 			if (ret < 0) {
1937 				DRM_ERROR("DM_MST: Failed to start MST\n");
1938 				aconnector->dc_link->type =
1939 					dc_connection_single;
1940 				break;
1941 			}
1942 		}
1943 	}
1944 	drm_connector_list_iter_end(&iter);
1945 
1946 	return ret;
1947 }
1948 
1949 static int dm_late_init(void *handle)
1950 {
1951 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1953 	struct dmcu_iram_parameters params;
1954 	unsigned int linear_lut[16];
1955 	int i;
1956 	struct dmcu *dmcu = NULL;
1957 
1958 	dmcu = adev->dm.dc->res_pool->dmcu;
1959 
1960 	for (i = 0; i < 16; i++)
1961 		linear_lut[i] = 0xFFFF * i / 15;
1962 
1963 	params.set = 0;
1964 	params.backlight_ramping_start = 0xCCCC;
1965 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1966 	params.backlight_lut_array_size = 16;
1967 	params.backlight_lut_array = linear_lut;
1968 
	/* Min backlight level after ABM reduction; don't allow it below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
1972 	params.min_abm_backlight = 0x28F;
	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
1977 	if (dmcu) {
1978 		if (!dmcu_load_iram(dmcu, params))
1979 			return -EINVAL;
1980 	} else if (adev->dm.dc->ctx->dmub_srv) {
1981 		struct dc_link *edp_links[MAX_NUM_EDP];
1982 		int edp_num;
1983 
1984 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
1985 		for (i = 0; i < edp_num; i++) {
1986 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1987 				return -EINVAL;
1988 		}
1989 	}
1990 
1991 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1992 }
1993 
1994 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1995 {
1996 	struct amdgpu_dm_connector *aconnector;
1997 	struct drm_connector *connector;
1998 	struct drm_connector_list_iter iter;
1999 	struct drm_dp_mst_topology_mgr *mgr;
2000 	int ret;
2001 	bool need_hotplug = false;
2002 
2003 	drm_connector_list_iter_begin(dev, &iter);
2004 	drm_for_each_connector_iter(connector, &iter) {
2005 		aconnector = to_amdgpu_dm_connector(connector);
2006 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2007 		    aconnector->mst_port)
2008 			continue;
2009 
2010 		mgr = &aconnector->mst_mgr;
2011 
2012 		if (suspend) {
2013 			drm_dp_mst_topology_mgr_suspend(mgr);
2014 		} else {
2015 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2016 			if (ret < 0) {
2017 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2018 				need_hotplug = true;
2019 			}
2020 		}
2021 	}
2022 	drm_connector_list_iter_end(&iter);
2023 
2024 	if (need_hotplug)
2025 		drm_kms_helper_hotplug_event(dev);
2026 }
2027 
2028 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2029 {
2030 	struct smu_context *smu = &adev->smu;
2031 	int ret = 0;
2032 
2033 	if (!is_support_sw_smu(adev))
2034 		return 0;
2035 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed. The
	 * settings have to be passed to smu during boot up and resume from s3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, then calls the pplib functions
	 * below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
	 */
	switch (adev->asic_type) {
2067 	case CHIP_NAVI10:
2068 	case CHIP_NAVI14:
2069 	case CHIP_NAVI12:
2070 		break;
2071 	default:
2072 		return 0;
2073 	}
2074 
2075 	ret = smu_write_watermarks_table(smu);
2076 	if (ret) {
2077 		DRM_ERROR("Failed to update WMTABLE!\n");
2078 		return ret;
2079 	}
2080 
2081 	return 0;
2082 }
2083 
2084 /**
2085  * dm_hw_init() - Initialize DC device
2086  * @handle: The base driver device containing the amdgpu_dm device.
2087  *
2088  * Initialize the &struct amdgpu_display_manager device. This involves calling
2089  * the initializers of each DM component, then populating the struct with them.
2090  *
2091  * Although the function implies hardware initialization, both hardware and
2092  * software are initialized here. Splitting them out to their relevant init
2093  * hooks is a future TODO item.
2094  *
2095  * Some notable things that are initialized here:
2096  *
2097  * - Display Core, both software and hardware
2098  * - DC modules that we need (freesync and color management)
2099  * - DRM software states
2100  * - Interrupt sources and handlers
2101  * - Vblank support
2102  * - Debug FS entries, if enabled
2103  */
2104 static int dm_hw_init(void *handle)
2105 {
2106 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2107 	/* Create DAL display manager */
2108 	amdgpu_dm_init(adev);
2109 	amdgpu_dm_hpd_init(adev);
2110 
2111 	return 0;
2112 }
2113 
2114 /**
2115  * dm_hw_fini() - Teardown DC device
2116  * @handle: The base driver device containing the amdgpu_dm device.
2117  *
2118  * Teardown components within &struct amdgpu_display_manager that require
2119  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2120  * were loaded. Also flush IRQ workqueues and disable them.
2121  */
2122 static int dm_hw_fini(void *handle)
2123 {
2124 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2125 
2126 	amdgpu_dm_hpd_fini(adev);
2127 
2128 	amdgpu_dm_irq_fini(adev);
2129 	amdgpu_dm_fini(adev);
2130 	return 0;
2131 }
2132 
2134 static int dm_enable_vblank(struct drm_crtc *crtc);
2135 static void dm_disable_vblank(struct drm_crtc *crtc);
2136 
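/*
 * dm_gpureset_toggle_interrupts() - enable or disable pflip and vblank
 * interrupts for every CRTC that drives an active stream in @state. Used to
 * quiesce interrupts before a GPU reset and to re-arm them afterwards.
 */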
2137 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2138 				 struct dc_state *state, bool enable)
2139 {
2140 	enum dc_irq_source irq_source;
2141 	struct amdgpu_crtc *acrtc;
2142 	int rc = -EBUSY;
2143 	int i = 0;
2144 
2145 	for (i = 0; i < state->stream_count; i++) {
2146 		acrtc = get_crtc_by_otg_inst(
2147 				adev, state->stream_status[i].primary_otg_inst);
2148 
2149 		if (acrtc && state->stream_status[i].plane_count != 0) {
2150 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2151 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2153 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2154 			if (rc)
2155 				DRM_WARN("Failed to %s pflip interrupts\n",
2156 					 enable ? "enable" : "disable");
2157 
2158 			if (enable) {
2159 				rc = dm_enable_vblank(&acrtc->base);
2160 				if (rc)
2161 					DRM_WARN("Failed to enable vblank interrupts\n");
2162 			} else {
2163 				dm_disable_vblank(&acrtc->base);
2164 			}
		}
	}
}
2170 
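/*
 * Commit a copy of the current DC state with all planes and streams removed,
 * driving the hardware to a display-off state ahead of a GPU reset.
 */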
2171 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2172 {
2173 	struct dc_state *context = NULL;
2174 	enum dc_status res = DC_ERROR_UNEXPECTED;
2175 	int i;
2176 	struct dc_stream_state *del_streams[MAX_PIPES];
2177 	int del_streams_count = 0;
2178 
2179 	memset(del_streams, 0, sizeof(del_streams));
2180 
2181 	context = dc_create_state(dc);
2182 	if (context == NULL)
2183 		goto context_alloc_fail;
2184 
2185 	dc_resource_state_copy_construct_current(dc, context);
2186 
2187 	/* First remove from context all streams */
2188 	for (i = 0; i < context->stream_count; i++) {
2189 		struct dc_stream_state *stream = context->streams[i];
2190 
2191 		del_streams[del_streams_count++] = stream;
2192 	}
2193 
2194 	/* Remove all planes for removed streams and then remove the streams */
2195 	for (i = 0; i < del_streams_count; i++) {
2196 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2197 			res = DC_FAIL_DETACH_SURFACES;
2198 			goto fail;
2199 		}
2200 
2201 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2202 		if (res != DC_OK)
2203 			goto fail;
2204 	}
2205 
2207 	res = dc_validate_global_state(dc, context, false);
2208 
2209 	if (res != DC_OK) {
2210 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
2211 		goto fail;
2212 	}
2213 
2214 	res = dc_commit_state(dc, context);
2215 
2216 fail:
2217 	dc_release_state(context);
2218 
2219 context_alloc_fail:
2220 	return res;
2221 }
2222 
2223 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2224 {
2225 	int i;
2226 
2227 	if (dm->hpd_rx_offload_wq) {
2228 		for (i = 0; i < dm->dc->caps.max_links; i++)
2229 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2230 	}
2231 }
2232 
2233 static int dm_suspend(void *handle)
2234 {
2235 	struct amdgpu_device *adev = handle;
2236 	struct amdgpu_display_manager *dm = &adev->dm;
2237 	int ret = 0;
2238 
2239 	if (amdgpu_in_reset(adev)) {
2240 		mutex_lock(&dm->dc_lock);
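		/*
		 * Note: dc_lock is intentionally held across the GPU reset;
		 * it is released in dm_resume().
		 */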
2241 
2242 #if defined(CONFIG_DRM_AMD_DC_DCN)
2243 		dc_allow_idle_optimizations(adev->dm.dc, false);
2244 #endif
2245 
2246 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2247 
2248 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2249 
2250 		amdgpu_dm_commit_zero_streams(dm->dc);
2251 
2252 		amdgpu_dm_irq_suspend(adev);
2253 
2254 		hpd_rx_irq_work_suspend(dm);
2255 
2256 		return ret;
2257 	}
2258 
2259 	WARN_ON(adev->dm.cached_state);
2260 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2261 
2262 	s3_handle_mst(adev_to_drm(adev), true);
2263 
2264 	amdgpu_dm_irq_suspend(adev);
2265 
2266 	hpd_rx_irq_work_suspend(dm);
2267 
2268 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2269 
2270 	return 0;
2271 }
2272 
2273 static struct amdgpu_dm_connector *
2274 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2275 					     struct drm_crtc *crtc)
2276 {
2277 	uint32_t i;
2278 	struct drm_connector_state *new_con_state;
2279 	struct drm_connector *connector;
2280 	struct drm_crtc *crtc_from_state;
2281 
2282 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2283 		crtc_from_state = new_con_state->crtc;
2284 
2285 		if (crtc_from_state == crtc)
2286 			return to_amdgpu_dm_connector(connector);
2287 	}
2288 
2289 	return NULL;
2290 }
2291 
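/*
 * Emulate a connected sink on links that are forced on (connector->force) but
 * report no physical connection: create a virtual sink that matches the
 * connector signal type and try to read a locally stored EDID for it.
 */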
2292 static void emulated_link_detect(struct dc_link *link)
2293 {
2294 	struct dc_sink_init_data sink_init_data = { 0 };
2295 	struct display_sink_capability sink_caps = { 0 };
2296 	enum dc_edid_status edid_status;
2297 	struct dc_context *dc_ctx = link->ctx;
2298 	struct dc_sink *sink = NULL;
2299 	struct dc_sink *prev_sink = NULL;
2300 
2301 	link->type = dc_connection_none;
2302 	prev_sink = link->local_sink;
2303 
2304 	if (prev_sink)
2305 		dc_sink_release(prev_sink);
2306 
2307 	switch (link->connector_signal) {
2308 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2309 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2310 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2311 		break;
2312 	}
2313 
2314 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2315 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2316 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2317 		break;
2318 	}
2319 
2320 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2321 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2322 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2323 		break;
2324 	}
2325 
2326 	case SIGNAL_TYPE_LVDS: {
2327 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2328 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2329 		break;
2330 	}
2331 
2332 	case SIGNAL_TYPE_EDP: {
2333 		sink_caps.transaction_type =
2334 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2335 		sink_caps.signal = SIGNAL_TYPE_EDP;
2336 		break;
2337 	}
2338 
2339 	case SIGNAL_TYPE_DISPLAY_PORT: {
2340 		sink_caps.transaction_type =
2341 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2342 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2343 		break;
2344 	}
2345 
2346 	default:
2347 		DC_ERROR("Invalid connector type! signal:%d\n",
2348 			link->connector_signal);
2349 		return;
2350 	}
2351 
2352 	sink_init_data.link = link;
2353 	sink_init_data.sink_signal = sink_caps.signal;
2354 
2355 	sink = dc_sink_create(&sink_init_data);
2356 	if (!sink) {
2357 		DC_ERROR("Failed to create sink!\n");
2358 		return;
2359 	}
2360 
2361 	/* dc_sink_create returns a new reference */
2362 	link->local_sink = sink;
2363 
2364 	edid_status = dm_helpers_read_local_edid(
2365 			link->ctx,
2366 			link,
2367 			sink);
2368 
	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}
2373 
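/*
 * Re-commit the streams of a cached DC state after a GPU reset, forcing a
 * full update on every plane so that the hardware is completely reprogrammed.
 */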
2374 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2375 				     struct amdgpu_display_manager *dm)
2376 {
2377 	struct {
2378 		struct dc_surface_update surface_updates[MAX_SURFACES];
2379 		struct dc_plane_info plane_infos[MAX_SURFACES];
2380 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2381 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2382 		struct dc_stream_update stream_update;
	} *bundle;
2384 	int k, m;
2385 
2386 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2387 
2388 	if (!bundle) {
2389 		dm_error("Failed to allocate update bundle\n");
2390 		goto cleanup;
2391 	}
2392 
	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}
2407 
2408 cleanup:
2409 	kfree(bundle);
2412 }
2413 
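/*
 * Force the stream currently driven by @link to DPMS off via a stream update;
 * used when HPD processing finds that the link has been disconnected.
 */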
2414 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2415 {
2416 	struct dc_stream_state *stream_state;
2417 	struct amdgpu_dm_connector *aconnector = link->priv;
2418 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2419 	struct dc_stream_update stream_update;
2420 	bool dpms_off = true;
2421 
2422 	memset(&stream_update, 0, sizeof(stream_update));
2423 	stream_update.dpms_off = &dpms_off;
2424 
2425 	mutex_lock(&adev->dm.dc_lock);
2426 	stream_state = dc_stream_find_from_link(link);
2427 
2428 	if (stream_state == NULL) {
2429 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2430 		mutex_unlock(&adev->dm.dc_lock);
2431 		return;
2432 	}
2433 
2434 	stream_update.stream = stream_state;
2435 	acrtc_state->force_dpms_off = true;
2436 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2437 				     stream_state, &stream_update,
2438 				     stream_state->ctx->dc->current_state);
2439 	mutex_unlock(&adev->dm.dc_lock);
2440 }
2441 
2442 static int dm_resume(void *handle)
2443 {
2444 	struct amdgpu_device *adev = handle;
2445 	struct drm_device *ddev = adev_to_drm(adev);
2446 	struct amdgpu_display_manager *dm = &adev->dm;
2447 	struct amdgpu_dm_connector *aconnector;
2448 	struct drm_connector *connector;
2449 	struct drm_connector_list_iter iter;
2450 	struct drm_crtc *crtc;
2451 	struct drm_crtc_state *new_crtc_state;
2452 	struct dm_crtc_state *dm_new_crtc_state;
2453 	struct drm_plane *plane;
2454 	struct drm_plane_state *new_plane_state;
2455 	struct dm_plane_state *dm_new_plane_state;
2456 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2457 	enum dc_connection_type new_connection_type = dc_connection_none;
2458 	struct dc_state *dc_state;
2459 	int i, r, j;
2460 
2461 	if (amdgpu_in_reset(adev)) {
2462 		dc_state = dm->cached_dc_state;
2463 
2464 		r = dm_dmub_hw_init(adev);
2465 		if (r)
2466 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2467 
2468 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2469 		dc_resume(dm->dc);
2470 
2471 		amdgpu_dm_irq_resume_early(adev);
2472 
2473 		for (i = 0; i < dc_state->stream_count; i++) {
2474 			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2477 					= 0xffffffff;
2478 			}
2479 		}
2480 #if defined(CONFIG_DRM_AMD_DC_DCN)
2481 		/*
		 * Resource allocation happens for link encoders for newer ASICs in
2483 		 * dc_validate_global_state, so we need to revalidate it.
2484 		 *
2485 		 * This shouldn't fail (it passed once before), so warn if it does.
2486 		 */
2487 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2488 #endif
2489 
2490 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2491 
2492 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2493 
2494 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2495 
2496 		dc_release_state(dm->cached_dc_state);
2497 		dm->cached_dc_state = NULL;
2498 
2499 		amdgpu_dm_irq_resume_late(adev);
2500 
2501 		mutex_unlock(&dm->dc_lock);
2502 
2503 		return 0;
2504 	}
2505 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2506 	dc_release_state(dm_state->context);
2507 	dm_state->context = dc_create_state(dm->dc);
2508 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2509 	dc_resource_state_construct(dm->dc, dm_state->context);
2510 
2511 	/* Before powering on DC we need to re-initialize DMUB. */
2512 	r = dm_dmub_hw_init(adev);
2513 	if (r)
2514 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2515 
2516 	/* power on hardware */
2517 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2518 
2519 	/* program HPD filter */
2520 	dc_resume(dm->dc);
2521 
	/*
	 * Enable HPD Rx IRQ early. This must be done before setting a mode,
	 * since short-pulse interrupts are used for MST.
	 */
2526 	amdgpu_dm_irq_resume_early(adev);
2527 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2529 	s3_handle_mst(ddev, false);
2530 
	/* Do detection */
2532 	drm_connector_list_iter_begin(ddev, &iter);
2533 	drm_for_each_connector_iter(connector, &iter) {
2534 		aconnector = to_amdgpu_dm_connector(connector);
2535 
		/*
		 * This is the case when traversing through already-created
		 * MST connectors; they should be skipped.
		 */
2540 		if (aconnector->mst_port)
2541 			continue;
2542 
2543 		mutex_lock(&aconnector->hpd_lock);
2544 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2545 			DRM_ERROR("KMS: Failed to detect connector\n");
2546 
2547 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2548 			emulated_link_detect(aconnector->dc_link);
2549 		else
2550 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2551 
2552 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2553 			aconnector->fake_enable = false;
2554 
2555 		if (aconnector->dc_sink)
2556 			dc_sink_release(aconnector->dc_sink);
2557 		aconnector->dc_sink = NULL;
2558 		amdgpu_dm_update_connector_after_detect(aconnector);
2559 		mutex_unlock(&aconnector->hpd_lock);
2560 	}
2561 	drm_connector_list_iter_end(&iter);
2562 
2563 	/* Force mode set in atomic commit */
2564 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2565 		new_crtc_state->active_changed = true;
2566 
2567 	/*
2568 	 * atomic_check is expected to create the dc states. We need to release
2569 	 * them here, since they were duplicated as part of the suspend
2570 	 * procedure.
2571 	 */
2572 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2573 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2574 		if (dm_new_crtc_state->stream) {
2575 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2576 			dc_stream_release(dm_new_crtc_state->stream);
2577 			dm_new_crtc_state->stream = NULL;
2578 		}
2579 	}
2580 
2581 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2582 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2583 		if (dm_new_plane_state->dc_state) {
2584 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2585 			dc_plane_state_release(dm_new_plane_state->dc_state);
2586 			dm_new_plane_state->dc_state = NULL;
2587 		}
2588 	}
2589 
2590 	drm_atomic_helper_resume(ddev, dm->cached_state);
2591 
2592 	dm->cached_state = NULL;
2593 
2594 	amdgpu_dm_irq_resume_late(adev);
2595 
2596 	amdgpu_dm_smu_write_watermarks_table(adev);
2597 
2598 	return 0;
2599 }
2600 
2601 /**
2602  * DOC: DM Lifecycle
2603  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2605  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2606  * the base driver's device list to be initialized and torn down accordingly.
2607  *
2608  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2609  */
2610 
2611 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2612 	.name = "dm",
2613 	.early_init = dm_early_init,
2614 	.late_init = dm_late_init,
2615 	.sw_init = dm_sw_init,
2616 	.sw_fini = dm_sw_fini,
2617 	.early_fini = amdgpu_dm_early_fini,
2618 	.hw_init = dm_hw_init,
2619 	.hw_fini = dm_hw_fini,
2620 	.suspend = dm_suspend,
2621 	.resume = dm_resume,
2622 	.is_idle = dm_is_idle,
2623 	.wait_for_idle = dm_wait_for_idle,
2624 	.check_soft_reset = dm_check_soft_reset,
2625 	.soft_reset = dm_soft_reset,
2626 	.set_clockgating_state = dm_set_clockgating_state,
2627 	.set_powergating_state = dm_set_powergating_state,
2628 };
2629 
2630 const struct amdgpu_ip_block_version dm_ip_block =
2631 {
2632 	.type = AMD_IP_BLOCK_TYPE_DCE,
2633 	.major = 1,
2634 	.minor = 0,
2635 	.rev = 0,
2636 	.funcs = &amdgpu_dm_funcs,
2637 };
2638 
2639 
2640 /**
2641  * DOC: atomic
2642  *
2643  * *WIP*
2644  */
2645 
2646 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2647 	.fb_create = amdgpu_display_user_framebuffer_create,
2648 	.get_format_info = amd_get_format_info,
2649 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2650 	.atomic_check = amdgpu_dm_atomic_check,
2651 	.atomic_commit = drm_atomic_helper_commit,
2652 };
2653 
2654 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2655 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2656 };
2657 
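/*
 * Cache the extended backlight capabilities of an eDP link: whether AUX
 * backlight control can be used, and the min/max input signal derived from
 * the sink's HDR metadata (max_cll/min_cll).
 */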
2658 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2659 {
2660 	u32 max_cll, min_cll, max, min, q, r;
2661 	struct amdgpu_dm_backlight_caps *caps;
2662 	struct amdgpu_display_manager *dm;
2663 	struct drm_connector *conn_base;
2664 	struct amdgpu_device *adev;
2665 	struct dc_link *link = NULL;
2666 	static const u8 pre_computed_values[] = {
2667 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2668 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2669 	int i;
2670 
2671 	if (!aconnector || !aconnector->dc_link)
2672 		return;
2673 
2674 	link = aconnector->dc_link;
2675 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2676 		return;
2677 
2678 	conn_base = &aconnector->base;
2679 	adev = drm_to_adev(conn_base->dev);
2680 	dm = &adev->dm;
2681 	for (i = 0; i < dm->num_of_edps; i++) {
2682 		if (link == dm->backlight_link[i])
2683 			break;
2684 	}
2685 	if (i >= dm->num_of_edps)
2686 		return;
2687 	caps = &dm->backlight_caps[i];
2688 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2689 	caps->aux_support = false;
2690 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2691 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2692 
2693 	if (caps->ext_caps->bits.oled == 1 /*||
2694 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2695 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2696 		caps->aux_support = true;
2697 
2698 	if (amdgpu_backlight == 0)
2699 		caps->aux_support = false;
2700 	else if (amdgpu_backlight == 1)
2701 		caps->aux_support = true;
2702 
	/* From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 *	Luminance = 50 * 2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would need floating point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. By Euclid's division algorithm,
	 * CV can be written as CV = 32*q + r, so the luminance expression
	 * becomes 50 * (2**q) * (2**(r/32)), and only 50 * (2**(r/32)) needs
	 * to be pre-computed. The table was generated with the following Ruby
	 * line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and its results can be verified against pre_computed_values.
	 */
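	/*
	 * Worked example (illustrative values only): max_cll = 97 gives
	 * q = 97 / 32 = 3 and r = 97 % 32 = 1, so the maximum luminance is
	 * (1 << 3) * pre_computed_values[1] = 8 * 51 = 408, close to the
	 * exact 50 * 2**(97/32) ~= 409.
	 */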
2718 	q = max_cll >> 5;
2719 	r = max_cll % 32;
2720 	max = (1 << q) * pre_computed_values[r];
2721 
	/* min luminance: maxLum * (CV/255)^2 / 100 */
2723 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2724 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2725 
2726 	caps->aux_max_input_signal = max;
2727 	caps->aux_min_input_signal = min;
2728 }
2729 
2730 void amdgpu_dm_update_connector_after_detect(
2731 		struct amdgpu_dm_connector *aconnector)
2732 {
2733 	struct drm_connector *connector = &aconnector->base;
2734 	struct drm_device *dev = connector->dev;
2735 	struct dc_sink *sink;
2736 
2737 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2739 		return;
2740 
2741 	sink = aconnector->dc_link->local_sink;
2742 	if (sink)
2743 		dc_sink_retain(sink);
2744 
	/*
	 * An EDID-managed connector gets its first update only in the
	 * mode_valid hook, and the connector sink is then set to either the
	 * fake or the physical sink depending on the link status.
	 * Skip this if it was already done during boot.
	 */
2750 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2751 			&& aconnector->dc_em_sink) {
2752 
		/*
		 * For headless S3 resume, use the emulated sink (dc_em_sink)
		 * to fake a stream, because connector->sink is set to NULL on
		 * resume.
		 */
2757 		mutex_lock(&dev->mode_config.mutex);
2758 
2759 		if (sink) {
2760 			if (aconnector->dc_sink) {
2761 				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump up the
				 * sink refcount, because the link no longer
				 * points to it after disconnect; otherwise the
				 * next CRTC-to-connector reshuffle by the UMD
				 * would trigger an unwanted dc_sink release.
				 */
2768 				dc_sink_release(aconnector->dc_sink);
2769 			}
2770 			aconnector->dc_sink = sink;
2771 			dc_sink_retain(aconnector->dc_sink);
2772 			amdgpu_dm_update_freesync_caps(connector,
2773 					aconnector->edid);
2774 		} else {
2775 			amdgpu_dm_update_freesync_caps(connector, NULL);
2776 			if (!aconnector->dc_sink) {
2777 				aconnector->dc_sink = aconnector->dc_em_sink;
2778 				dc_sink_retain(aconnector->dc_sink);
2779 			}
2780 		}
2781 
2782 		mutex_unlock(&dev->mode_config.mutex);
2783 
2784 		if (sink)
2785 			dc_sink_release(sink);
2786 		return;
2787 	}
2788 
	/*
	 * TODO: temporary guard until a proper fix is found.
	 * If this sink is an MST sink, we should not do anything.
	 */
2793 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2794 		dc_sink_release(sink);
2795 		return;
2796 	}
2797 
2798 	if (aconnector->dc_sink == sink) {
2799 		/*
2800 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2801 		 * Do nothing!!
2802 		 */
2803 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2804 				aconnector->connector_id);
2805 		if (sink)
2806 			dc_sink_release(sink);
2807 		return;
2808 	}
2809 
2810 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2811 		aconnector->connector_id, aconnector->dc_sink, sink);
2812 
2813 	mutex_lock(&dev->mode_config.mutex);
2814 
2815 	/*
2816 	 * 1. Update status of the drm connector
2817 	 * 2. Send an event and let userspace tell us what to do
2818 	 */
2819 	if (sink) {
2820 		/*
2821 		 * TODO: check if we still need the S3 mode update workaround.
2822 		 * If yes, put it here.
2823 		 */
2824 		if (aconnector->dc_sink) {
2825 			amdgpu_dm_update_freesync_caps(connector, NULL);
2826 			dc_sink_release(aconnector->dc_sink);
2827 		}
2828 
2829 		aconnector->dc_sink = sink;
2830 		dc_sink_retain(aconnector->dc_sink);
2831 		if (sink->dc_edid.length == 0) {
2832 			aconnector->edid = NULL;
2833 			if (aconnector->dc_link->aux_mode) {
2834 				drm_dp_cec_unset_edid(
2835 					&aconnector->dm_dp_aux.aux);
2836 			}
2837 		} else {
2838 			aconnector->edid =
2839 				(struct edid *)sink->dc_edid.raw_edid;
2840 
2841 			drm_connector_update_edid_property(connector,
2842 							   aconnector->edid);
2843 			if (aconnector->dc_link->aux_mode)
2844 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2845 						    aconnector->edid);
2846 		}
2847 
2848 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2849 		update_connector_ext_caps(aconnector);
2850 	} else {
2851 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2852 		amdgpu_dm_update_freesync_caps(connector, NULL);
2853 		drm_connector_update_edid_property(connector, NULL);
2854 		aconnector->num_modes = 0;
2855 		dc_sink_release(aconnector->dc_sink);
2856 		aconnector->dc_sink = NULL;
2857 		aconnector->edid = NULL;
2858 #ifdef CONFIG_DRM_AMD_DC_HDCP
2859 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2860 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2861 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2862 #endif
2863 	}
2864 
2865 	mutex_unlock(&dev->mode_config.mutex);
2866 
2867 	update_subconnector_property(aconnector);
2868 
2869 	if (sink)
2870 		dc_sink_release(sink);
2871 }
2872 
2873 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2874 {
2875 	struct drm_connector *connector = &aconnector->base;
2876 	struct drm_device *dev = connector->dev;
2877 	enum dc_connection_type new_connection_type = dc_connection_none;
2878 	struct amdgpu_device *adev = drm_to_adev(dev);
2879 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2880 	struct dm_crtc_state *dm_crtc_state = NULL;
2881 
2882 	if (adev->dm.disable_hpd_irq)
2883 		return;
2884 
2885 	if (dm_con_state->base.state && dm_con_state->base.crtc)
2886 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2887 					dm_con_state->base.state,
2888 					dm_con_state->base.crtc));
	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in
	 * its own context.
	 */
2893 	mutex_lock(&aconnector->hpd_lock);
2894 
2895 #ifdef CONFIG_DRM_AMD_DC_HDCP
2896 	if (adev->dm.hdcp_workqueue) {
2897 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2898 		dm_con_state->update_hdcp = true;
2899 	}
2900 #endif
2901 	if (aconnector->fake_enable)
2902 		aconnector->fake_enable = false;
2903 
2904 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2905 		DRM_ERROR("KMS: Failed to detect connector\n");
2906 
2907 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2908 		emulated_link_detect(aconnector->dc_link);
2909 
2911 		drm_modeset_lock_all(dev);
2912 		dm_restore_drm_connector_state(dev, connector);
2913 		drm_modeset_unlock_all(dev);
2914 
2915 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2916 			drm_kms_helper_hotplug_event(dev);
2917 
2918 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2919 		if (new_connection_type == dc_connection_none &&
2920 		    aconnector->dc_link->type == dc_connection_none &&
2921 		    dm_crtc_state)
2922 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
2923 
2924 		amdgpu_dm_update_connector_after_detect(aconnector);
2925 
2926 		drm_modeset_lock_all(dev);
2927 		dm_restore_drm_connector_state(dev, connector);
2928 		drm_modeset_unlock_all(dev);
2929 
2930 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2931 			drm_kms_helper_hotplug_event(dev);
2932 	}
2933 	mutex_unlock(&aconnector->hpd_lock);
2935 }
2936 
2937 static void handle_hpd_irq(void *param)
2938 {
2939 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2940 
2941 	handle_hpd_irq_helper(aconnector);
2943 }
2944 
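/*
 * Service MST sideband messages on an HPD RX (short pulse) interrupt: read
 * the ESI bytes, hand them to the MST topology manager, ack the handled bits
 * back at DPCD, and repeat until no new IRQ is reported (bounded by
 * max_process_count).
 */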
2945 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
2946 {
2947 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2948 	uint8_t dret;
2949 	bool new_irq_handled = false;
2950 	int dpcd_addr;
2951 	int dpcd_bytes_to_read;
2952 
2953 	const int max_process_count = 30;
2954 	int process_count = 0;
2955 
2956 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2957 
2958 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2959 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2960 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2961 		dpcd_addr = DP_SINK_COUNT;
2962 	} else {
2963 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2964 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2965 		dpcd_addr = DP_SINK_COUNT_ESI;
2966 	}
2967 
2968 	dret = drm_dp_dpcd_read(
2969 		&aconnector->dm_dp_aux.aux,
2970 		dpcd_addr,
2971 		esi,
2972 		dpcd_bytes_to_read);
2973 
2974 	while (dret == dpcd_bytes_to_read &&
2975 		process_count < max_process_count) {
2976 		uint8_t retry;
2977 		dret = 0;
2978 
2979 		process_count++;
2980 
2981 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2982 		/* handle HPD short pulse irq */
2983 		if (aconnector->mst_mgr.mst_state)
2984 			drm_dp_mst_hpd_irq(
2985 				&aconnector->mst_mgr,
2986 				esi,
2987 				&new_irq_handled);
2988 
2989 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
2991 			const int ack_dpcd_bytes_to_write =
2992 				dpcd_bytes_to_read - 1;
2993 
2994 			for (retry = 0; retry < 3; retry++) {
2995 				uint8_t wret;
2996 
2997 				wret = drm_dp_dpcd_write(
2998 					&aconnector->dm_dp_aux.aux,
2999 					dpcd_addr + 1,
3000 					&esi[1],
3001 					ack_dpcd_bytes_to_write);
3002 				if (wret == ack_dpcd_bytes_to_write)
3003 					break;
3004 			}
3005 
3006 			/* check if there is new irq to be handled */
3007 			dret = drm_dp_dpcd_read(
3008 				&aconnector->dm_dp_aux.aux,
3009 				dpcd_addr,
3010 				esi,
3011 				dpcd_bytes_to_read);
3012 
3013 			new_irq_handled = false;
3014 		} else {
3015 			break;
3016 		}
3017 	}
3018 
3019 	if (process_count == max_process_count)
3020 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3021 }
3022 
3023 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3024 							union hpd_irq_data hpd_irq_data)
3025 {
3026 	struct hpd_rx_irq_offload_work *offload_work =
3027 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3028 
3029 	if (!offload_work) {
3030 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3031 		return;
3032 	}
3033 
3034 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3035 	offload_work->data = hpd_irq_data;
3036 	offload_work->offload_wq = offload_wq;
3037 
3038 	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3040 }
3041 
3042 static void handle_hpd_rx_irq(void *param)
3043 {
3044 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3045 	struct drm_connector *connector = &aconnector->base;
3046 	struct drm_device *dev = connector->dev;
3047 	struct dc_link *dc_link = aconnector->dc_link;
3048 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3049 	bool result = false;
3050 	enum dc_connection_type new_connection_type = dc_connection_none;
3051 	struct amdgpu_device *adev = drm_to_adev(dev);
3052 	union hpd_irq_data hpd_irq_data;
3053 	bool link_loss = false;
3054 	bool has_left_work = false;
3055 	int idx = aconnector->base.index;
3056 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3057 
3058 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3059 
3060 	if (adev->dm.disable_hpd_irq)
3061 		return;
3062 
	/*
	 * TODO: Temporarily add a mutex so that the hpd interrupt does not
	 * hit a gpio conflict; once the i2c helper is implemented, this mutex
	 * should be retired.
	 */
3068 	mutex_lock(&aconnector->hpd_lock);
3069 
3070 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3071 						&link_loss, true, &has_left_work);
3072 
3073 	if (!has_left_work)
3074 		goto out;
3075 
3076 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3077 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3078 		goto out;
3079 	}
3080 
3081 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3082 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3083 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3084 			dm_handle_mst_sideband_msg(aconnector);
3085 			goto out;
3086 		}
3087 
3088 		if (link_loss) {
3089 			bool skip = false;
3090 
3091 			spin_lock(&offload_wq->offload_lock);
3092 			skip = offload_wq->is_handling_link_loss;
3093 
3094 			if (!skip)
3095 				offload_wq->is_handling_link_loss = true;
3096 
3097 			spin_unlock(&offload_wq->offload_lock);
3098 
3099 			if (!skip)
3100 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3101 
3102 			goto out;
3103 		}
3104 	}
3105 
3106 out:
3107 	if (result && !is_mst_root_connector) {
3108 		/* Downstream Port status changed. */
3109 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3110 			DRM_ERROR("KMS: Failed to detect connector\n");
3111 
3112 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3113 			emulated_link_detect(dc_link);
3114 
3115 			if (aconnector->fake_enable)
3116 				aconnector->fake_enable = false;
3117 
3118 			amdgpu_dm_update_connector_after_detect(aconnector);
3119 
3121 			drm_modeset_lock_all(dev);
3122 			dm_restore_drm_connector_state(dev, connector);
3123 			drm_modeset_unlock_all(dev);
3124 
3125 			drm_kms_helper_hotplug_event(dev);
3126 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3127 
3128 			if (aconnector->fake_enable)
3129 				aconnector->fake_enable = false;
3130 
3131 			amdgpu_dm_update_connector_after_detect(aconnector);
3132 
3134 			drm_modeset_lock_all(dev);
3135 			dm_restore_drm_connector_state(dev, connector);
3136 			drm_modeset_unlock_all(dev);
3137 
3138 			drm_kms_helper_hotplug_event(dev);
3139 		}
3140 	}
3141 #ifdef CONFIG_DRM_AMD_DC_HDCP
3142 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3143 		if (adev->dm.hdcp_workqueue)
3144 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3145 	}
3146 #endif
3147 
3148 	if (dc_link->type != dc_connection_mst_branch)
3149 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3150 
3151 	mutex_unlock(&aconnector->hpd_lock);
3152 }
3153 
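/*
 * Walk the connector list and register DM handlers for each link's HPD and
 * HPD RX (short pulse) interrupt sources, binding the per-connector HPD RX
 * offload queue where one exists.
 */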
3154 static void register_hpd_handlers(struct amdgpu_device *adev)
3155 {
3156 	struct drm_device *dev = adev_to_drm(adev);
3157 	struct drm_connector *connector;
3158 	struct amdgpu_dm_connector *aconnector;
3159 	const struct dc_link *dc_link;
3160 	struct dc_interrupt_params int_params = {0};
3161 
3162 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3163 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3164 
3165 	list_for_each_entry(connector,
3166 			&dev->mode_config.connector_list, head)	{
3167 
3168 		aconnector = to_amdgpu_dm_connector(connector);
3169 		dc_link = aconnector->dc_link;
3170 
3171 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3172 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3173 			int_params.irq_source = dc_link->irq_source_hpd;
3174 
3175 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3176 					handle_hpd_irq,
3177 					(void *) aconnector);
3178 		}
3179 
3180 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3181 
3182 			/* Also register for DP short pulse (hpd_rx). */
3183 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
3185 
3186 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3187 					handle_hpd_rx_irq,
3188 					(void *) aconnector);
3189 
3190 			if (adev->dm.hpd_rx_offload_wq)
3191 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3192 					aconnector;
3193 		}
3194 	}
3195 }
3196 
3197 #if defined(CONFIG_DRM_AMD_DC_SI)
3198 /* Register IRQ sources and initialize IRQ callbacks */
3199 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3200 {
3201 	struct dc *dc = adev->dm.dc;
3202 	struct common_irq_params *c_irq_params;
3203 	struct dc_interrupt_params int_params = {0};
3204 	int r;
3205 	int i;
3206 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3207 
3208 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3209 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3210 
3211 	/*
3212 	 * Actions of amdgpu_irq_add_id():
3213 	 * 1. Register a set() function with base driver.
3214 	 *    Base driver will call set() function to enable/disable an
3215 	 *    interrupt in DC hardware.
3216 	 * 2. Register amdgpu_dm_irq_handler().
3217 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3218 	 *    coming from DC hardware.
3219 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3220 	 *    for acknowledging and handling. */
3221 
3222 	/* Use VBLANK interrupt */
3223 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3225 		if (r) {
3226 			DRM_ERROR("Failed to add crtc irq id!\n");
3227 			return r;
3228 		}
3229 
3230 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3231 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
3233 
3234 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3235 
3236 		c_irq_params->adev = adev;
3237 		c_irq_params->irq_src = int_params.irq_source;
3238 
3239 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3240 				dm_crtc_high_irq, c_irq_params);
3241 	}
3242 
3243 	/* Use GRPH_PFLIP interrupt */
3244 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3245 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3246 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3247 		if (r) {
3248 			DRM_ERROR("Failed to add page flip irq id!\n");
3249 			return r;
3250 		}
3251 
3252 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3253 		int_params.irq_source =
3254 			dc_interrupt_to_irq_source(dc, i, 0);
3255 
3256 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3257 
3258 		c_irq_params->adev = adev;
3259 		c_irq_params->irq_src = int_params.irq_source;
3260 
3261 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3262 				dm_pflip_high_irq, c_irq_params);
3263 
3264 	}
3265 
3266 	/* HPD */
3267 	r = amdgpu_irq_add_id(adev, client_id,
3268 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3269 	if (r) {
3270 		DRM_ERROR("Failed to add hpd irq id!\n");
3271 		return r;
3272 	}
3273 
3274 	register_hpd_handlers(adev);
3275 
3276 	return 0;
3277 }
3278 #endif
3279 
3280 /* Register IRQ sources and initialize IRQ callbacks */
3281 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3282 {
3283 	struct dc *dc = adev->dm.dc;
3284 	struct common_irq_params *c_irq_params;
3285 	struct dc_interrupt_params int_params = {0};
3286 	int r;
3287 	int i;
3288 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3289 
3290 	if (adev->asic_type >= CHIP_VEGA10)
3291 		client_id = SOC15_IH_CLIENTID_DCE;
3292 
3293 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3294 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3295 
3296 	/*
3297 	 * Actions of amdgpu_irq_add_id():
3298 	 * 1. Register a set() function with base driver.
3299 	 *    Base driver will call set() function to enable/disable an
3300 	 *    interrupt in DC hardware.
3301 	 * 2. Register amdgpu_dm_irq_handler().
3302 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3303 	 *    coming from DC hardware.
3304 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3305 	 *    for acknowledging and handling. */
3306 
3307 	/* Use VBLANK interrupt */
3308 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3309 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3310 		if (r) {
3311 			DRM_ERROR("Failed to add crtc irq id!\n");
3312 			return r;
3313 		}
3314 
3315 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3316 		int_params.irq_source =
3317 			dc_interrupt_to_irq_source(dc, i, 0);
3318 
3319 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3320 
3321 		c_irq_params->adev = adev;
3322 		c_irq_params->irq_src = int_params.irq_source;
3323 
3324 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3325 				dm_crtc_high_irq, c_irq_params);
3326 	}
3327 
3328 	/* Use VUPDATE interrupt */
3329 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3330 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3331 		if (r) {
3332 			DRM_ERROR("Failed to add vupdate irq id!\n");
3333 			return r;
3334 		}
3335 
3336 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3337 		int_params.irq_source =
3338 			dc_interrupt_to_irq_source(dc, i, 0);
3339 
3340 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3341 
3342 		c_irq_params->adev = adev;
3343 		c_irq_params->irq_src = int_params.irq_source;
3344 
3345 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3346 				dm_vupdate_high_irq, c_irq_params);
3347 	}
3348 
3349 	/* Use GRPH_PFLIP interrupt */
3350 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3351 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3352 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3353 		if (r) {
3354 			DRM_ERROR("Failed to add page flip irq id!\n");
3355 			return r;
3356 		}
3357 
3358 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3359 		int_params.irq_source =
3360 			dc_interrupt_to_irq_source(dc, i, 0);
3361 
3362 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3363 
3364 		c_irq_params->adev = adev;
3365 		c_irq_params->irq_src = int_params.irq_source;
3366 
3367 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3368 				dm_pflip_high_irq, c_irq_params);
3369 
3370 	}
3371 
3372 	/* HPD */
3373 	r = amdgpu_irq_add_id(adev, client_id,
3374 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3375 	if (r) {
3376 		DRM_ERROR("Failed to add hpd irq id!\n");
3377 		return r;
3378 	}
3379 
3380 	register_hpd_handlers(adev);
3381 
3382 	return 0;
3383 }
3384 
3385 #if defined(CONFIG_DRM_AMD_DC_DCN)
3386 /* Register IRQ sources and initialize IRQ callbacks */
3387 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3388 {
3389 	struct dc *dc = adev->dm.dc;
3390 	struct common_irq_params *c_irq_params;
3391 	struct dc_interrupt_params int_params = {0};
3392 	int r;
3393 	int i;
3394 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3395 	static const unsigned int vrtl_int_srcid[] = {
3396 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3397 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3398 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3399 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3400 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3401 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3402 	};
3403 #endif
3404 
3405 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3406 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3407 
3408 	/*
3409 	 * Actions of amdgpu_irq_add_id():
3410 	 * 1. Register a set() function with base driver.
3411 	 *    Base driver will call set() function to enable/disable an
3412 	 *    interrupt in DC hardware.
3413 	 * 2. Register amdgpu_dm_irq_handler().
3414 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3415 	 *    coming from DC hardware.
3416 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3417 	 *    for acknowledging and handling.
3418 	 */
3419 
3420 	/* Use VSTARTUP interrupt */
3421 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3422 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3423 			i++) {
3424 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3425 
3426 		if (r) {
3427 			DRM_ERROR("Failed to add crtc irq id!\n");
3428 			return r;
3429 		}
3430 
3431 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3432 		int_params.irq_source =
3433 			dc_interrupt_to_irq_source(dc, i, 0);
3434 
3435 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3436 
3437 		c_irq_params->adev = adev;
3438 		c_irq_params->irq_src = int_params.irq_source;
3439 
3440 		amdgpu_dm_irq_register_interrupt(
3441 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3442 	}
3443 
3444 	/* Use otg vertical line interrupt */
3445 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3446 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3447 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3448 				vrtl_int_srcid[i], &adev->vline0_irq);
3449 
3450 		if (r) {
3451 			DRM_ERROR("Failed to add vline0 irq id!\n");
3452 			return r;
3453 		}
3454 
3455 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3456 		int_params.irq_source =
3457 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3458 
3459 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3460 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3461 			break;
3462 		}
3463 
3464 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3465 					- DC_IRQ_SOURCE_DC1_VLINE0];
3466 
3467 		c_irq_params->adev = adev;
3468 		c_irq_params->irq_src = int_params.irq_source;
3469 
3470 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3471 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3472 	}
3473 #endif
3474 
3475 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3476 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3477 	 * to trigger at end of each vblank, regardless of state of the lock,
3478 	 * matching DCE behaviour.
3479 	 */
3480 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3481 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3482 	     i++) {
3483 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3484 
3485 		if (r) {
3486 			DRM_ERROR("Failed to add vupdate irq id!\n");
3487 			return r;
3488 		}
3489 
3490 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3491 		int_params.irq_source =
3492 			dc_interrupt_to_irq_source(dc, i, 0);
3493 
3494 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3495 
3496 		c_irq_params->adev = adev;
3497 		c_irq_params->irq_src = int_params.irq_source;
3498 
3499 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3500 				dm_vupdate_high_irq, c_irq_params);
3501 	}
3502 
3503 	/* Use GRPH_PFLIP interrupt */
3504 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3505 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3506 			i++) {
3507 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3508 		if (r) {
3509 			DRM_ERROR("Failed to add page flip irq id!\n");
3510 			return r;
3511 		}
3512 
3513 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3514 		int_params.irq_source =
3515 			dc_interrupt_to_irq_source(dc, i, 0);
3516 
3517 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3518 
3519 		c_irq_params->adev = adev;
3520 		c_irq_params->irq_src = int_params.irq_source;
3521 
3522 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3523 				dm_pflip_high_irq, c_irq_params);
3524 
3525 	}
3526 
3527 	/* HPD */
3528 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3529 			&adev->hpd_irq);
3530 	if (r) {
3531 		DRM_ERROR("Failed to add hpd irq id!\n");
3532 		return r;
3533 	}
3534 
3535 	register_hpd_handlers(adev);
3536 
3537 	return 0;
3538 }
3539 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3540 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3541 {
3542 	struct dc *dc = adev->dm.dc;
3543 	struct common_irq_params *c_irq_params;
3544 	struct dc_interrupt_params int_params = {0};
3545 	int r, i;
3546 
3547 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3548 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3549 
3550 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3551 			&adev->dmub_outbox_irq);
3552 	if (r) {
3553 		DRM_ERROR("Failed to add outbox irq id!\n");
3554 		return r;
3555 	}
3556 
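	/*
	 * Only hook up the outbox handler when a DMUB service is present;
	 * it is registered in low-priority IRQ context
	 * (INTERRUPT_LOW_IRQ_CONTEXT).
	 */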
3557 	if (dc->ctx->dmub_srv) {
3558 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3559 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3560 		int_params.irq_source =
3561 			dc_interrupt_to_irq_source(dc, i, 0);
3562 
3563 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3564 
3565 		c_irq_params->adev = adev;
3566 		c_irq_params->irq_src = int_params.irq_source;
3567 
3568 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3569 				dm_dmub_outbox1_low_irq, c_irq_params);
3570 	}
3571 
3572 	return 0;
3573 }
3574 #endif
3575 
3576 /*
3577  * Acquires the lock for the atomic state object and returns
3578  * the new atomic state.
3579  *
3580  * This should only be called during atomic check.
3581  */
3582 static int dm_atomic_get_state(struct drm_atomic_state *state,
3583 			       struct dm_atomic_state **dm_state)
3584 {
3585 	struct drm_device *dev = state->dev;
3586 	struct amdgpu_device *adev = drm_to_adev(dev);
3587 	struct amdgpu_display_manager *dm = &adev->dm;
3588 	struct drm_private_state *priv_state;
3589 
3590 	if (*dm_state)
3591 		return 0;
3592 
3593 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3594 	if (IS_ERR(priv_state))
3595 		return PTR_ERR(priv_state);
3596 
3597 	*dm_state = to_dm_atomic_state(priv_state);
3598 
3599 	return 0;
3600 }
3601 
3602 static struct dm_atomic_state *
3603 dm_atomic_get_new_state(struct drm_atomic_state *state)
3604 {
3605 	struct drm_device *dev = state->dev;
3606 	struct amdgpu_device *adev = drm_to_adev(dev);
3607 	struct amdgpu_display_manager *dm = &adev->dm;
3608 	struct drm_private_obj *obj;
3609 	struct drm_private_state *new_obj_state;
3610 	int i;
3611 
3612 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3613 		if (obj->funcs == dm->atomic_obj.funcs)
3614 			return to_dm_atomic_state(new_obj_state);
3615 	}
3616 
3617 	return NULL;
3618 }
3619 
3620 static struct drm_private_state *
3621 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3622 {
3623 	struct dm_atomic_state *old_state, *new_state;
3624 
3625 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3626 	if (!new_state)
3627 		return NULL;
3628 
3629 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3630 
3631 	old_state = to_dm_atomic_state(obj->state);
3632 
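	/* Give the new private state its own copy of the current DC context. */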
3633 	if (old_state && old_state->context)
3634 		new_state->context = dc_copy_state(old_state->context);
3635 
3636 	if (!new_state->context) {
3637 		kfree(new_state);
3638 		return NULL;
3639 	}
3640 
3641 	return &new_state->base;
3642 }
3643 
3644 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3645 				    struct drm_private_state *state)
3646 {
3647 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3648 
3649 	if (dm_state && dm_state->context)
3650 		dc_release_state(dm_state->context);
3651 
3652 	kfree(dm_state);
3653 }
3654 
3655 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3656 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3657 	.atomic_destroy_state = dm_atomic_destroy_state,
3658 };
3659 
3660 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3661 {
3662 	struct dm_atomic_state *state;
3663 	int r;
3664 
3665 	adev->mode_info.mode_config_initialized = true;
3666 
3667 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3668 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3669 
3670 	adev_to_drm(adev)->mode_config.max_width = 16384;
3671 	adev_to_drm(adev)->mode_config.max_height = 16384;
3672 
3673 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3674 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3675 	/* indicates support for immediate flip */
3676 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3677 
3678 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3679 
3680 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3681 	if (!state)
3682 		return -ENOMEM;
3683 
3684 	state->context = dc_create_state(adev->dm.dc);
3685 	if (!state->context) {
3686 		kfree(state);
3687 		return -ENOMEM;
3688 	}
3689 
3690 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3691 
3692 	drm_atomic_private_obj_init(adev_to_drm(adev),
3693 				    &adev->dm.atomic_obj,
3694 				    &state->base,
3695 				    &dm_atomic_state_funcs);
3696 
3697 	r = amdgpu_display_modeset_create_props(adev);
3698 	if (r) {
3699 		dc_release_state(state->context);
3700 		kfree(state);
3701 		return r;
3702 	}
3703 
3704 	r = amdgpu_dm_audio_init(adev);
3705 	if (r) {
3706 		dc_release_state(state->context);
3707 		kfree(state);
3708 		return r;
3709 	}
3710 
3711 	return 0;
3712 }
3713 
3714 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3715 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3716 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3717 
3718 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3719 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3720 
3721 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3722 					    int bl_idx)
3723 {
3724 #if defined(CONFIG_ACPI)
3725 	struct amdgpu_dm_backlight_caps caps;
3726 
3727 	memset(&caps, 0, sizeof(caps));
3728 
3729 	if (dm->backlight_caps[bl_idx].caps_valid)
3730 		return;
3731 
3732 	amdgpu_acpi_get_backlight_caps(&caps);
3733 	if (caps.caps_valid) {
3734 		dm->backlight_caps[bl_idx].caps_valid = true;
3735 		if (caps.aux_support)
3736 			return;
3737 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3738 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3739 	} else {
3740 		dm->backlight_caps[bl_idx].min_input_signal =
3741 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3742 		dm->backlight_caps[bl_idx].max_input_signal =
3743 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3744 	}
3745 #else
3746 	if (dm->backlight_caps[bl_idx].aux_support)
3747 		return;
3748 
3749 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3750 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3751 #endif
3752 }
3753 
3754 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3755 				unsigned *min, unsigned *max)
3756 {
3757 	if (!caps)
3758 		return 0;
3759 
3760 	if (caps->aux_support) {
3761 		// Firmware limits are in nits, DC API wants millinits.
3762 		*max = 1000 * caps->aux_max_input_signal;
3763 		*min = 1000 * caps->aux_min_input_signal;
3764 	} else {
3765 		// Firmware limits are 8-bit, PWM control is 16-bit.
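		// Multiplying by 0x101 replicates the byte: 0xFF * 0x101 == 0xFFFF.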
3766 		*max = 0x101 * caps->max_input_signal;
3767 		*min = 0x101 * caps->min_input_signal;
3768 	}
3769 	return 1;
3770 }
3771 
3772 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3773 					uint32_t brightness)
3774 {
3775 	unsigned min, max;
3776 
3777 	if (!get_brightness_range(caps, &min, &max))
3778 		return brightness;
3779 
3780 	// Rescale 0..255 to min..max
3781 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3782 				       AMDGPU_MAX_BL_LEVEL);
3783 }
3784 
3785 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3786 				      uint32_t brightness)
3787 {
3788 	unsigned min, max;
3789 
3790 	if (!get_brightness_range(caps, &min, &max))
3791 		return brightness;
3792 
3793 	if (brightness < min)
3794 		return 0;
3795 	// Rescale min..max to 0..255
3796 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3797 				 max - min);
3798 }
3799 
3800 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3801 					 int bl_idx,
3802 					 u32 user_brightness)
3803 {
3804 	struct amdgpu_dm_backlight_caps caps;
3805 	struct dc_link *link;
3806 	u32 brightness;
3807 	bool rc;
3808 
3809 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3810 	caps = dm->backlight_caps[bl_idx];
3811 
3812 	dm->brightness[bl_idx] = user_brightness;
3813 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3814 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3815 
3816 	/* Change brightness based on AUX property */
3817 	if (caps.aux_support) {
3818 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3819 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3820 		if (!rc)
3821 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3822 	} else {
3823 		rc = dc_link_set_backlight_level(link, brightness, 0);
3824 		if (!rc)
3825 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3826 	}
3827 
3828 	return rc ? 0 : 1;
3829 }
3830 
3831 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3832 {
3833 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3834 	int i;
3835 
3836 	for (i = 0; i < dm->num_of_edps; i++) {
3837 		if (bd == dm->backlight_dev[i])
3838 			break;
3839 	}
3840 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3841 		i = 0;
3842 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3843 
3844 	return 0;
3845 }
3846 
3847 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3848 					 int bl_idx)
3849 {
3850 	struct amdgpu_dm_backlight_caps caps;
3851 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3852 
3853 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3854 	caps = dm->backlight_caps[bl_idx];
3855 
3856 	if (caps.aux_support) {
3857 		u32 avg, peak;
3858 		bool rc;
3859 
3860 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3861 		if (!rc)
3862 			return dm->brightness[bl_idx];
3863 		return convert_brightness_to_user(&caps, avg);
3864 	} else {
3865 		int ret = dc_link_get_backlight_level(link);
3866 
3867 		if (ret == DC_ERROR_UNEXPECTED)
3868 			return dm->brightness[bl_idx];
3869 		return convert_brightness_to_user(&caps, ret);
3870 	}
3871 }
3872 
3873 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3874 {
3875 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3876 	int i;
3877 
3878 	for (i = 0; i < dm->num_of_edps; i++) {
3879 		if (bd == dm->backlight_dev[i])
3880 			break;
3881 	}
3882 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3883 		i = 0;
3884 	return amdgpu_dm_backlight_get_level(dm, i);
3885 }
3886 
3887 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3888 	.options = BL_CORE_SUSPENDRESUME,
3889 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3890 	.update_status	= amdgpu_dm_backlight_update_status,
3891 };
3892 
3893 static void
3894 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3895 {
3896 	char bl_name[16];
3897 	struct backlight_properties props = { 0 };
3898 
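	/*
	 * One backlight device is registered per internal panel (eDP/LVDS);
	 * dm->num_of_edps indexes the next free slot.
	 */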
3899 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3900 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
3901 
3902 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3903 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3904 	props.type = BACKLIGHT_RAW;
3905 
3906 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3907 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
3908 
3909 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
3910 								       adev_to_drm(dm->adev)->dev,
3911 								       dm,
3912 								       &amdgpu_dm_backlight_ops,
3913 								       &props);
3914 
3915 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
3916 		DRM_ERROR("DM: Backlight registration failed!\n");
3917 	else
3918 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3919 }
3920 #endif
3921 
3922 static int initialize_plane(struct amdgpu_display_manager *dm,
3923 			    struct amdgpu_mode_info *mode_info, int plane_id,
3924 			    enum drm_plane_type plane_type,
3925 			    const struct dc_plane_cap *plane_cap)
3926 {
3927 	struct drm_plane *plane;
3928 	unsigned long possible_crtcs;
3929 	int ret = 0;
3930 
3931 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3932 	if (!plane) {
3933 		DRM_ERROR("KMS: Failed to allocate plane\n");
3934 		return -ENOMEM;
3935 	}
3936 	plane->type = plane_type;
3937 
3938 	/*
3939 	 * HACK: IGT tests expect that the primary plane for a CRTC
3940 	 * can only have one possible CRTC. Only expose support for
3941 	 * any CRTC if the plane is not going to be used as a primary
3942 	 * plane for a CRTC - like overlay or underlay planes.
3943 	 */
3944 	possible_crtcs = 1 << plane_id;
3945 	if (plane_id >= dm->dc->caps.max_streams)
3946 		possible_crtcs = 0xff;
3947 
3948 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3949 
3950 	if (ret) {
3951 		DRM_ERROR("KMS: Failed to initialize plane\n");
3952 		kfree(plane);
3953 		return ret;
3954 	}
3955 
3956 	if (mode_info)
3957 		mode_info->planes[plane_id] = plane;
3958 
3959 	return ret;
3960 }
3961 
3962 
3963 static void register_backlight_device(struct amdgpu_display_manager *dm,
3964 				      struct dc_link *link)
3965 {
3966 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3967 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3968 
3969 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3970 	    link->type != dc_connection_none) {
3971 		/*
3972 		 * Even if registration failed, we should continue with
3973 		 * DM initialization because not having backlight control
3974 		 * is better than a black screen.
3975 		 */
3976 		if (!dm->backlight_dev[dm->num_of_edps])
3977 			amdgpu_dm_register_backlight_device(dm);
3978 
3979 		if (dm->backlight_dev[dm->num_of_edps]) {
3980 			dm->backlight_link[dm->num_of_edps] = link;
3981 			dm->num_of_edps++;
3982 		}
3983 	}
3984 #endif
3985 }
3986 
3987 
3988 /*
3989  * In this architecture, the association
3990  * connector -> encoder -> crtc
3991  * is not really required. The crtc and connector will hold the
3992  * display_index as an abstraction to use with the DAL component.
3993  *
3994  * Returns 0 on success
3995  */
3996 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3997 {
3998 	struct amdgpu_display_manager *dm = &adev->dm;
3999 	int32_t i;
4000 	struct amdgpu_dm_connector *aconnector = NULL;
4001 	struct amdgpu_encoder *aencoder = NULL;
4002 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4003 	uint32_t link_cnt;
4004 	int32_t primary_planes;
4005 	enum dc_connection_type new_connection_type = dc_connection_none;
4006 	const struct dc_plane_cap *plane;
4007 
4008 	dm->display_indexes_num = dm->dc->caps.max_streams;
4009 	/* Update the actual used number of crtc */
4010 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4011 
4012 	link_cnt = dm->dc->caps.max_links;
4013 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4014 		DRM_ERROR("DM: Failed to initialize mode config\n");
4015 		return -EINVAL;
4016 	}
4017 
4018 	/* There is one primary plane per CRTC */
4019 	primary_planes = dm->dc->caps.max_streams;
4020 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4021 
4022 	/*
4023 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4024 	 * Order is reversed to match iteration order in atomic check.
4025 	 */
4026 	for (i = (primary_planes - 1); i >= 0; i--) {
4027 		plane = &dm->dc->caps.planes[i];
4028 
4029 		if (initialize_plane(dm, mode_info, i,
4030 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4031 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4032 			goto fail;
4033 		}
4034 	}
4035 
4036 	/*
4037 	 * Initialize overlay planes, index starting after primary planes.
4038 	 * These planes have a higher DRM index than the primary planes since
4039 	 * they should be considered as having a higher z-order.
4040 	 * Order is reversed to match iteration order in atomic check.
4041 	 *
4042 	 * Only support DCN for now, and only expose one so we don't encourage
4043 	 * userspace to use up all the pipes.
4044 	 */
4045 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4046 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4047 
4048 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4049 			continue;
4050 
4051 		if (!plane->blends_with_above || !plane->blends_with_below)
4052 			continue;
4053 
4054 		if (!plane->pixel_format_support.argb8888)
4055 			continue;
4056 
4057 		if (initialize_plane(dm, NULL, primary_planes + i,
4058 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4059 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4060 			goto fail;
4061 		}
4062 
4063 		/* Only create one overlay plane. */
4064 		break;
4065 	}
4066 
4067 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4068 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4069 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4070 			goto fail;
4071 		}
4072 
4073 #if defined(CONFIG_DRM_AMD_DC_DCN)
4074 	/* Use Outbox interrupt */
4075 	switch (adev->asic_type) {
4076 	case CHIP_SIENNA_CICHLID:
4077 	case CHIP_NAVY_FLOUNDER:
4078 	case CHIP_YELLOW_CARP:
4079 	case CHIP_RENOIR:
4080 		if (register_outbox_irq_handlers(dm->adev)) {
4081 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4082 			goto fail;
4083 		}
4084 		break;
4085 	default:
4086 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
4087 	}
4088 #endif
4089 
4090 	/* loops over all connectors on the board */
4091 	for (i = 0; i < link_cnt; i++) {
4092 		struct dc_link *link = NULL;
4093 
4094 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4095 			DRM_ERROR(
4096 				"KMS: Cannot support more than %d display indexes\n",
4097 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4098 			continue;
4099 		}
4100 
4101 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4102 		if (!aconnector)
4103 			goto fail;
4104 
4105 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4106 		if (!aencoder)
4107 			goto fail;
4108 
4109 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4110 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4111 			goto fail;
4112 		}
4113 
4114 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4115 			DRM_ERROR("KMS: Failed to initialize connector\n");
4116 			goto fail;
4117 		}
4118 
4119 		link = dc_get_link_at_index(dm->dc, i);
4120 
4121 		if (!dc_link_detect_sink(link, &new_connection_type))
4122 			DRM_ERROR("KMS: Failed to detect connector\n");
4123 
4124 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4125 			emulated_link_detect(link);
4126 			amdgpu_dm_update_connector_after_detect(aconnector);
4127 
4128 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4129 			amdgpu_dm_update_connector_after_detect(aconnector);
4130 			register_backlight_device(dm, link);
4131 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
4132 				amdgpu_dm_set_psr_caps(link);
4133 		}
4134 
4135 
4136 	}
4137 
4138 	/* Software is initialized. Now we can register interrupt handlers. */
4139 	switch (adev->asic_type) {
4140 #if defined(CONFIG_DRM_AMD_DC_SI)
4141 	case CHIP_TAHITI:
4142 	case CHIP_PITCAIRN:
4143 	case CHIP_VERDE:
4144 	case CHIP_OLAND:
4145 		if (dce60_register_irq_handlers(dm->adev)) {
4146 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4147 			goto fail;
4148 		}
4149 		break;
4150 #endif
4151 	case CHIP_BONAIRE:
4152 	case CHIP_HAWAII:
4153 	case CHIP_KAVERI:
4154 	case CHIP_KABINI:
4155 	case CHIP_MULLINS:
4156 	case CHIP_TONGA:
4157 	case CHIP_FIJI:
4158 	case CHIP_CARRIZO:
4159 	case CHIP_STONEY:
4160 	case CHIP_POLARIS11:
4161 	case CHIP_POLARIS10:
4162 	case CHIP_POLARIS12:
4163 	case CHIP_VEGAM:
4164 	case CHIP_VEGA10:
4165 	case CHIP_VEGA12:
4166 	case CHIP_VEGA20:
4167 		if (dce110_register_irq_handlers(dm->adev)) {
4168 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4169 			goto fail;
4170 		}
4171 		break;
4172 #if defined(CONFIG_DRM_AMD_DC_DCN)
4173 	case CHIP_RAVEN:
4174 	case CHIP_NAVI12:
4175 	case CHIP_NAVI10:
4176 	case CHIP_NAVI14:
4177 	case CHIP_RENOIR:
4178 	case CHIP_SIENNA_CICHLID:
4179 	case CHIP_NAVY_FLOUNDER:
4180 	case CHIP_DIMGREY_CAVEFISH:
4181 	case CHIP_BEIGE_GOBY:
4182 	case CHIP_VANGOGH:
4183 	case CHIP_YELLOW_CARP:
4184 		if (dcn10_register_irq_handlers(dm->adev)) {
4185 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4186 			goto fail;
4187 		}
4188 		break;
4189 #endif
4190 	default:
4191 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4192 		goto fail;
4193 	}
4194 
4195 	return 0;
4196 fail:
4197 	kfree(aencoder);
4198 	kfree(aconnector);
4199 
4200 	return -EINVAL;
4201 }
4202 
4203 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4204 {
4205 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4207 }
4208 
4209 /******************************************************************************
4210  * amdgpu_display_funcs functions
4211  *****************************************************************************/
4212 
4213 /*
4214  * dm_bandwidth_update - program display watermarks
4215  *
4216  * @adev: amdgpu_device pointer
4217  *
4218  * Calculate and program the display watermarks and line buffer allocation.
4219  */
4220 static void dm_bandwidth_update(struct amdgpu_device *adev)
4221 {
4222 	/* TODO: implement later */
4223 }
4224 
4225 static const struct amdgpu_display_funcs dm_display_funcs = {
4226 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4227 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4228 	.backlight_set_level = NULL, /* never called for DC */
4229 	.backlight_get_level = NULL, /* never called for DC */
4230 	.hpd_sense = NULL,/* called unconditionally */
4231 	.hpd_set_polarity = NULL, /* called unconditionally */
4232 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4233 	.page_flip_get_scanoutpos =
4234 		dm_crtc_get_scanoutpos,/* called unconditionally */
4235 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4236 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4237 };
4238 
4239 #if defined(CONFIG_DEBUG_KERNEL_DC)
4240 
4241 static ssize_t s3_debug_store(struct device *device,
4242 			      struct device_attribute *attr,
4243 			      const char *buf,
4244 			      size_t count)
4245 {
4246 	int ret;
4247 	int s3_state;
4248 	struct drm_device *drm_dev = dev_get_drvdata(device);
4249 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4250 
4251 	ret = kstrtoint(buf, 0, &s3_state);
4252 
4253 	if (ret == 0) {
4254 		if (s3_state) {
4255 			dm_resume(adev);
4256 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4257 		} else
4258 			dm_suspend(adev);
4259 	}
4260 
4261 	return ret == 0 ? count : 0;
4262 }
4263 
4264 DEVICE_ATTR_WO(s3_debug);
4265 
4266 #endif
4267 
4268 static int dm_early_init(void *handle)
4269 {
4270 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4271 
4272 	switch (adev->asic_type) {
4273 #if defined(CONFIG_DRM_AMD_DC_SI)
4274 	case CHIP_TAHITI:
4275 	case CHIP_PITCAIRN:
4276 	case CHIP_VERDE:
4277 		adev->mode_info.num_crtc = 6;
4278 		adev->mode_info.num_hpd = 6;
4279 		adev->mode_info.num_dig = 6;
4280 		break;
4281 	case CHIP_OLAND:
4282 		adev->mode_info.num_crtc = 2;
4283 		adev->mode_info.num_hpd = 2;
4284 		adev->mode_info.num_dig = 2;
4285 		break;
4286 #endif
4287 	case CHIP_BONAIRE:
4288 	case CHIP_HAWAII:
4289 		adev->mode_info.num_crtc = 6;
4290 		adev->mode_info.num_hpd = 6;
4291 		adev->mode_info.num_dig = 6;
4292 		break;
4293 	case CHIP_KAVERI:
4294 		adev->mode_info.num_crtc = 4;
4295 		adev->mode_info.num_hpd = 6;
4296 		adev->mode_info.num_dig = 7;
4297 		break;
4298 	case CHIP_KABINI:
4299 	case CHIP_MULLINS:
4300 		adev->mode_info.num_crtc = 2;
4301 		adev->mode_info.num_hpd = 6;
4302 		adev->mode_info.num_dig = 6;
4303 		break;
4304 	case CHIP_FIJI:
4305 	case CHIP_TONGA:
4306 		adev->mode_info.num_crtc = 6;
4307 		adev->mode_info.num_hpd = 6;
4308 		adev->mode_info.num_dig = 7;
4309 		break;
4310 	case CHIP_CARRIZO:
4311 		adev->mode_info.num_crtc = 3;
4312 		adev->mode_info.num_hpd = 6;
4313 		adev->mode_info.num_dig = 9;
4314 		break;
4315 	case CHIP_STONEY:
4316 		adev->mode_info.num_crtc = 2;
4317 		adev->mode_info.num_hpd = 6;
4318 		adev->mode_info.num_dig = 9;
4319 		break;
4320 	case CHIP_POLARIS11:
4321 	case CHIP_POLARIS12:
4322 		adev->mode_info.num_crtc = 5;
4323 		adev->mode_info.num_hpd = 5;
4324 		adev->mode_info.num_dig = 5;
4325 		break;
4326 	case CHIP_POLARIS10:
4327 	case CHIP_VEGAM:
4328 		adev->mode_info.num_crtc = 6;
4329 		adev->mode_info.num_hpd = 6;
4330 		adev->mode_info.num_dig = 6;
4331 		break;
4332 	case CHIP_VEGA10:
4333 	case CHIP_VEGA12:
4334 	case CHIP_VEGA20:
4335 		adev->mode_info.num_crtc = 6;
4336 		adev->mode_info.num_hpd = 6;
4337 		adev->mode_info.num_dig = 6;
4338 		break;
4339 #if defined(CONFIG_DRM_AMD_DC_DCN)
4340 	case CHIP_RAVEN:
4341 	case CHIP_RENOIR:
4342 	case CHIP_VANGOGH:
4343 		adev->mode_info.num_crtc = 4;
4344 		adev->mode_info.num_hpd = 4;
4345 		adev->mode_info.num_dig = 4;
4346 		break;
4347 	case CHIP_NAVI10:
4348 	case CHIP_NAVI12:
4349 	case CHIP_SIENNA_CICHLID:
4350 	case CHIP_NAVY_FLOUNDER:
4351 		adev->mode_info.num_crtc = 6;
4352 		adev->mode_info.num_hpd = 6;
4353 		adev->mode_info.num_dig = 6;
4354 		break;
4355 	case CHIP_YELLOW_CARP:
4356 		adev->mode_info.num_crtc = 4;
4357 		adev->mode_info.num_hpd = 4;
4358 		adev->mode_info.num_dig = 4;
4359 		break;
4360 	case CHIP_NAVI14:
4361 	case CHIP_DIMGREY_CAVEFISH:
4362 		adev->mode_info.num_crtc = 5;
4363 		adev->mode_info.num_hpd = 5;
4364 		adev->mode_info.num_dig = 5;
4365 		break;
4366 	case CHIP_BEIGE_GOBY:
4367 		adev->mode_info.num_crtc = 2;
4368 		adev->mode_info.num_hpd = 2;
4369 		adev->mode_info.num_dig = 2;
4370 		break;
4371 #endif
4372 	default:
4373 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4374 		return -EINVAL;
4375 	}
4376 
4377 	amdgpu_dm_set_irq_funcs(adev);
4378 
4379 	if (adev->mode_info.funcs == NULL)
4380 		adev->mode_info.funcs = &dm_display_funcs;
4381 
4382 	/*
4383 	 * Note: Do NOT change adev->audio_endpt_rreg and
4384 	 * adev->audio_endpt_wreg because they are initialised in
4385 	 * amdgpu_device_init()
4386 	 */
4387 #if defined(CONFIG_DEBUG_KERNEL_DC)
4388 	device_create_file(
4389 		adev_to_drm(adev)->dev,
4390 		&dev_attr_s3_debug);
4391 #endif
4392 
4393 	return 0;
4394 }
4395 
4396 static bool modeset_required(struct drm_crtc_state *crtc_state,
4397 			     struct dc_stream_state *new_stream,
4398 			     struct dc_stream_state *old_stream)
4399 {
4400 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4401 }
4402 
4403 static bool modereset_required(struct drm_crtc_state *crtc_state)
4404 {
4405 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4406 }
4407 
4408 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4409 {
4410 	drm_encoder_cleanup(encoder);
4411 	kfree(encoder);
4412 }
4413 
4414 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4415 	.destroy = amdgpu_dm_encoder_destroy,
4416 };
4417 
4418 
4419 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4420 					 struct drm_framebuffer *fb,
4421 					 int *min_downscale, int *max_upscale)
4422 {
4423 	struct amdgpu_device *adev = drm_to_adev(dev);
4424 	struct dc *dc = adev->dm.dc;
4425 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4426 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4427 
4428 	switch (fb->format->format) {
4429 	case DRM_FORMAT_P010:
4430 	case DRM_FORMAT_NV12:
4431 	case DRM_FORMAT_NV21:
4432 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4433 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4434 		break;
4435 
4436 	case DRM_FORMAT_XRGB16161616F:
4437 	case DRM_FORMAT_ARGB16161616F:
4438 	case DRM_FORMAT_XBGR16161616F:
4439 	case DRM_FORMAT_ABGR16161616F:
4440 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4441 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4442 		break;
4443 
4444 	default:
4445 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4446 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4447 		break;
4448 	}
4449 
4450 	/*
4451 	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4452 	 * scaling factor of 1.0 == 1000 units.
4453 	 */
4454 	if (*max_upscale == 1)
4455 		*max_upscale = 1000;
4456 
4457 	if (*min_downscale == 1)
4458 		*min_downscale = 1000;
4459 }
4460 
4461 
4462 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4463 				struct dc_scaling_info *scaling_info)
4464 {
4465 	int scale_w, scale_h, min_downscale, max_upscale;
4466 
4467 	memset(scaling_info, 0, sizeof(*scaling_info));
4468 
4469 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4470 	scaling_info->src_rect.x = state->src_x >> 16;
4471 	scaling_info->src_rect.y = state->src_y >> 16;
4472 
4473 	/*
4474 	 * For reasons we don't (yet) fully understand, a non-zero
4475 	 * src_y coordinate into an NV12 buffer can cause a
4476 	 * system hang. To avoid hangs (and maybe be overly cautious),
4477 	 * let's reject both non-zero src_x and src_y.
4478 	 *
4479 	 * We currently know of only one use-case to reproduce a
4480 	 * scenario with non-zero src_x and src_y for NV12, which
4481 	 * is to gesture the YouTube Android app into full screen
4482 	 * on ChromeOS.
4483 	 */
4484 	if (state->fb &&
4485 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4486 	    (scaling_info->src_rect.x != 0 ||
4487 	     scaling_info->src_rect.y != 0))
4488 		return -EINVAL;
4489 
4490 	scaling_info->src_rect.width = state->src_w >> 16;
4491 	if (scaling_info->src_rect.width == 0)
4492 		return -EINVAL;
4493 
4494 	scaling_info->src_rect.height = state->src_h >> 16;
4495 	if (scaling_info->src_rect.height == 0)
4496 		return -EINVAL;
4497 
4498 	scaling_info->dst_rect.x = state->crtc_x;
4499 	scaling_info->dst_rect.y = state->crtc_y;
4500 
4501 	if (state->crtc_w == 0)
4502 		return -EINVAL;
4503 
4504 	scaling_info->dst_rect.width = state->crtc_w;
4505 
4506 	if (state->crtc_h == 0)
4507 		return -EINVAL;
4508 
4509 	scaling_info->dst_rect.height = state->crtc_h;
4510 
4511 	/* DRM doesn't specify clipping on destination output. */
4512 	scaling_info->clip_rect = scaling_info->dst_rect;
4513 
4514 	/* Validate scaling per-format with DC plane caps */
4515 	if (state->plane && state->plane->dev && state->fb) {
4516 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4517 					     &min_downscale, &max_upscale);
4518 	} else {
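		/*
		 * Plane caps are unavailable here; fall back to conservative
		 * defaults: 4x downscale (250) and 16x upscale (16000), in
		 * units of 0.001.
		 */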
4519 		min_downscale = 250;
4520 		max_upscale = 16000;
4521 	}
4522 
4523 	scale_w = scaling_info->dst_rect.width * 1000 /
4524 		  scaling_info->src_rect.width;
4525 
4526 	if (scale_w < min_downscale || scale_w > max_upscale)
4527 		return -EINVAL;
4528 
4529 	scale_h = scaling_info->dst_rect.height * 1000 /
4530 		  scaling_info->src_rect.height;
4531 
4532 	if (scale_h < min_downscale || scale_h > max_upscale)
4533 		return -EINVAL;
4534 
4535 	/*
4536 	 * The "scaling_quality" can be ignored for now; with quality = 0, DC
4537 	 * assumes reasonable defaults based on the format.
4538 	 */
4539 
4540 	return 0;
4541 }
4542 
4543 static void
4544 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4545 				 uint64_t tiling_flags)
4546 {
4547 	/* Fill GFX8 params */
4548 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4549 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4550 
4551 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4552 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4553 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4554 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4555 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4556 
4557 		/* XXX fix me for VI */
4558 		tiling_info->gfx8.num_banks = num_banks;
4559 		tiling_info->gfx8.array_mode =
4560 				DC_ARRAY_2D_TILED_THIN1;
4561 		tiling_info->gfx8.tile_split = tile_split;
4562 		tiling_info->gfx8.bank_width = bankw;
4563 		tiling_info->gfx8.bank_height = bankh;
4564 		tiling_info->gfx8.tile_aspect = mtaspect;
4565 		tiling_info->gfx8.tile_mode =
4566 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4567 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4568 			== DC_ARRAY_1D_TILED_THIN1) {
4569 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4570 	}
4571 
4572 	tiling_info->gfx8.pipe_config =
4573 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4574 }
4575 
4576 static void
4577 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4578 				  union dc_tiling_info *tiling_info)
4579 {
4580 	tiling_info->gfx9.num_pipes =
4581 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4582 	tiling_info->gfx9.num_banks =
4583 		adev->gfx.config.gb_addr_config_fields.num_banks;
4584 	tiling_info->gfx9.pipe_interleave =
4585 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4586 	tiling_info->gfx9.num_shader_engines =
4587 		adev->gfx.config.gb_addr_config_fields.num_se;
4588 	tiling_info->gfx9.max_compressed_frags =
4589 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4590 	tiling_info->gfx9.num_rb_per_se =
4591 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4592 	tiling_info->gfx9.shaderEnable = 1;
4593 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4594 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4595 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4596 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4597 	    adev->asic_type == CHIP_YELLOW_CARP ||
4598 	    adev->asic_type == CHIP_VANGOGH)
4599 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4600 }
4601 
4602 static int
4603 validate_dcc(struct amdgpu_device *adev,
4604 	     const enum surface_pixel_format format,
4605 	     const enum dc_rotation_angle rotation,
4606 	     const union dc_tiling_info *tiling_info,
4607 	     const struct dc_plane_dcc_param *dcc,
4608 	     const struct dc_plane_address *address,
4609 	     const struct plane_size *plane_size)
4610 {
4611 	struct dc *dc = adev->dm.dc;
4612 	struct dc_dcc_surface_param input;
4613 	struct dc_surface_dcc_cap output;
4614 
4615 	memset(&input, 0, sizeof(input));
4616 	memset(&output, 0, sizeof(output));
4617 
4618 	if (!dcc->enable)
4619 		return 0;
4620 
4621 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4622 	    !dc->cap_funcs.get_dcc_compression_cap)
4623 		return -EINVAL;
4624 
4625 	input.format = format;
4626 	input.surface_size.width = plane_size->surface_size.width;
4627 	input.surface_size.height = plane_size->surface_size.height;
4628 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4629 
4630 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4631 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4632 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4633 		input.scan = SCAN_DIRECTION_VERTICAL;
4634 
4635 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4636 		return -EINVAL;
4637 
4638 	if (!output.capable)
4639 		return -EINVAL;
4640 
4641 	if (dcc->independent_64b_blks == 0 &&
4642 	    output.grph.rgb.independent_64b_blks != 0)
4643 		return -EINVAL;
4644 
4645 	return 0;
4646 }
4647 
4648 static bool
4649 modifier_has_dcc(uint64_t modifier)
4650 {
4651 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4652 }
4653 
4654 static unsigned
4655 modifier_gfx9_swizzle_mode(uint64_t modifier)
4656 {
4657 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4658 		return 0;
4659 
4660 	return AMD_FMT_MOD_GET(TILE, modifier);
4661 }
4662 
4663 static const struct drm_format_info *
4664 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4665 {
4666 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4667 }
4668 
4669 static void
4670 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4671 				    union dc_tiling_info *tiling_info,
4672 				    uint64_t modifier)
4673 {
4674 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4675 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4676 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4677 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4678 
4679 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4680 
4681 	if (!IS_AMD_FMT_MOD(modifier))
4682 		return;
4683 
4684 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4685 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4686 
4687 	if (adev->family >= AMDGPU_FAMILY_NV) {
4688 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4689 	} else {
4690 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4691 
4692 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4693 	}
4694 }
4695 
4696 enum dm_micro_swizzle {
4697 	MICRO_SWIZZLE_Z = 0,
4698 	MICRO_SWIZZLE_S = 1,
4699 	MICRO_SWIZZLE_D = 2,
4700 	MICRO_SWIZZLE_R = 3
4701 };
4702 
4703 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4704 					  uint32_t format,
4705 					  uint64_t modifier)
4706 {
4707 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4708 	const struct drm_format_info *info = drm_format_info(format);
4709 	int i;
4710 
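	/*
	 * The low two bits of the gfx9+ swizzle mode encode the micro-tile
	 * type (see enum dm_micro_swizzle above).
	 */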
4711 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4712 
4713 	if (!info)
4714 		return false;
4715 
4716 	/*
4717 	 * We always have to allow these modifiers:
4718 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4719 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4720 	 */
4721 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4722 	    modifier == DRM_FORMAT_MOD_INVALID) {
4723 		return true;
4724 	}
4725 
4726 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4727 	for (i = 0; i < plane->modifier_count; i++) {
4728 		if (modifier == plane->modifiers[i])
4729 			break;
4730 	}
4731 	if (i == plane->modifier_count)
4732 		return false;
4733 
4734 	/*
4735 	 * For D swizzle the canonical modifier depends on the bpp, so check
4736 	 * it here.
4737 	 */
4738 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4739 	    adev->family >= AMDGPU_FAMILY_NV) {
4740 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4741 			return false;
4742 	}
4743 
4744 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4745 	    info->cpp[0] < 8)
4746 		return false;
4747 
4748 	if (modifier_has_dcc(modifier)) {
4749 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4750 		if (info->cpp[0] != 4)
4751 			return false;
4752 		/* We support multi-planar formats, but not when combined with
4753 		 * additional DCC metadata planes. */
4754 		if (info->num_planes > 1)
4755 			return false;
4756 	}
4757 
4758 	return true;
4759 }
4760 
4761 static void
4762 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4763 {
4764 	if (!*mods)
4765 		return;
4766 
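	/* Grow the array geometrically (double the capacity) when full. */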
4767 	if (*cap - *size < 1) {
4768 		uint64_t new_cap = *cap * 2;
4769 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4770 
4771 		if (!new_mods) {
4772 			kfree(*mods);
4773 			*mods = NULL;
4774 			return;
4775 		}
4776 
4777 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4778 		kfree(*mods);
4779 		*mods = new_mods;
4780 		*cap = new_cap;
4781 	}
4782 
4783 	(*mods)[*size] = mod;
4784 	*size += 1;
4785 }
4786 
4787 static void
4788 add_gfx9_modifiers(const struct amdgpu_device *adev,
4789 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4790 {
4791 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4792 	int pipe_xor_bits = min(8, pipes +
4793 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4794 	int bank_xor_bits = min(8 - pipe_xor_bits,
4795 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4796 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4797 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4798 
4799 
4800 	if (adev->family == AMDGPU_FAMILY_RV) {
4801 		/* Raven2 and later */
4802 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4803 
4804 		/*
4805 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4806 		 * doesn't support _D on DCN
4807 		 */
4808 
4809 		if (has_constant_encode) {
4810 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4811 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4812 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4813 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4814 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4815 				    AMD_FMT_MOD_SET(DCC, 1) |
4816 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4817 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4818 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4819 		}
4820 
4821 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4822 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4823 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4824 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4825 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4826 			    AMD_FMT_MOD_SET(DCC, 1) |
4827 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4828 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4829 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4830 
4831 		if (has_constant_encode) {
4832 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4833 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4834 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4835 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4836 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4837 				    AMD_FMT_MOD_SET(DCC, 1) |
4838 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4839 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4840 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4842 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4843 				    AMD_FMT_MOD_SET(RB, rb) |
4844 				    AMD_FMT_MOD_SET(PIPE, pipes));
4845 		}
4846 
4847 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4848 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4849 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4850 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4851 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4852 			    AMD_FMT_MOD_SET(DCC, 1) |
4853 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4854 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4855 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4856 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4857 			    AMD_FMT_MOD_SET(RB, rb) |
4858 			    AMD_FMT_MOD_SET(PIPE, pipes));
4859 	}
4860 
4861 	/*
4862 	 * Only supported for 64bpp on Raven, will be filtered on format in
4863 	 * dm_plane_format_mod_supported.
4864 	 */
4865 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4866 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4867 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4868 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4869 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4870 
4871 	if (adev->family == AMDGPU_FAMILY_RV) {
4872 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4873 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4874 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4875 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4876 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4877 	}
4878 
4879 	/*
4880 	 * Only supported for 64bpp on Raven, will be filtered on format in
4881 	 * dm_plane_format_mod_supported.
4882 	 */
4883 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4884 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4885 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4886 
4887 	if (adev->family == AMDGPU_FAMILY_RV) {
4888 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4889 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4890 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4891 	}
4892 }
4893 
4894 static void
4895 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4896 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4897 {
4898 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4899 
4900 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4901 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4902 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4903 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4904 		    AMD_FMT_MOD_SET(DCC, 1) |
4905 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4906 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4907 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4908 
4909 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4910 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4911 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4912 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4913 		    AMD_FMT_MOD_SET(DCC, 1) |
4914 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4915 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4916 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4917 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4918 
4919 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4920 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4921 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4922 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4923 
4924 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4925 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4926 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4927 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4928 
4929 
4930 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4931 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4932 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4933 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4934 
4935 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4936 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4937 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4938 }
4939 
4940 static void
4941 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4942 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4943 {
4944 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4945 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4946 
4947 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4948 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4949 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4950 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4951 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4952 		    AMD_FMT_MOD_SET(DCC, 1) |
4953 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4954 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4955 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4956 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4957 
4958 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4959 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4960 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4961 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4962 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4963 		    AMD_FMT_MOD_SET(DCC, 1) |
4964 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4965 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4966 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4967 
4968 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4969 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4970 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4971 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4972 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4973 		    AMD_FMT_MOD_SET(DCC, 1) |
4974 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4975 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4976 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4977 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4978 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4979 
4980 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4981 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4982 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4983 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4984 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4985 		    AMD_FMT_MOD_SET(DCC, 1) |
4986 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4987 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4988 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4989 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4990 
4991 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4992 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4993 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4994 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4995 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4996 
4997 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4998 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4999 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5000 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5001 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5002 
5003 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5004 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5005 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5006 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5007 
5008 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5009 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5010 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5011 }
5012 
5013 static int
5014 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5015 {
5016 	uint64_t size = 0, capacity = 128;
5017 	*mods = NULL;
5018 
5019 	/* We have not hooked up any pre-GFX9 modifiers. */
5020 	if (adev->family < AMDGPU_FAMILY_AI)
5021 		return 0;
5022 
5023 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5024 
5025 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5026 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5027 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5028 		return *mods ? 0 : -ENOMEM;
5029 	}
5030 
5031 	switch (adev->family) {
5032 	case AMDGPU_FAMILY_AI:
5033 	case AMDGPU_FAMILY_RV:
5034 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5035 		break;
5036 	case AMDGPU_FAMILY_NV:
5037 	case AMDGPU_FAMILY_VGH:
5038 	case AMDGPU_FAMILY_YC:
5039 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
5040 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5041 		else
5042 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5043 		break;
5044 	}
5045 
5046 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5047 
5048 	/* INVALID marks the end of the list. */
5049 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5050 
5051 	if (!*mods)
5052 		return -ENOMEM;
5053 
5054 	return 0;
5055 }
5056 
5057 static int
5058 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5059 					  const struct amdgpu_framebuffer *afb,
5060 					  const enum surface_pixel_format format,
5061 					  const enum dc_rotation_angle rotation,
5062 					  const struct plane_size *plane_size,
5063 					  union dc_tiling_info *tiling_info,
5064 					  struct dc_plane_dcc_param *dcc,
5065 					  struct dc_plane_address *address,
5066 					  const bool force_disable_dcc)
5067 {
5068 	const uint64_t modifier = afb->base.modifier;
5069 	int ret = 0;
5070 
5071 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5072 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5073 
5074 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5075 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5076 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5077 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5078 
5079 		dcc->enable = 1;
5080 		dcc->meta_pitch = afb->base.pitches[1];
5081 		dcc->independent_64b_blks = independent_64b_blks;
5082 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5083 			if (independent_64b_blks && independent_128b_blks)
5084 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5085 			else if (independent_128b_blks)
5086 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5087 			else if (independent_64b_blks && !independent_128b_blks)
5088 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5089 			else
5090 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5091 		} else {
5092 			if (independent_64b_blks)
5093 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5094 			else
5095 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5096 		}
5097 
5098 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5099 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5100 	}
5101 
5102 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5103 	if (ret)
5104 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5105 
5106 	return ret;
5107 }
5108 
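/*
 * Fill surface size, pitch and address information for a plane. RGB surfaces
 * use a single graphics address, while semi-planar video surfaces program
 * separate luma and chroma addresses from offsets[0] and offsets[1].
 */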
5109 static int
5110 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5111 			     const struct amdgpu_framebuffer *afb,
5112 			     const enum surface_pixel_format format,
5113 			     const enum dc_rotation_angle rotation,
5114 			     const uint64_t tiling_flags,
5115 			     union dc_tiling_info *tiling_info,
5116 			     struct plane_size *plane_size,
5117 			     struct dc_plane_dcc_param *dcc,
5118 			     struct dc_plane_address *address,
5119 			     bool tmz_surface,
5120 			     bool force_disable_dcc)
5121 {
5122 	const struct drm_framebuffer *fb = &afb->base;
5123 	int ret;
5124 
5125 	memset(tiling_info, 0, sizeof(*tiling_info));
5126 	memset(plane_size, 0, sizeof(*plane_size));
5127 	memset(dcc, 0, sizeof(*dcc));
5128 	memset(address, 0, sizeof(*address));
5129 
5130 	address->tmz_surface = tmz_surface;
5131 
5132 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5133 		uint64_t addr = afb->address + fb->offsets[0];
5134 
5135 		plane_size->surface_size.x = 0;
5136 		plane_size->surface_size.y = 0;
5137 		plane_size->surface_size.width = fb->width;
5138 		plane_size->surface_size.height = fb->height;
5139 		plane_size->surface_pitch =
5140 			fb->pitches[0] / fb->format->cpp[0];
5141 
5142 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5143 		address->grph.addr.low_part = lower_32_bits(addr);
5144 		address->grph.addr.high_part = upper_32_bits(addr);
5145 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5146 		uint64_t luma_addr = afb->address + fb->offsets[0];
5147 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5148 
5149 		plane_size->surface_size.x = 0;
5150 		plane_size->surface_size.y = 0;
5151 		plane_size->surface_size.width = fb->width;
5152 		plane_size->surface_size.height = fb->height;
5153 		plane_size->surface_pitch =
5154 			fb->pitches[0] / fb->format->cpp[0];
5155 
5156 		plane_size->chroma_size.x = 0;
5157 		plane_size->chroma_size.y = 0;
5158 		/* TODO: set these based on surface format */
5159 		plane_size->chroma_size.width = fb->width / 2;
5160 		plane_size->chroma_size.height = fb->height / 2;
5161 
5162 		plane_size->chroma_pitch =
5163 			fb->pitches[1] / fb->format->cpp[1];
5164 
5165 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5166 		address->video_progressive.luma_addr.low_part =
5167 			lower_32_bits(luma_addr);
5168 		address->video_progressive.luma_addr.high_part =
5169 			upper_32_bits(luma_addr);
5170 		address->video_progressive.chroma_addr.low_part =
5171 			lower_32_bits(chroma_addr);
5172 		address->video_progressive.chroma_addr.high_part =
5173 			upper_32_bits(chroma_addr);
5174 	}
5175 
5176 	if (adev->family >= AMDGPU_FAMILY_AI) {
5177 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5178 								rotation, plane_size,
5179 								tiling_info, dcc,
5180 								address,
5181 								force_disable_dcc);
5182 		if (ret)
5183 			return ret;
5184 	} else {
5185 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5186 	}
5187 
5188 	return 0;
5189 }
5190 
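/*
 * Derive blending parameters from the DRM plane state: per-pixel alpha is
 * only honored for overlay planes using a premultiplied alpha format, and a
 * plane alpha below 0xffff is converted into an 8-bit global alpha value.
 */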
5191 static void
5192 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5193 			       bool *per_pixel_alpha, bool *global_alpha,
5194 			       int *global_alpha_value)
5195 {
5196 	*per_pixel_alpha = false;
5197 	*global_alpha = false;
5198 	*global_alpha_value = 0xff;
5199 
5200 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5201 		return;
5202 
5203 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5204 		static const uint32_t alpha_formats[] = {
5205 			DRM_FORMAT_ARGB8888,
5206 			DRM_FORMAT_RGBA8888,
5207 			DRM_FORMAT_ABGR8888,
5208 		};
5209 		uint32_t format = plane_state->fb->format->format;
5210 		unsigned int i;
5211 
5212 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5213 			if (format == alpha_formats[i]) {
5214 				*per_pixel_alpha = true;
5215 				break;
5216 			}
5217 		}
5218 	}
5219 
5220 	if (plane_state->alpha < 0xffff) {
5221 		*global_alpha = true;
5222 		*global_alpha_value = plane_state->alpha >> 8;
5223 	}
5224 }
5225 
5226 static int
5227 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5228 			    const enum surface_pixel_format format,
5229 			    enum dc_color_space *color_space)
5230 {
5231 	bool full_range;
5232 
5233 	*color_space = COLOR_SPACE_SRGB;
5234 
5235 	/* DRM color properties only affect non-RGB formats. */
5236 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5237 		return 0;
5238 
5239 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5240 
5241 	switch (plane_state->color_encoding) {
5242 	case DRM_COLOR_YCBCR_BT601:
5243 		if (full_range)
5244 			*color_space = COLOR_SPACE_YCBCR601;
5245 		else
5246 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5247 		break;
5248 
5249 	case DRM_COLOR_YCBCR_BT709:
5250 		if (full_range)
5251 			*color_space = COLOR_SPACE_YCBCR709;
5252 		else
5253 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5254 		break;
5255 
5256 	case DRM_COLOR_YCBCR_BT2020:
5257 		if (full_range)
5258 			*color_space = COLOR_SPACE_2020_YCBCR;
5259 		else
5260 			return -EINVAL;
5261 		break;
5262 
5263 	default:
5264 		return -EINVAL;
5265 	}
5266 
5267 	return 0;
5268 }
5269 
5270 static int
5271 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5272 			    const struct drm_plane_state *plane_state,
5273 			    const uint64_t tiling_flags,
5274 			    struct dc_plane_info *plane_info,
5275 			    struct dc_plane_address *address,
5276 			    bool tmz_surface,
5277 			    bool force_disable_dcc)
5278 {
5279 	const struct drm_framebuffer *fb = plane_state->fb;
5280 	const struct amdgpu_framebuffer *afb =
5281 		to_amdgpu_framebuffer(plane_state->fb);
5282 	int ret;
5283 
5284 	memset(plane_info, 0, sizeof(*plane_info));
5285 
5286 	switch (fb->format->format) {
5287 	case DRM_FORMAT_C8:
5288 		plane_info->format =
5289 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5290 		break;
5291 	case DRM_FORMAT_RGB565:
5292 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5293 		break;
5294 	case DRM_FORMAT_XRGB8888:
5295 	case DRM_FORMAT_ARGB8888:
5296 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5297 		break;
5298 	case DRM_FORMAT_XRGB2101010:
5299 	case DRM_FORMAT_ARGB2101010:
5300 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5301 		break;
5302 	case DRM_FORMAT_XBGR2101010:
5303 	case DRM_FORMAT_ABGR2101010:
5304 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5305 		break;
5306 	case DRM_FORMAT_XBGR8888:
5307 	case DRM_FORMAT_ABGR8888:
5308 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5309 		break;
5310 	case DRM_FORMAT_NV21:
5311 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5312 		break;
5313 	case DRM_FORMAT_NV12:
5314 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5315 		break;
5316 	case DRM_FORMAT_P010:
5317 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5318 		break;
5319 	case DRM_FORMAT_XRGB16161616F:
5320 	case DRM_FORMAT_ARGB16161616F:
5321 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5322 		break;
5323 	case DRM_FORMAT_XBGR16161616F:
5324 	case DRM_FORMAT_ABGR16161616F:
5325 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5326 		break;
5327 	case DRM_FORMAT_XRGB16161616:
5328 	case DRM_FORMAT_ARGB16161616:
5329 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5330 		break;
5331 	case DRM_FORMAT_XBGR16161616:
5332 	case DRM_FORMAT_ABGR16161616:
5333 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5334 		break;
5335 	default:
5336 		DRM_ERROR(
5337 			"Unsupported screen format %p4cc\n",
5338 			&fb->format->format);
5339 		return -EINVAL;
5340 	}
5341 
5342 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5343 	case DRM_MODE_ROTATE_0:
5344 		plane_info->rotation = ROTATION_ANGLE_0;
5345 		break;
5346 	case DRM_MODE_ROTATE_90:
5347 		plane_info->rotation = ROTATION_ANGLE_90;
5348 		break;
5349 	case DRM_MODE_ROTATE_180:
5350 		plane_info->rotation = ROTATION_ANGLE_180;
5351 		break;
5352 	case DRM_MODE_ROTATE_270:
5353 		plane_info->rotation = ROTATION_ANGLE_270;
5354 		break;
5355 	default:
5356 		plane_info->rotation = ROTATION_ANGLE_0;
5357 		break;
5358 	}
5359 
5360 	plane_info->visible = true;
5361 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5362 
5363 	plane_info->layer_index = 0;
5364 
5365 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5366 					  &plane_info->color_space);
5367 	if (ret)
5368 		return ret;
5369 
5370 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5371 					   plane_info->rotation, tiling_flags,
5372 					   &plane_info->tiling_info,
5373 					   &plane_info->plane_size,
5374 					   &plane_info->dcc, address, tmz_surface,
5375 					   force_disable_dcc);
5376 	if (ret)
5377 		return ret;
5378 
5379 	fill_blending_from_plane_state(
5380 		plane_state, &plane_info->per_pixel_alpha,
5381 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5382 
5383 	return 0;
5384 }
5385 
5386 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5387 				    struct dc_plane_state *dc_plane_state,
5388 				    struct drm_plane_state *plane_state,
5389 				    struct drm_crtc_state *crtc_state)
5390 {
5391 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5392 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5393 	struct dc_scaling_info scaling_info;
5394 	struct dc_plane_info plane_info;
5395 	int ret;
5396 	bool force_disable_dcc = false;
5397 
5398 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5399 	if (ret)
5400 		return ret;
5401 
5402 	dc_plane_state->src_rect = scaling_info.src_rect;
5403 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5404 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5405 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5406 
5407 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5408 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5409 					  afb->tiling_flags,
5410 					  &plane_info,
5411 					  &dc_plane_state->address,
5412 					  afb->tmz_surface,
5413 					  force_disable_dcc);
5414 	if (ret)
5415 		return ret;
5416 
5417 	dc_plane_state->format = plane_info.format;
5418 	dc_plane_state->color_space = plane_info.color_space;
5420 	dc_plane_state->plane_size = plane_info.plane_size;
5421 	dc_plane_state->rotation = plane_info.rotation;
5422 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5423 	dc_plane_state->stereo_format = plane_info.stereo_format;
5424 	dc_plane_state->tiling_info = plane_info.tiling_info;
5425 	dc_plane_state->visible = plane_info.visible;
5426 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5427 	dc_plane_state->global_alpha = plane_info.global_alpha;
5428 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5429 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5431 	dc_plane_state->flip_int_enabled = true;
5432 
5433 	/*
5434 	 * Always set input transfer function, since plane state is refreshed
5435 	 * every time.
5436 	 */
5437 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5438 	if (ret)
5439 		return ret;
5440 
5441 	return 0;
5442 }
5443 
5444 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5445 					   const struct dm_connector_state *dm_state,
5446 					   struct dc_stream_state *stream)
5447 {
5448 	enum amdgpu_rmx_type rmx_type;
5449 
	struct rect src = { 0 }; /* viewport in composition space */
5451 	struct rect dst = { 0 }; /* stream addressable area */
5452 
5453 	/* no mode. nothing to be done */
5454 	if (!mode)
5455 		return;
5456 
5457 	/* Full screen scaling by default */
5458 	src.width = mode->hdisplay;
5459 	src.height = mode->vdisplay;
5460 	dst.width = stream->timing.h_addressable;
5461 	dst.height = stream->timing.v_addressable;
5462 
5463 	if (dm_state) {
5464 		rmx_type = dm_state->scaling;
5465 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5466 			if (src.width * dst.height <
5467 					src.height * dst.width) {
5468 				/* height needs less upscaling/more downscaling */
5469 				dst.width = src.width *
5470 						dst.height / src.height;
5471 			} else {
5472 				/* width needs less upscaling/more downscaling */
5473 				dst.height = src.height *
5474 						dst.width / src.width;
5475 			}
5476 		} else if (rmx_type == RMX_CENTER) {
5477 			dst = src;
5478 		}
5479 
5480 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5481 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5482 
5483 		if (dm_state->underscan_enable) {
5484 			dst.x += dm_state->underscan_hborder / 2;
5485 			dst.y += dm_state->underscan_vborder / 2;
5486 			dst.width -= dm_state->underscan_hborder;
5487 			dst.height -= dm_state->underscan_vborder;
5488 		}
5489 	}
5490 
5491 	stream->src = src;
5492 	stream->dst = dst;
5493 
5494 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5495 		      dst.x, dst.y, dst.width, dst.height);
5496 
5497 }
5498 
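/*
 * Convert the bpc reported by the connector (or its YCbCr 4:2:0 deep color
 * capabilities) into a DC color depth, optionally capped by the bpc
 * requested by userspace and rounded down to an even value.
 */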
5499 static enum dc_color_depth
5500 convert_color_depth_from_display_info(const struct drm_connector *connector,
5501 				      bool is_y420, int requested_bpc)
5502 {
5503 	uint8_t bpc;
5504 
5505 	if (is_y420) {
5506 		bpc = 8;
5507 
5508 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5509 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5510 			bpc = 16;
5511 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5512 			bpc = 12;
5513 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5514 			bpc = 10;
5515 	} else {
5516 		bpc = (uint8_t)connector->display_info.bpc;
5517 		/* Assume 8 bpc by default if no bpc is specified. */
5518 		bpc = bpc ? bpc : 8;
5519 	}
5520 
5521 	if (requested_bpc > 0) {
5522 		/*
5523 		 * Cap display bpc based on the user requested value.
5524 		 *
		 * The value for state->max_bpc may not be correctly updated
5526 		 * depending on when the connector gets added to the state
5527 		 * or if this was called outside of atomic check, so it
5528 		 * can't be used directly.
5529 		 */
5530 		bpc = min_t(u8, bpc, requested_bpc);
5531 
5532 		/* Round down to the nearest even number. */
5533 		bpc = bpc - (bpc & 1);
5534 	}
5535 
5536 	switch (bpc) {
5537 	case 0:
5538 		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing
5542 		 */
5543 		return COLOR_DEPTH_888;
5544 	case 6:
5545 		return COLOR_DEPTH_666;
5546 	case 8:
5547 		return COLOR_DEPTH_888;
5548 	case 10:
5549 		return COLOR_DEPTH_101010;
5550 	case 12:
5551 		return COLOR_DEPTH_121212;
5552 	case 14:
5553 		return COLOR_DEPTH_141414;
5554 	case 16:
5555 		return COLOR_DEPTH_161616;
5556 	default:
5557 		return COLOR_DEPTH_UNDEFINED;
5558 	}
5559 }
5560 
5561 static enum dc_aspect_ratio
5562 get_aspect_ratio(const struct drm_display_mode *mode_in)
5563 {
5564 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5565 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5566 }
5567 
5568 static enum dc_color_space
5569 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5570 {
5571 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5572 
5573 	switch (dc_crtc_timing->pixel_encoding)	{
5574 	case PIXEL_ENCODING_YCBCR422:
5575 	case PIXEL_ENCODING_YCBCR444:
5576 	case PIXEL_ENCODING_YCBCR420:
5577 	{
5578 		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; use YCbCr709 and YCbCr601,
		 * respectively.
5582 		 */
5583 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5584 			if (dc_crtc_timing->flags.Y_ONLY)
5585 				color_space =
5586 					COLOR_SPACE_YCBCR709_LIMITED;
5587 			else
5588 				color_space = COLOR_SPACE_YCBCR709;
5589 		} else {
5590 			if (dc_crtc_timing->flags.Y_ONLY)
5591 				color_space =
5592 					COLOR_SPACE_YCBCR601_LIMITED;
5593 			else
5594 				color_space = COLOR_SPACE_YCBCR601;
5595 		}
5596 
5597 	}
5598 	break;
5599 	case PIXEL_ENCODING_RGB:
5600 		color_space = COLOR_SPACE_SRGB;
5601 		break;
5602 
5603 	default:
5604 		WARN_ON(1);
5605 		break;
5606 	}
5607 
5608 	return color_space;
5609 }
5610 
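/*
 * Reduce the color depth step by step until the normalized pixel clock fits
 * within the sink's maximum TMDS clock. Returns false if no valid HDMI depth
 * fits.
 */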
5611 static bool adjust_colour_depth_from_display_info(
5612 	struct dc_crtc_timing *timing_out,
5613 	const struct drm_display_info *info)
5614 {
5615 	enum dc_color_depth depth = timing_out->display_color_depth;
5616 	int normalized_clk;
5617 	do {
5618 		normalized_clk = timing_out->pix_clk_100hz / 10;
5619 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5620 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5621 			normalized_clk /= 2;
5622 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5623 		switch (depth) {
5624 		case COLOR_DEPTH_888:
5625 			break;
5626 		case COLOR_DEPTH_101010:
5627 			normalized_clk = (normalized_clk * 30) / 24;
5628 			break;
5629 		case COLOR_DEPTH_121212:
5630 			normalized_clk = (normalized_clk * 36) / 24;
5631 			break;
5632 		case COLOR_DEPTH_161616:
5633 			normalized_clk = (normalized_clk * 48) / 24;
5634 			break;
5635 		default:
5636 			/* The above depths are the only ones valid for HDMI. */
5637 			return false;
5638 		}
5639 		if (normalized_clk <= info->max_tmds_clock) {
5640 			timing_out->display_color_depth = depth;
5641 			return true;
5642 		}
5643 	} while (--depth > COLOR_DEPTH_666);
5644 	return false;
5645 }
5646 
5647 static void fill_stream_properties_from_drm_display_mode(
5648 	struct dc_stream_state *stream,
5649 	const struct drm_display_mode *mode_in,
5650 	const struct drm_connector *connector,
5651 	const struct drm_connector_state *connector_state,
5652 	const struct dc_stream_state *old_stream,
5653 	int requested_bpc)
5654 {
5655 	struct dc_crtc_timing *timing_out = &stream->timing;
5656 	const struct drm_display_info *info = &connector->display_info;
5657 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5658 	struct hdmi_vendor_infoframe hv_frame;
5659 	struct hdmi_avi_infoframe avi_frame;
5660 
5661 	memset(&hv_frame, 0, sizeof(hv_frame));
5662 	memset(&avi_frame, 0, sizeof(avi_frame));
5663 
5664 	timing_out->h_border_left = 0;
5665 	timing_out->h_border_right = 0;
5666 	timing_out->v_border_top = 0;
5667 	timing_out->v_border_bottom = 0;
5668 	/* TODO: un-hardcode */
5669 	if (drm_mode_is_420_only(info, mode_in)
5670 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5671 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5672 	else if (drm_mode_is_420_also(info, mode_in)
5673 			&& aconnector->force_yuv420_output)
5674 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5675 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5676 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5677 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5678 	else
5679 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5680 
5681 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5682 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5683 		connector,
5684 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5685 		requested_bpc);
5686 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5687 	timing_out->hdmi_vic = 0;
5688 
	if (old_stream) {
5690 		timing_out->vic = old_stream->timing.vic;
5691 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5692 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5693 	} else {
5694 		timing_out->vic = drm_match_cea_mode(mode_in);
5695 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5696 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5697 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5698 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5699 	}
5700 
5701 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5702 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5703 		timing_out->vic = avi_frame.video_code;
5704 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5705 		timing_out->hdmi_vic = hv_frame.vic;
5706 	}
5707 
5708 	if (is_freesync_video_mode(mode_in, aconnector)) {
5709 		timing_out->h_addressable = mode_in->hdisplay;
5710 		timing_out->h_total = mode_in->htotal;
5711 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5712 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5713 		timing_out->v_total = mode_in->vtotal;
5714 		timing_out->v_addressable = mode_in->vdisplay;
5715 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5716 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5717 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5718 	} else {
5719 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5720 		timing_out->h_total = mode_in->crtc_htotal;
5721 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5722 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5723 		timing_out->v_total = mode_in->crtc_vtotal;
5724 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5725 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5726 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5727 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5728 	}
5729 
5730 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5731 
5732 	stream->output_color_space = get_output_color_space(timing_out);
5733 
5734 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5735 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5736 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5737 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5738 		    drm_mode_is_420_also(info, mode_in) &&
5739 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5740 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5741 			adjust_colour_depth_from_display_info(timing_out, info);
5742 		}
5743 	}
5744 }
5745 
5746 static void fill_audio_info(struct audio_info *audio_info,
5747 			    const struct drm_connector *drm_connector,
5748 			    const struct dc_sink *dc_sink)
5749 {
5750 	int i = 0;
5751 	int cea_revision = 0;
5752 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5753 
5754 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5755 	audio_info->product_id = edid_caps->product_id;
5756 
5757 	cea_revision = drm_connector->display_info.cea_rev;
5758 
5759 	strscpy(audio_info->display_name,
5760 		edid_caps->display_name,
5761 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5762 
5763 	if (cea_revision >= 3) {
5764 		audio_info->mode_count = edid_caps->audio_mode_count;
5765 
5766 		for (i = 0; i < audio_info->mode_count; ++i) {
5767 			audio_info->modes[i].format_code =
5768 					(enum audio_format_code)
5769 					(edid_caps->audio_modes[i].format_code);
5770 			audio_info->modes[i].channel_count =
5771 					edid_caps->audio_modes[i].channel_count;
5772 			audio_info->modes[i].sample_rates.all =
5773 					edid_caps->audio_modes[i].sample_rate;
5774 			audio_info->modes[i].sample_size =
5775 					edid_caps->audio_modes[i].sample_size;
5776 		}
5777 	}
5778 
5779 	audio_info->flags.all = edid_caps->speaker_flags;
5780 
	/* TODO: We only check for the progressive mode; check for interlaced mode too */
5782 	if (drm_connector->latency_present[0]) {
5783 		audio_info->video_latency = drm_connector->video_latency[0];
5784 		audio_info->audio_latency = drm_connector->audio_latency[0];
5785 	}
5786 
5787 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5788 
5789 }
5790 
5791 static void
5792 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5793 				      struct drm_display_mode *dst_mode)
5794 {
5795 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5796 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5797 	dst_mode->crtc_clock = src_mode->crtc_clock;
5798 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5799 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5800 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5801 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5802 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5803 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5804 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5805 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5806 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5807 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5808 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5809 }
5810 
5811 static void
5812 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5813 					const struct drm_display_mode *native_mode,
5814 					bool scale_enabled)
5815 {
5816 	if (scale_enabled) {
5817 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5818 	} else if (native_mode->clock == drm_mode->clock &&
5819 			native_mode->htotal == drm_mode->htotal &&
5820 			native_mode->vtotal == drm_mode->vtotal) {
5821 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5822 	} else {
		/* neither scaling nor an amdgpu-inserted mode, no need to patch */
5824 	}
5825 }
5826 
5827 static struct dc_sink *
5828 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5829 {
5830 	struct dc_sink_init_data sink_init_data = { 0 };
5831 	struct dc_sink *sink = NULL;
5832 	sink_init_data.link = aconnector->dc_link;
5833 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5834 
5835 	sink = dc_sink_create(&sink_init_data);
5836 	if (!sink) {
5837 		DRM_ERROR("Failed to create sink!\n");
5838 		return NULL;
5839 	}
5840 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5841 
5842 	return sink;
5843 }
5844 
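/*
 * Program the CRTC-reset trigger for a multisync stream: reset on the
 * master's VSYNC edge (rising or falling, matching the master's polarity),
 * delayed to the next pixel.
 */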
5845 static void set_multisync_trigger_params(
5846 		struct dc_stream_state *stream)
5847 {
5848 	struct dc_stream_state *master = NULL;
5849 
5850 	if (stream->triggered_crtc_reset.enabled) {
5851 		master = stream->triggered_crtc_reset.event_source;
5852 		stream->triggered_crtc_reset.event =
5853 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5854 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5855 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5856 	}
5857 }
5858 
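/*
 * Pick the stream with the highest refresh rate as the multisync master and
 * point every other stream's CRTC-reset event source at it.
 */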
5859 static void set_master_stream(struct dc_stream_state *stream_set[],
5860 			      int stream_count)
5861 {
5862 	int j, highest_rfr = 0, master_stream = 0;
5863 
5864 	for (j = 0;  j < stream_count; j++) {
5865 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5866 			int refresh_rate = 0;
5867 
			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5870 			if (refresh_rate > highest_rfr) {
5871 				highest_rfr = refresh_rate;
5872 				master_stream = j;
5873 			}
5874 		}
5875 	}
5876 	for (j = 0;  j < stream_count; j++) {
5877 		if (stream_set[j])
5878 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5879 	}
5880 }
5881 
5882 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5883 {
5884 	int i = 0;
5885 	struct dc_stream_state *stream;
5886 
5887 	if (context->stream_count < 2)
5888 		return;
5889 	for (i = 0; i < context->stream_count ; i++) {
5890 		if (!context->streams[i])
5891 			continue;
5892 		/*
5893 		 * TODO: add a function to read AMD VSDB bits and set
5894 		 * crtc_sync_master.multi_sync_enabled flag
5895 		 * For now it's set to false
5896 		 */
5897 	}
5898 
5899 	set_master_stream(context->streams, context->stream_count);
5900 
5901 	for (i = 0; i < context->stream_count ; i++) {
5902 		stream = context->streams[i];
5903 
5904 		if (!stream)
5905 			continue;
5906 
5907 		set_multisync_trigger_params(stream);
5908 	}
5909 }
5910 
5911 #if defined(CONFIG_DRM_AMD_DC_DCN)
5912 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5913 							struct dc_sink *sink, struct dc_stream_state *stream,
5914 							struct dsc_dec_dpcd_caps *dsc_caps)
5915 {
5916 	stream->timing.flags.DSC = 0;
5917 
5918 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5919 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5920 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5921 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5922 				      dsc_caps);
5923 	}
5924 }
5925 
5926 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5927 										struct dc_sink *sink, struct dc_stream_state *stream,
5928 										struct dsc_dec_dpcd_caps *dsc_caps)
5929 {
5930 	struct drm_connector *drm_connector = &aconnector->base;
5931 	uint32_t link_bandwidth_kbps;
5932 	uint32_t max_dsc_target_bpp_limit_override = 0;
5933 
5934 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5935 							dc_link_get_link_cap(aconnector->dc_link));
5936 
5937 	if (stream->link && stream->link->local_sink)
5938 		max_dsc_target_bpp_limit_override =
5939 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
5940 
5941 	/* Set DSC policy according to dsc_clock_en */
5942 	dc_dsc_policy_set_enable_dsc_when_not_needed(
5943 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5944 
5945 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5946 
5947 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5948 						dsc_caps,
5949 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5950 						max_dsc_target_bpp_limit_override,
5951 						link_bandwidth_kbps,
5952 						&stream->timing,
5953 						&stream->timing.dsc_cfg)) {
5954 			stream->timing.flags.DSC = 1;
5955 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5956 		}
5957 	}
5958 
5959 	/* Overwrite the stream flag if DSC is enabled through debugfs */
5960 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5961 		stream->timing.flags.DSC = 1;
5962 
5963 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5964 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5965 
5966 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5967 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5968 
5969 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5970 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5971 }
5972 #endif
5973 
5974 /**
5975  * DOC: FreeSync Video
5976  *
5977  * When a userspace application wants to play a video, the content follows a
5978  * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS,
 * respectively:
5981  *
5982  * - TV/NTSC (23.976 FPS)
5983  * - Cinema (24 FPS)
5984  * - TV/PAL (25 FPS)
5985  * - TV/NTSC (29.97 FPS)
5986  * - TV/NTSC (30 FPS)
5987  * - Cinema HFR (48 FPS)
5988  * - TV/PAL (50 FPS)
5989  * - Commonly used (60 FPS)
5990  * - Multiples of 24 (48,72,96 FPS)
5991  *
 * The list of standard video formats is not huge and can be added to the
 * connector modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch happens seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modeset change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid blinking during the
 * transition. For example, a video player can change the mode from 60Hz to
 * 30Hz for playing TV/NTSC content when it goes full screen without causing
 * any display blink. The same concept can be applied to any other mode
 * setting change.
6003  */
6004 static struct drm_display_mode *
6005 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6006 			  bool use_probed_modes)
6007 {
6008 	struct drm_display_mode *m, *m_pref = NULL;
6009 	u16 current_refresh, highest_refresh;
6010 	struct list_head *list_head = use_probed_modes ?
6011 						    &aconnector->base.probed_modes :
6012 						    &aconnector->base.modes;
6013 
6014 	if (aconnector->freesync_vid_base.clock != 0)
6015 		return &aconnector->freesync_vid_base;
6016 
6017 	/* Find the preferred mode */
6018 	list_for_each_entry (m, list_head, head) {
6019 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6020 			m_pref = m;
6021 			break;
6022 		}
6023 	}
6024 
6025 	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
6027 		m_pref = list_first_entry_or_null(
6028 			&aconnector->base.modes, struct drm_display_mode, head);
6029 		if (!m_pref) {
6030 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6031 			return NULL;
6032 		}
6033 	}
6034 
6035 	highest_refresh = drm_mode_vrefresh(m_pref);
6036 
6037 	/*
6038 	 * Find the mode with highest refresh rate with same resolution.
6039 	 * For some monitors, preferred mode is not the mode with highest
6040 	 * supported refresh rate.
6041 	 */
6042 	list_for_each_entry (m, list_head, head) {
6043 		current_refresh  = drm_mode_vrefresh(m);
6044 
6045 		if (m->hdisplay == m_pref->hdisplay &&
6046 		    m->vdisplay == m_pref->vdisplay &&
6047 		    highest_refresh < current_refresh) {
6048 			highest_refresh = current_refresh;
6049 			m_pref = m;
6050 		}
6051 	}
6052 
6053 	aconnector->freesync_vid_base = *m_pref;
6054 	return m_pref;
6055 }
6056 
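/*
 * A mode counts as a FreeSync video mode if it matches the connector's
 * highest-refresh base mode in everything except the vertical total, and the
 * difference is consistent with a pure front porch adjustment.
 */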
6057 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6058 				   struct amdgpu_dm_connector *aconnector)
6059 {
6060 	struct drm_display_mode *high_mode;
6061 	int timing_diff;
6062 
6063 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6064 	if (!high_mode || !mode)
6065 		return false;
6066 
6067 	timing_diff = high_mode->vtotal - mode->vtotal;
6068 
6069 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6070 	    high_mode->hdisplay != mode->hdisplay ||
6071 	    high_mode->vdisplay != mode->vdisplay ||
6072 	    high_mode->hsync_start != mode->hsync_start ||
6073 	    high_mode->hsync_end != mode->hsync_end ||
6074 	    high_mode->htotal != mode->htotal ||
6075 	    high_mode->hskew != mode->hskew ||
6076 	    high_mode->vscan != mode->vscan ||
6077 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6078 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6079 		return false;
6080 	else
6081 		return true;
6082 }
6083 
6084 static struct dc_stream_state *
6085 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6086 		       const struct drm_display_mode *drm_mode,
6087 		       const struct dm_connector_state *dm_state,
6088 		       const struct dc_stream_state *old_stream,
6089 		       int requested_bpc)
6090 {
6091 	struct drm_display_mode *preferred_mode = NULL;
6092 	struct drm_connector *drm_connector;
6093 	const struct drm_connector_state *con_state =
6094 		dm_state ? &dm_state->base : NULL;
6095 	struct dc_stream_state *stream = NULL;
6096 	struct drm_display_mode mode = *drm_mode;
6097 	struct drm_display_mode saved_mode;
6098 	struct drm_display_mode *freesync_mode = NULL;
6099 	bool native_mode_found = false;
6100 	bool recalculate_timing = false;
6101 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6102 	int mode_refresh;
6103 	int preferred_refresh = 0;
6104 #if defined(CONFIG_DRM_AMD_DC_DCN)
6105 	struct dsc_dec_dpcd_caps dsc_caps;
6106 #endif
6107 	struct dc_sink *sink = NULL;
6108 
6109 	memset(&saved_mode, 0, sizeof(saved_mode));
6110 
6111 	if (aconnector == NULL) {
6112 		DRM_ERROR("aconnector is NULL!\n");
6113 		return stream;
6114 	}
6115 
6116 	drm_connector = &aconnector->base;
6117 
6118 	if (!aconnector->dc_sink) {
6119 		sink = create_fake_sink(aconnector);
6120 		if (!sink)
6121 			return stream;
6122 	} else {
6123 		sink = aconnector->dc_sink;
6124 		dc_sink_retain(sink);
6125 	}
6126 
6127 	stream = dc_create_stream_for_sink(sink);
6128 
6129 	if (stream == NULL) {
6130 		DRM_ERROR("Failed to create stream for sink!\n");
6131 		goto finish;
6132 	}
6133 
6134 	stream->dm_stream_context = aconnector;
6135 
6136 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6137 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6138 
6139 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6140 		/* Search for preferred mode */
6141 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6142 			native_mode_found = true;
6143 			break;
6144 		}
6145 	}
6146 	if (!native_mode_found)
6147 		preferred_mode = list_first_entry_or_null(
6148 				&aconnector->base.modes,
6149 				struct drm_display_mode,
6150 				head);
6151 
6152 	mode_refresh = drm_mode_vrefresh(&mode);
6153 
6154 	if (preferred_mode == NULL) {
6155 		/*
6156 		 * This may not be an error, the use case is when we have no
6157 		 * usermode calls to reset and set mode upon hotplug. In this
6158 		 * case, we call set mode ourselves to restore the previous mode
		 * and the mode list may not be populated in time.
6160 		 */
6161 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6162 	} else {
6163 		recalculate_timing = amdgpu_freesync_vid_mode &&
6164 				 is_freesync_video_mode(&mode, aconnector);
6165 		if (recalculate_timing) {
6166 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6167 			saved_mode = mode;
6168 			mode = *freesync_mode;
6169 		} else {
6170 			decide_crtc_timing_for_drm_display_mode(
6171 				&mode, preferred_mode, scale);
6172 
6173 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6174 		}
6175 	}
6176 
6177 	if (recalculate_timing)
6178 		drm_mode_set_crtcinfo(&saved_mode, 0);
6179 	else if (!dm_state)
6180 		drm_mode_set_crtcinfo(&mode, 0);
6181 
	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
6186 	if (!scale || mode_refresh != preferred_refresh)
6187 		fill_stream_properties_from_drm_display_mode(
6188 			stream, &mode, &aconnector->base, con_state, NULL,
6189 			requested_bpc);
6190 	else
6191 		fill_stream_properties_from_drm_display_mode(
6192 			stream, &mode, &aconnector->base, con_state, old_stream,
6193 			requested_bpc);
6194 
6195 #if defined(CONFIG_DRM_AMD_DC_DCN)
6196 	/* SST DSC determination policy */
6197 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6198 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6199 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6200 #endif
6201 
6202 	update_stream_scaling_settings(&mode, dm_state, stream);
6203 
6204 	fill_audio_info(
6205 		&stream->audio_info,
6206 		drm_connector,
6207 		sink);
6208 
6209 	update_stream_signal(stream, sink);
6210 
6211 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6212 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6213 
6214 	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
6219 		stream->use_vsc_sdp_for_colorimetry = false;
6220 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6221 			stream->use_vsc_sdp_for_colorimetry =
6222 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6223 		} else {
6224 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6225 				stream->use_vsc_sdp_for_colorimetry = true;
6226 		}
6227 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6228 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6229 
6230 	}
6231 finish:
6232 	dc_sink_release(sink);
6233 
6234 	return stream;
6235 }
6236 
6237 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6238 {
6239 	drm_crtc_cleanup(crtc);
6240 	kfree(crtc);
6241 }
6242 
6243 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6244 				  struct drm_crtc_state *state)
6245 {
6246 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6247 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6249 	if (cur->stream)
6250 		dc_stream_release(cur->stream);
6251 
6252 
6253 	__drm_atomic_helper_crtc_destroy_state(state);
6254 
6255 
6256 	kfree(state);
6257 }
6258 
6259 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6260 {
6261 	struct dm_crtc_state *state;
6262 
6263 	if (crtc->state)
6264 		dm_crtc_destroy_state(crtc, crtc->state);
6265 
6266 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6267 	if (WARN_ON(!state))
6268 		return;
6269 
6270 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6271 }
6272 
6273 static struct drm_crtc_state *
6274 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6275 {
6276 	struct dm_crtc_state *state, *cur;
6277 
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
6282 
6283 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6284 	if (!state)
6285 		return NULL;
6286 
6287 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6288 
6289 	if (cur->stream) {
6290 		state->stream = cur->stream;
6291 		dc_stream_retain(state->stream);
6292 	}
6293 
6294 	state->active_planes = cur->active_planes;
6295 	state->vrr_infopacket = cur->vrr_infopacket;
6296 	state->abm_level = cur->abm_level;
6297 	state->vrr_supported = cur->vrr_supported;
6298 	state->freesync_config = cur->freesync_config;
6299 	state->cm_has_degamma = cur->cm_has_degamma;
6300 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6301 	state->force_dpms_off = cur->force_dpms_off;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
6303 
6304 	return &state->base;
6305 }
6306 
6307 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6308 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6309 {
6310 	crtc_debugfs_init(crtc);
6311 
6312 	return 0;
6313 }
6314 #endif
6315 
6316 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6317 {
6318 	enum dc_irq_source irq_source;
6319 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6320 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6321 	int rc;
6322 
6323 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6324 
6325 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6326 
6327 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6328 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6329 	return rc;
6330 }
6331 
6332 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6333 {
6334 	enum dc_irq_source irq_source;
6335 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6336 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6337 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6338 #if defined(CONFIG_DRM_AMD_DC_DCN)
6339 	struct amdgpu_display_manager *dm = &adev->dm;
6340 	struct vblank_control_work *work;
6341 #endif
6342 	int rc = 0;
6343 
6344 	if (enable) {
6345 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6346 		if (amdgpu_dm_vrr_active(acrtc_state))
6347 			rc = dm_set_vupdate_irq(crtc, true);
6348 	} else {
6349 		/* vblank irq off -> vupdate irq off */
6350 		rc = dm_set_vupdate_irq(crtc, false);
6351 	}
6352 
6353 	if (rc)
6354 		return rc;
6355 
6356 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6357 
6358 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6359 		return -EBUSY;
6360 
6361 	if (amdgpu_in_reset(adev))
6362 		return 0;
6363 
6364 #if defined(CONFIG_DRM_AMD_DC_DCN)
6365 	if (dm->vblank_control_workqueue) {
6366 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6367 		if (!work)
6368 			return -ENOMEM;
6369 
6370 		INIT_WORK(&work->work, vblank_control_worker);
6371 		work->dm = dm;
6372 		work->acrtc = acrtc;
6373 		work->enable = enable;
6374 
6375 		if (acrtc_state->stream) {
6376 			dc_stream_retain(acrtc_state->stream);
6377 			work->stream = acrtc_state->stream;
6378 		}
6379 
6380 		queue_work(dm->vblank_control_workqueue, &work->work);
6381 	}
6382 #endif
6383 
6384 	return 0;
6385 }
6386 
6387 static int dm_enable_vblank(struct drm_crtc *crtc)
6388 {
6389 	return dm_set_vblank(crtc, true);
6390 }
6391 
6392 static void dm_disable_vblank(struct drm_crtc *crtc)
6393 {
6394 	dm_set_vblank(crtc, false);
6395 }
6396 
/* Only the options currently available for the driver are implemented */
6398 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6399 	.reset = dm_crtc_reset_state,
6400 	.destroy = amdgpu_dm_crtc_destroy,
6401 	.set_config = drm_atomic_helper_set_config,
6402 	.page_flip = drm_atomic_helper_page_flip,
6403 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6404 	.atomic_destroy_state = dm_crtc_destroy_state,
6405 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6406 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6407 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6408 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6409 	.enable_vblank = dm_enable_vblank,
6410 	.disable_vblank = dm_disable_vblank,
6411 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6412 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6413 	.late_register = amdgpu_dm_crtc_late_register,
6414 #endif
6415 };
6416 
6417 static enum drm_connector_status
6418 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6419 {
6420 	bool connected;
6421 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6422 
6423 	/*
6424 	 * Notes:
6425 	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in the context of a user-mode ioctl,
	 * which makes it a bad place for *any* MST-related activity.
6428 	 */
6429 
6430 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6431 	    !aconnector->fake_enable)
6432 		connected = (aconnector->dc_sink != NULL);
6433 	else
6434 		connected = (aconnector->base.force == DRM_FORCE_ON);
6435 
6436 	update_subconnector_property(aconnector);
6437 
6438 	return (connected ? connector_status_connected :
6439 			connector_status_disconnected);
6440 }
6441 
6442 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6443 					    struct drm_connector_state *connector_state,
6444 					    struct drm_property *property,
6445 					    uint64_t val)
6446 {
6447 	struct drm_device *dev = connector->dev;
6448 	struct amdgpu_device *adev = drm_to_adev(dev);
6449 	struct dm_connector_state *dm_old_state =
6450 		to_dm_connector_state(connector->state);
6451 	struct dm_connector_state *dm_new_state =
6452 		to_dm_connector_state(connector_state);
6453 
6454 	int ret = -EINVAL;
6455 
6456 	if (property == dev->mode_config.scaling_mode_property) {
6457 		enum amdgpu_rmx_type rmx_type;
6458 
6459 		switch (val) {
6460 		case DRM_MODE_SCALE_CENTER:
6461 			rmx_type = RMX_CENTER;
6462 			break;
6463 		case DRM_MODE_SCALE_ASPECT:
6464 			rmx_type = RMX_ASPECT;
6465 			break;
6466 		case DRM_MODE_SCALE_FULLSCREEN:
6467 			rmx_type = RMX_FULL;
6468 			break;
6469 		case DRM_MODE_SCALE_NONE:
6470 		default:
6471 			rmx_type = RMX_OFF;
6472 			break;
6473 		}
6474 
6475 		if (dm_old_state->scaling == rmx_type)
6476 			return 0;
6477 
6478 		dm_new_state->scaling = rmx_type;
6479 		ret = 0;
6480 	} else if (property == adev->mode_info.underscan_hborder_property) {
6481 		dm_new_state->underscan_hborder = val;
6482 		ret = 0;
6483 	} else if (property == adev->mode_info.underscan_vborder_property) {
6484 		dm_new_state->underscan_vborder = val;
6485 		ret = 0;
6486 	} else if (property == adev->mode_info.underscan_property) {
6487 		dm_new_state->underscan_enable = val;
6488 		ret = 0;
6489 	} else if (property == adev->mode_info.abm_level_property) {
6490 		dm_new_state->abm_level = val;
6491 		ret = 0;
6492 	}
6493 
6494 	return ret;
6495 }
6496 
6497 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6498 					    const struct drm_connector_state *state,
6499 					    struct drm_property *property,
6500 					    uint64_t *val)
6501 {
6502 	struct drm_device *dev = connector->dev;
6503 	struct amdgpu_device *adev = drm_to_adev(dev);
6504 	struct dm_connector_state *dm_state =
6505 		to_dm_connector_state(state);
6506 	int ret = -EINVAL;
6507 
6508 	if (property == dev->mode_config.scaling_mode_property) {
6509 		switch (dm_state->scaling) {
6510 		case RMX_CENTER:
6511 			*val = DRM_MODE_SCALE_CENTER;
6512 			break;
6513 		case RMX_ASPECT:
6514 			*val = DRM_MODE_SCALE_ASPECT;
6515 			break;
6516 		case RMX_FULL:
6517 			*val = DRM_MODE_SCALE_FULLSCREEN;
6518 			break;
6519 		case RMX_OFF:
6520 		default:
6521 			*val = DRM_MODE_SCALE_NONE;
6522 			break;
6523 		}
6524 		ret = 0;
6525 	} else if (property == adev->mode_info.underscan_hborder_property) {
6526 		*val = dm_state->underscan_hborder;
6527 		ret = 0;
6528 	} else if (property == adev->mode_info.underscan_vborder_property) {
6529 		*val = dm_state->underscan_vborder;
6530 		ret = 0;
6531 	} else if (property == adev->mode_info.underscan_property) {
6532 		*val = dm_state->underscan_enable;
6533 		ret = 0;
6534 	} else if (property == adev->mode_info.abm_level_property) {
6535 		*val = dm_state->abm_level;
6536 		ret = 0;
6537 	}
6538 
6539 	return ret;
6540 }
6541 
6542 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6543 {
6544 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6545 
6546 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6547 }
6548 
6549 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6550 {
6551 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6552 	const struct dc_link *link = aconnector->dc_link;
6553 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6554 	struct amdgpu_display_manager *dm = &adev->dm;
6555 	int i;
6556 
6557 	/*
	 * Call only if mst_mgr was initialized before, since it's not done
6559 	 * for all connector types.
6560 	 */
6561 	if (aconnector->mst_mgr.dev)
6562 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6563 
6564 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6565 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6566 	for (i = 0; i < dm->num_of_edps; i++) {
6567 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6568 			backlight_device_unregister(dm->backlight_dev[i]);
6569 			dm->backlight_dev[i] = NULL;
6570 		}
6571 	}
6572 #endif
6573 
6574 	if (aconnector->dc_em_sink)
6575 		dc_sink_release(aconnector->dc_em_sink);
6576 	aconnector->dc_em_sink = NULL;
6577 	if (aconnector->dc_sink)
6578 		dc_sink_release(aconnector->dc_sink);
6579 	aconnector->dc_sink = NULL;
6580 
6581 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6582 	drm_connector_unregister(connector);
6583 	drm_connector_cleanup(connector);
6584 	if (aconnector->i2c) {
6585 		i2c_del_adapter(&aconnector->i2c->base);
6586 		kfree(aconnector->i2c);
6587 	}
6588 	kfree(aconnector->dm_dp_aux.aux.name);
6589 
6590 	kfree(connector);
6591 }
6592 
6593 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6594 {
6595 	struct dm_connector_state *state =
6596 		to_dm_connector_state(connector->state);
6597 
6598 	if (connector->state)
6599 		__drm_atomic_helper_connector_destroy_state(connector->state);
6600 
6601 	kfree(state);
6602 
6603 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6604 
6605 	if (state) {
6606 		state->scaling = RMX_OFF;
6607 		state->underscan_enable = false;
6608 		state->underscan_hborder = 0;
6609 		state->underscan_vborder = 0;
6610 		state->base.max_requested_bpc = 8;
6611 		state->vcpi_slots = 0;
6612 		state->pbn = 0;
6613 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6614 			state->abm_level = amdgpu_dm_abm_level;
6615 
6616 		__drm_atomic_helper_connector_reset(connector, &state->base);
6617 	}
6618 }
6619 
6620 struct drm_connector_state *
6621 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6622 {
6623 	struct dm_connector_state *state =
6624 		to_dm_connector_state(connector->state);
6625 
6626 	struct dm_connector_state *new_state =
6627 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6628 
6629 	if (!new_state)
6630 		return NULL;
6631 
6632 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6633 
6634 	new_state->freesync_capable = state->freesync_capable;
6635 	new_state->abm_level = state->abm_level;
6636 	new_state->scaling = state->scaling;
6637 	new_state->underscan_enable = state->underscan_enable;
6638 	new_state->underscan_hborder = state->underscan_hborder;
6639 	new_state->underscan_vborder = state->underscan_vborder;
6640 	new_state->vcpi_slots = state->vcpi_slots;
6641 	new_state->pbn = state->pbn;
6642 	return &new_state->base;
6643 }
6644 
6645 static int
6646 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6647 {
6648 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6649 		to_amdgpu_dm_connector(connector);
6650 	int r;
6651 
6652 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6653 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6654 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6655 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6656 		if (r)
6657 			return r;
6658 	}
6659 
6660 #if defined(CONFIG_DEBUG_FS)
6661 	connector_debugfs_init(amdgpu_dm_connector);
6662 #endif
6663 
6664 	return 0;
6665 }
6666 
6667 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6668 	.reset = amdgpu_dm_connector_funcs_reset,
6669 	.detect = amdgpu_dm_connector_detect,
6670 	.fill_modes = drm_helper_probe_single_connector_modes,
6671 	.destroy = amdgpu_dm_connector_destroy,
6672 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6673 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6674 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6675 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6676 	.late_register = amdgpu_dm_connector_late_register,
6677 	.early_unregister = amdgpu_dm_connector_unregister
6678 };
6679 
6680 static int get_modes(struct drm_connector *connector)
6681 {
6682 	return amdgpu_dm_connector_get_modes(connector);
6683 }
6684 
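/*
 * Create an emulated (virtual) DC sink from the EDID blob currently attached
 * to the connector. Used when the connector status is forced and no physical
 * sink has been detected, e.g. with a user-supplied override EDID.
 */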
6685 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6686 {
6687 	struct dc_sink_init_data init_params = {
6688 			.link = aconnector->dc_link,
6689 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6690 	};
6691 	struct edid *edid;
6692 
6693 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6695 				aconnector->base.name);
6696 
6697 		aconnector->base.force = DRM_FORCE_OFF;
6698 		aconnector->base.override_edid = false;
6699 		return;
6700 	}
6701 
6702 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6703 
6704 	aconnector->edid = edid;
6705 
6706 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6707 		aconnector->dc_link,
6708 		(uint8_t *)edid,
6709 		(edid->extensions + 1) * EDID_LENGTH,
6710 		&init_params);
6711 
6712 	if (aconnector->base.force == DRM_FORCE_ON) {
6713 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6714 		aconnector->dc_link->local_sink :
6715 		aconnector->dc_em_sink;
6716 		dc_sink_retain(aconnector->dc_sink);
6717 	}
6718 }
6719 
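/*
 * Handle a forced connector with an override EDID: seed usable DP link caps
 * so a headless boot can still perform an initial modeset, then create the
 * emulated sink from the override EDID.
 */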
6720 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6721 {
6722 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6723 
6724 	/*
	 * In case of a headless boot with force on for a DP managed connector,
	 * these settings have to be != 0 to get an initial modeset.
6727 	 */
6728 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6729 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6730 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6731 	}
6732 
6733 
6734 	aconnector->base.override_edid = true;
6735 	create_eml_sink(aconnector);
6736 }
6737 
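/*
 * Create a dc_stream_state for the given mode and validate it against DC.
 * On validation failure the requested bpc is lowered in steps of 2 (down to
 * a minimum of 6); if the encoder still rejects the stream, one more attempt
 * is made with YCbCr420 output forced.
 */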
6738 static struct dc_stream_state *
6739 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6740 				const struct drm_display_mode *drm_mode,
6741 				const struct dm_connector_state *dm_state,
6742 				const struct dc_stream_state *old_stream)
6743 {
6744 	struct drm_connector *connector = &aconnector->base;
6745 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6746 	struct dc_stream_state *stream;
6747 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6748 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6749 	enum dc_status dc_result = DC_OK;
6750 
6751 	do {
6752 		stream = create_stream_for_sink(aconnector, drm_mode,
6753 						dm_state, old_stream,
6754 						requested_bpc);
6755 		if (stream == NULL) {
6756 			DRM_ERROR("Failed to create stream for sink!\n");
6757 			break;
6758 		}
6759 
6760 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6761 
6762 		if (dc_result != DC_OK) {
6763 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6764 				      drm_mode->hdisplay,
6765 				      drm_mode->vdisplay,
6766 				      drm_mode->clock,
6767 				      dc_result,
6768 				      dc_status_to_str(dc_result));
6769 
6770 			dc_stream_release(stream);
6771 			stream = NULL;
6772 			requested_bpc -= 2; /* lower bpc to retry validation */
6773 		}
6774 
6775 	} while (stream == NULL && requested_bpc >= 6);
6776 
6777 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6778 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6779 
6780 		aconnector->force_yuv420_output = true;
6781 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6782 						dm_state, old_stream);
6783 		aconnector->force_yuv420_output = false;
6784 	}
6785 
6786 	return stream;
6787 }
6788 
6789 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6790 				   struct drm_display_mode *mode)
6791 {
6792 	int result = MODE_ERROR;
6793 	struct dc_sink *dc_sink;
6794 	/* TODO: Unhardcode stream count */
6795 	struct dc_stream_state *stream;
6796 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6797 
6798 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6799 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6800 		return result;
6801 
6802 	/*
	 * Only run this the first time mode_valid is called, to initialize
	 * EDID management.
6805 	 */
6806 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6807 		!aconnector->dc_em_sink)
6808 		handle_edid_mgmt(aconnector);
6809 
6810 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6811 
6812 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6813 				aconnector->base.force != DRM_FORCE_ON) {
6814 		DRM_ERROR("dc_sink is NULL!\n");
6815 		goto fail;
6816 	}
6817 
6818 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6819 	if (stream) {
6820 		dc_stream_release(stream);
6821 		result = MODE_OK;
6822 	}
6823 
6824 fail:
	/* TODO: error handling */
6826 	return result;
6827 }
6828 
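/*
 * Pack the connector's HDR output metadata into a DC info packet. The payload
 * is the 26-byte HDR static metadata infoframe; the header is laid out either
 * as an HDMI infoframe or as a DP/eDP SDP depending on the connector type.
 */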
6829 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6830 				struct dc_info_packet *out)
6831 {
6832 	struct hdmi_drm_infoframe frame;
6833 	unsigned char buf[30]; /* 26 + 4 */
6834 	ssize_t len;
6835 	int ret, i;
6836 
6837 	memset(out, 0, sizeof(*out));
6838 
6839 	if (!state->hdr_output_metadata)
6840 		return 0;
6841 
6842 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6843 	if (ret)
6844 		return ret;
6845 
6846 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6847 	if (len < 0)
6848 		return (int)len;
6849 
6850 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6851 	if (len != 30)
6852 		return -EINVAL;
6853 
6854 	/* Prepare the infopacket for DC. */
6855 	switch (state->connector->connector_type) {
6856 	case DRM_MODE_CONNECTOR_HDMIA:
6857 		out->hb0 = 0x87; /* type */
6858 		out->hb1 = 0x01; /* version */
6859 		out->hb2 = 0x1A; /* length */
6860 		out->sb[0] = buf[3]; /* checksum */
6861 		i = 1;
6862 		break;
6863 
6864 	case DRM_MODE_CONNECTOR_DisplayPort:
6865 	case DRM_MODE_CONNECTOR_eDP:
6866 		out->hb0 = 0x00; /* sdp id, zero */
6867 		out->hb1 = 0x87; /* type */
6868 		out->hb2 = 0x1D; /* payload len - 1 */
6869 		out->hb3 = (0x13 << 2); /* sdp version */
6870 		out->sb[0] = 0x01; /* version */
6871 		out->sb[1] = 0x1A; /* length */
6872 		i = 2;
6873 		break;
6874 
6875 	default:
6876 		return -EINVAL;
6877 	}
6878 
6879 	memcpy(&out->sb[i], &buf[4], 26);
6880 	out->valid = true;
6881 
6882 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6883 		       sizeof(out->sb), false);
6884 
6885 	return 0;
6886 }
6887 
6888 static int
6889 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6890 				 struct drm_atomic_state *state)
6891 {
6892 	struct drm_connector_state *new_con_state =
6893 		drm_atomic_get_new_connector_state(state, conn);
6894 	struct drm_connector_state *old_con_state =
6895 		drm_atomic_get_old_connector_state(state, conn);
6896 	struct drm_crtc *crtc = new_con_state->crtc;
6897 	struct drm_crtc_state *new_crtc_state;
6898 	int ret;
6899 
6900 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6901 
6902 	if (!crtc)
6903 		return 0;
6904 
6905 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6906 		struct dc_info_packet hdr_infopacket;
6907 
6908 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6909 		if (ret)
6910 			return ret;
6911 
6912 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6913 		if (IS_ERR(new_crtc_state))
6914 			return PTR_ERR(new_crtc_state);
6915 
6916 		/*
6917 		 * DC considers the stream backends changed if the
6918 		 * static metadata changes. Forcing the modeset also
6919 		 * gives a simple way for userspace to switch from
6920 		 * 8bpc to 10bpc when setting the metadata to enter
6921 		 * or exit HDR.
6922 		 *
6923 		 * Changing the static metadata after it's been
6924 		 * set is permissible, however. So only force a
6925 		 * modeset if we're entering or exiting HDR.
6926 		 */
6927 		new_crtc_state->mode_changed =
6928 			!old_con_state->hdr_output_metadata ||
6929 			!new_con_state->hdr_output_metadata;
6930 	}
6931 
6932 	return 0;
6933 }
6934 
6935 static const struct drm_connector_helper_funcs
6936 amdgpu_dm_connector_helper_funcs = {
6937 	/*
	 * If a second, larger display is hotplugged in fbcon mode, its higher
	 * resolution modes are filtered out by drm_mode_validate_size() and are
	 * missing once the user starts lightdm. So we need to rebuild the mode
	 * list in the get_modes callback, not just return the mode count.
6942 	 */
6943 	.get_modes = get_modes,
6944 	.mode_valid = amdgpu_dm_connector_mode_valid,
6945 	.atomic_check = amdgpu_dm_connector_atomic_check,
6946 };
6947 
6948 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6949 {
6950 }
6951 
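/* Count the non-cursor planes that will be active on this CRTC. */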
6952 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6953 {
6954 	struct drm_atomic_state *state = new_crtc_state->state;
6955 	struct drm_plane *plane;
6956 	int num_active = 0;
6957 
6958 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6959 		struct drm_plane_state *new_plane_state;
6960 
6961 		/* Cursor planes are "fake". */
6962 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6963 			continue;
6964 
6965 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6966 
6967 		if (!new_plane_state) {
6968 			/*
			 * The plane is enabled on the CRTC and hasn't changed
6970 			 * state. This means that it previously passed
6971 			 * validation and is therefore enabled.
6972 			 */
6973 			num_active += 1;
6974 			continue;
6975 		}
6976 
		/* A plane needs a framebuffer attached to be considered enabled. */
6978 		num_active += (new_plane_state->fb != NULL);
6979 	}
6980 
6981 	return num_active;
6982 }
6983 
6984 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6985 					 struct drm_crtc_state *new_crtc_state)
6986 {
6987 	struct dm_crtc_state *dm_new_crtc_state =
6988 		to_dm_crtc_state(new_crtc_state);
6989 
6990 	dm_new_crtc_state->active_planes = 0;
6991 
6992 	if (!dm_new_crtc_state->stream)
6993 		return;
6994 
6995 	dm_new_crtc_state->active_planes =
6996 		count_crtc_active_planes(new_crtc_state);
6997 }
6998 
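/*
 * CRTC atomic check: refresh the active plane count, require the primary
 * plane to be enabled whenever the CRTC is, and validate the attached DC
 * stream (if any) against the hardware.
 */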
6999 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7000 				       struct drm_atomic_state *state)
7001 {
7002 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7003 									  crtc);
7004 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7005 	struct dc *dc = adev->dm.dc;
7006 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7007 	int ret = -EINVAL;
7008 
7009 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7010 
7011 	dm_update_crtc_active_planes(crtc, crtc_state);
7012 
7013 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7014 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7015 		return ret;
7016 	}
7017 
7018 	/*
7019 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7020 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7021 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7022 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7023 	 */
7024 	if (crtc_state->enable &&
7025 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7026 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7027 		return -EINVAL;
7028 	}
7029 
7030 	/* In some use cases, like reset, no stream is attached */
7031 	if (!dm_crtc_state->stream)
7032 		return 0;
7033 
7034 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7035 		return 0;
7036 
7037 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7038 	return ret;
7039 }
7040 
7041 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7042 				      const struct drm_display_mode *mode,
7043 				      struct drm_display_mode *adjusted_mode)
7044 {
7045 	return true;
7046 }
7047 
7048 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7049 	.disable = dm_crtc_helper_disable,
7050 	.atomic_check = dm_crtc_helper_atomic_check,
7051 	.mode_fixup = dm_crtc_helper_mode_fixup,
7052 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7053 };
7054 
7055 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7056 {
7057 
7058 }
7059 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
7080 
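/*
 * Encoder atomic check for DP MST connectors: compute the PBN required by
 * the adjusted mode at the chosen color depth and reserve VCPI slots for it
 * in the MST topology state.
 */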
7081 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7082 					  struct drm_crtc_state *crtc_state,
7083 					  struct drm_connector_state *conn_state)
7084 {
7085 	struct drm_atomic_state *state = crtc_state->state;
7086 	struct drm_connector *connector = conn_state->connector;
7087 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7088 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7089 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7090 	struct drm_dp_mst_topology_mgr *mst_mgr;
7091 	struct drm_dp_mst_port *mst_port;
7092 	enum dc_color_depth color_depth;
7093 	int clock, bpp = 0;
7094 	bool is_y420 = false;
7095 
7096 	if (!aconnector->port || !aconnector->dc_sink)
7097 		return 0;
7098 
7099 	mst_port = aconnector->port;
7100 	mst_mgr = &aconnector->mst_port->mst_mgr;
7101 
7102 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7103 		return 0;
7104 
7105 	if (!state->duplicated) {
7106 		int max_bpc = conn_state->max_requested_bpc;
7107 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7108 				aconnector->force_yuv420_output;
7109 		color_depth = convert_color_depth_from_display_info(connector,
7110 								    is_y420,
7111 								    max_bpc);
7112 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7113 		clock = adjusted_mode->clock;
7114 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7115 	}
7116 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7117 									   mst_mgr,
7118 									   mst_port,
7119 									   dm_new_connector_state->pbn,
7120 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7121 	if (dm_new_connector_state->vcpi_slots < 0) {
7122 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7123 		return dm_new_connector_state->vcpi_slots;
7124 	}
7125 	return 0;
7126 }
7127 
7128 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7129 	.disable = dm_encoder_helper_disable,
7130 	.atomic_check = dm_encoder_helper_atomic_check
7131 };
7132 
7133 #if defined(CONFIG_DRM_AMD_DC_DCN)
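/*
 * Walk the new connector states and, for each MST stream, enable or disable
 * DSC on the corresponding port using the PBN values previously computed by
 * compute_mst_dsc_configs_for_state(), then record the resulting PBN and
 * VCPI slot count in the connector state.
 */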
7134 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7135 					    struct dc_state *dc_state,
7136 					    struct dsc_mst_fairness_vars *vars)
7137 {
7138 	struct dc_stream_state *stream = NULL;
7139 	struct drm_connector *connector;
7140 	struct drm_connector_state *new_con_state;
7141 	struct amdgpu_dm_connector *aconnector;
7142 	struct dm_connector_state *dm_conn_state;
7143 	int i, j, clock;
7144 	int vcpi, pbn_div, pbn = 0;
7145 
7146 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7147 
7148 		aconnector = to_amdgpu_dm_connector(connector);
7149 
7150 		if (!aconnector->port)
7151 			continue;
7152 
7153 		if (!new_con_state || !new_con_state->crtc)
7154 			continue;
7155 
7156 		dm_conn_state = to_dm_connector_state(new_con_state);
7157 
7158 		for (j = 0; j < dc_state->stream_count; j++) {
7159 			stream = dc_state->streams[j];
7160 			if (!stream)
7161 				continue;
7162 
7163 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7164 				break;
7165 
7166 			stream = NULL;
7167 		}
7168 
7169 		if (!stream)
7170 			continue;
7171 
7172 		if (stream->timing.flags.DSC != 1) {
7173 			drm_dp_mst_atomic_enable_dsc(state,
7174 						     aconnector->port,
7175 						     dm_conn_state->pbn,
7176 						     0,
7177 						     false);
7178 			continue;
7179 		}
7180 
7181 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7182 		clock = stream->timing.pix_clk_100hz / 10;
		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
7184 		for (j = 0; j < dc_state->stream_count; j++) {
7185 			if (vars[j].aconnector == aconnector) {
7186 				pbn = vars[j].pbn;
7187 				break;
7188 			}
7189 		}
7190 
7191 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7192 						    aconnector->port,
7193 						    pbn, pbn_div,
7194 						    true);
7195 		if (vcpi < 0)
7196 			return vcpi;
7197 
7198 		dm_conn_state->pbn = pbn;
7199 		dm_conn_state->vcpi_slots = vcpi;
7200 	}
7201 	return 0;
7202 }
7203 #endif
7204 
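/* Destroy any existing plane state and install a freshly zeroed dm_plane_state. */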
7205 static void dm_drm_plane_reset(struct drm_plane *plane)
7206 {
7207 	struct dm_plane_state *amdgpu_state = NULL;
7208 
7209 	if (plane->state)
7210 		plane->funcs->atomic_destroy_state(plane, plane->state);
7211 
7212 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7213 	WARN_ON(amdgpu_state == NULL);
7214 
7215 	if (amdgpu_state)
7216 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7217 }
7218 
7219 static struct drm_plane_state *
7220 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7221 {
7222 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7223 
7224 	old_dm_plane_state = to_dm_plane_state(plane->state);
7225 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7226 	if (!dm_plane_state)
7227 		return NULL;
7228 
7229 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7230 
7231 	if (old_dm_plane_state->dc_state) {
7232 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7233 		dc_plane_state_retain(dm_plane_state->dc_state);
7234 	}
7235 
7236 	return &dm_plane_state->base;
7237 }
7238 
7239 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7240 				struct drm_plane_state *state)
7241 {
7242 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7243 
7244 	if (dm_plane_state->dc_state)
7245 		dc_plane_state_release(dm_plane_state->dc_state);
7246 
7247 	drm_atomic_helper_plane_destroy_state(plane, state);
7248 }
7249 
7250 static const struct drm_plane_funcs dm_plane_funcs = {
7251 	.update_plane	= drm_atomic_helper_update_plane,
7252 	.disable_plane	= drm_atomic_helper_disable_plane,
7253 	.destroy	= drm_primary_helper_destroy,
7254 	.reset = dm_drm_plane_reset,
7255 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7256 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7257 	.format_mod_supported = dm_plane_format_mod_supported,
7258 };
7259 
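/*
 * Pin the framebuffer BO (VRAM for cursors, any supported domain otherwise),
 * make sure it has a GART mapping, record its GPU address in the
 * amdgpu_framebuffer and, for newly created planes, fill in the DC buffer
 * attributes.
 */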
7260 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7261 				      struct drm_plane_state *new_state)
7262 {
7263 	struct amdgpu_framebuffer *afb;
7264 	struct drm_gem_object *obj;
7265 	struct amdgpu_device *adev;
7266 	struct amdgpu_bo *rbo;
7267 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7268 	struct list_head list;
7269 	struct ttm_validate_buffer tv;
7270 	struct ww_acquire_ctx ticket;
7271 	uint32_t domain;
7272 	int r;
7273 
7274 	if (!new_state->fb) {
7275 		DRM_DEBUG_KMS("No FB bound\n");
7276 		return 0;
7277 	}
7278 
7279 	afb = to_amdgpu_framebuffer(new_state->fb);
7280 	obj = new_state->fb->obj[0];
7281 	rbo = gem_to_amdgpu_bo(obj);
7282 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7283 	INIT_LIST_HEAD(&list);
7284 
7285 	tv.bo = &rbo->tbo;
7286 	tv.num_shared = 1;
7287 	list_add(&tv.head, &list);
7288 
7289 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7290 	if (r) {
7291 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7292 		return r;
7293 	}
7294 
7295 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7296 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7297 	else
7298 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7299 
7300 	r = amdgpu_bo_pin(rbo, domain);
7301 	if (unlikely(r != 0)) {
7302 		if (r != -ERESTARTSYS)
7303 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7304 		ttm_eu_backoff_reservation(&ticket, &list);
7305 		return r;
7306 	}
7307 
7308 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7309 	if (unlikely(r != 0)) {
7310 		amdgpu_bo_unpin(rbo);
7311 		ttm_eu_backoff_reservation(&ticket, &list);
7312 		DRM_ERROR("%p bind failed\n", rbo);
7313 		return r;
7314 	}
7315 
7316 	ttm_eu_backoff_reservation(&ticket, &list);
7317 
7318 	afb->address = amdgpu_bo_gpu_offset(rbo);
7319 
7320 	amdgpu_bo_ref(rbo);
7321 
7322 	/**
7323 	 * We don't do surface updates on planes that have been newly created,
7324 	 * but we also don't have the afb->address during atomic check.
7325 	 *
7326 	 * Fill in buffer attributes depending on the address here, but only on
7327 	 * newly created planes since they're not being used by DC yet and this
7328 	 * won't modify global state.
7329 	 */
7330 	dm_plane_state_old = to_dm_plane_state(plane->state);
7331 	dm_plane_state_new = to_dm_plane_state(new_state);
7332 
7333 	if (dm_plane_state_new->dc_state &&
7334 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7335 		struct dc_plane_state *plane_state =
7336 			dm_plane_state_new->dc_state;
7337 		bool force_disable_dcc = !plane_state->dcc.enable;
7338 
7339 		fill_plane_buffer_attributes(
7340 			adev, afb, plane_state->format, plane_state->rotation,
7341 			afb->tiling_flags,
7342 			&plane_state->tiling_info, &plane_state->plane_size,
7343 			&plane_state->dcc, &plane_state->address,
7344 			afb->tmz_surface, force_disable_dcc);
7345 	}
7346 
7347 	return 0;
7348 }
7349 
7350 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7351 				       struct drm_plane_state *old_state)
7352 {
7353 	struct amdgpu_bo *rbo;
7354 	int r;
7355 
7356 	if (!old_state->fb)
7357 		return;
7358 
7359 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7360 	r = amdgpu_bo_reserve(rbo, false);
7361 	if (unlikely(r)) {
7362 		DRM_ERROR("failed to reserve rbo before unpin\n");
7363 		return;
7364 	}
7365 
7366 	amdgpu_bo_unpin(rbo);
7367 	amdgpu_bo_unreserve(rbo);
7368 	amdgpu_bo_unref(&rbo);
7369 }
7370 
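/*
 * For an enabled plane, validate the viewport against the CRTC's visible area
 * (non-cursor planes only) and translate the DC plane scaling caps into the
 * 16.16 fixed-point limits expected by drm_atomic_helper_check_plane_state().
 */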
7371 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7372 				       struct drm_crtc_state *new_crtc_state)
7373 {
7374 	struct drm_framebuffer *fb = state->fb;
7375 	int min_downscale, max_upscale;
7376 	int min_scale = 0;
7377 	int max_scale = INT_MAX;
7378 
7379 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7380 	if (fb && state->crtc) {
7381 		/* Validate viewport to cover the case when only the position changes */
7382 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7383 			int viewport_width = state->crtc_w;
7384 			int viewport_height = state->crtc_h;
7385 
7386 			if (state->crtc_x < 0)
7387 				viewport_width += state->crtc_x;
7388 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7389 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7390 
7391 			if (state->crtc_y < 0)
7392 				viewport_height += state->crtc_y;
7393 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7394 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7395 
7396 			if (viewport_width < 0 || viewport_height < 0) {
7397 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7398 				return -EINVAL;
			} else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* x2 for width because of pipe split. */
				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
7401 				return -EINVAL;
7402 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7403 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7404 				return -EINVAL;
7405 			}
7406 
7407 		}
7408 
7409 		/* Get min/max allowed scaling factors from plane caps. */
7410 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7411 					     &min_downscale, &max_upscale);
7412 		/*
7413 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7414 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7415 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7416 		 */
7417 		min_scale = (1000 << 16) / max_upscale;
7418 		max_scale = (1000 << 16) / min_downscale;
7419 	}
7420 
7421 	return drm_atomic_helper_check_plane_state(
7422 		state, new_crtc_state, min_scale, max_scale, true, true);
7423 }
7424 
7425 static int dm_plane_atomic_check(struct drm_plane *plane,
7426 				 struct drm_atomic_state *state)
7427 {
7428 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7429 										 plane);
7430 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7431 	struct dc *dc = adev->dm.dc;
7432 	struct dm_plane_state *dm_plane_state;
7433 	struct dc_scaling_info scaling_info;
7434 	struct drm_crtc_state *new_crtc_state;
7435 	int ret;
7436 
7437 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7438 
7439 	dm_plane_state = to_dm_plane_state(new_plane_state);
7440 
7441 	if (!dm_plane_state->dc_state)
7442 		return 0;
7443 
7444 	new_crtc_state =
7445 		drm_atomic_get_new_crtc_state(state,
7446 					      new_plane_state->crtc);
7447 	if (!new_crtc_state)
7448 		return -EINVAL;
7449 
7450 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7451 	if (ret)
7452 		return ret;
7453 
7454 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7455 	if (ret)
7456 		return ret;
7457 
7458 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7459 		return 0;
7460 
7461 	return -EINVAL;
7462 }
7463 
7464 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7465 				       struct drm_atomic_state *state)
7466 {
7467 	/* Only support async updates on cursor planes. */
7468 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7469 		return -EINVAL;
7470 
7471 	return 0;
7472 }
7473 
7474 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7475 					 struct drm_atomic_state *state)
7476 {
7477 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7478 									   plane);
7479 	struct drm_plane_state *old_state =
7480 		drm_atomic_get_old_plane_state(state, plane);
7481 
7482 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7483 
7484 	swap(plane->state->fb, new_state->fb);
7485 
7486 	plane->state->src_x = new_state->src_x;
7487 	plane->state->src_y = new_state->src_y;
7488 	plane->state->src_w = new_state->src_w;
7489 	plane->state->src_h = new_state->src_h;
7490 	plane->state->crtc_x = new_state->crtc_x;
7491 	plane->state->crtc_y = new_state->crtc_y;
7492 	plane->state->crtc_w = new_state->crtc_w;
7493 	plane->state->crtc_h = new_state->crtc_h;
7494 
7495 	handle_cursor_update(plane, old_state);
7496 }
7497 
7498 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7499 	.prepare_fb = dm_plane_helper_prepare_fb,
7500 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7501 	.atomic_check = dm_plane_atomic_check,
7502 	.atomic_async_check = dm_plane_atomic_async_check,
7503 	.atomic_async_update = dm_plane_atomic_async_update
7504 };
7505 
/*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * DRM check will succeed, and let DC implement the proper check.
 */
7512 static const uint32_t rgb_formats[] = {
7513 	DRM_FORMAT_XRGB8888,
7514 	DRM_FORMAT_ARGB8888,
7515 	DRM_FORMAT_RGBA8888,
7516 	DRM_FORMAT_XRGB2101010,
7517 	DRM_FORMAT_XBGR2101010,
7518 	DRM_FORMAT_ARGB2101010,
7519 	DRM_FORMAT_ABGR2101010,
7520 	DRM_FORMAT_XRGB16161616,
7521 	DRM_FORMAT_XBGR16161616,
7522 	DRM_FORMAT_ARGB16161616,
7523 	DRM_FORMAT_ABGR16161616,
7524 	DRM_FORMAT_XBGR8888,
7525 	DRM_FORMAT_ABGR8888,
7526 	DRM_FORMAT_RGB565,
7527 };
7528 
7529 static const uint32_t overlay_formats[] = {
7530 	DRM_FORMAT_XRGB8888,
7531 	DRM_FORMAT_ARGB8888,
7532 	DRM_FORMAT_RGBA8888,
7533 	DRM_FORMAT_XBGR8888,
7534 	DRM_FORMAT_ABGR8888,
7535 	DRM_FORMAT_RGB565
7536 };
7537 
7538 static const u32 cursor_formats[] = {
7539 	DRM_FORMAT_ARGB8888
7540 };
7541 
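/*
 * Fill @formats with the pixel formats supported by the given plane type.
 * NV12, P010 and the FP16 variants are only added for primary planes when
 * the DC plane caps report support. Returns the number of formats written.
 */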
7542 static int get_plane_formats(const struct drm_plane *plane,
7543 			     const struct dc_plane_cap *plane_cap,
7544 			     uint32_t *formats, int max_formats)
7545 {
7546 	int i, num_formats = 0;
7547 
7548 	/*
7549 	 * TODO: Query support for each group of formats directly from
7550 	 * DC plane caps. This will require adding more formats to the
7551 	 * caps list.
7552 	 */
7553 
7554 	switch (plane->type) {
7555 	case DRM_PLANE_TYPE_PRIMARY:
7556 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7557 			if (num_formats >= max_formats)
7558 				break;
7559 
7560 			formats[num_formats++] = rgb_formats[i];
7561 		}
7562 
7563 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7564 			formats[num_formats++] = DRM_FORMAT_NV12;
7565 		if (plane_cap && plane_cap->pixel_format_support.p010)
7566 			formats[num_formats++] = DRM_FORMAT_P010;
7567 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7568 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7569 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7570 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7571 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7572 		}
7573 		break;
7574 
7575 	case DRM_PLANE_TYPE_OVERLAY:
7576 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7577 			if (num_formats >= max_formats)
7578 				break;
7579 
7580 			formats[num_formats++] = overlay_formats[i];
7581 		}
7582 		break;
7583 
7584 	case DRM_PLANE_TYPE_CURSOR:
7585 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7586 			if (num_formats >= max_formats)
7587 				break;
7588 
7589 			formats[num_formats++] = cursor_formats[i];
7590 		}
7591 		break;
7592 	}
7593 
7594 	return num_formats;
7595 }
7596 
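/*
 * Initialize a DRM plane for DM: register the supported formats and
 * modifiers, then attach the blending, color-encoding and rotation
 * properties permitted by the plane capabilities.
 */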
7597 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7598 				struct drm_plane *plane,
7599 				unsigned long possible_crtcs,
7600 				const struct dc_plane_cap *plane_cap)
7601 {
7602 	uint32_t formats[32];
7603 	int num_formats;
7604 	int res = -EPERM;
7605 	unsigned int supported_rotations;
7606 	uint64_t *modifiers = NULL;
7607 
7608 	num_formats = get_plane_formats(plane, plane_cap, formats,
7609 					ARRAY_SIZE(formats));
7610 
7611 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7612 	if (res)
7613 		return res;
7614 
7615 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7616 				       &dm_plane_funcs, formats, num_formats,
7617 				       modifiers, plane->type, NULL);
7618 	kfree(modifiers);
7619 	if (res)
7620 		return res;
7621 
7622 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7623 	    plane_cap && plane_cap->per_pixel_alpha) {
7624 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7625 					  BIT(DRM_MODE_BLEND_PREMULTI);
7626 
7627 		drm_plane_create_alpha_property(plane);
7628 		drm_plane_create_blend_mode_property(plane, blend_caps);
7629 	}
7630 
7631 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7632 	    plane_cap &&
7633 	    (plane_cap->pixel_format_support.nv12 ||
7634 	     plane_cap->pixel_format_support.p010)) {
7635 		/* This only affects YUV formats. */
7636 		drm_plane_create_color_properties(
7637 			plane,
7638 			BIT(DRM_COLOR_YCBCR_BT601) |
7639 			BIT(DRM_COLOR_YCBCR_BT709) |
7640 			BIT(DRM_COLOR_YCBCR_BT2020),
7641 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7642 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7643 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7644 	}
7645 
7646 	supported_rotations =
7647 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7648 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7649 
7650 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7651 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7652 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7653 						   supported_rotations);
7654 
7655 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7656 
7657 	/* Create (reset) the plane state */
7658 	if (plane->funcs->reset)
7659 		plane->funcs->reset(plane);
7660 
7661 	return 0;
7662 }
7663 
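/*
 * Initialize an amdgpu_crtc: allocate its dedicated cursor plane, register
 * the CRTC with its primary and cursor planes, and set up color management,
 * the legacy gamma size and the maximum cursor dimensions.
 */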
7664 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7665 			       struct drm_plane *plane,
7666 			       uint32_t crtc_index)
7667 {
7668 	struct amdgpu_crtc *acrtc = NULL;
7669 	struct drm_plane *cursor_plane;
7670 
7671 	int res = -ENOMEM;
7672 
7673 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7674 	if (!cursor_plane)
7675 		goto fail;
7676 
7677 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7678 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7679 
7680 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7681 	if (!acrtc)
7682 		goto fail;
7683 
7684 	res = drm_crtc_init_with_planes(
7685 			dm->ddev,
7686 			&acrtc->base,
7687 			plane,
7688 			cursor_plane,
7689 			&amdgpu_dm_crtc_funcs, NULL);
7690 
7691 	if (res)
7692 		goto fail;
7693 
7694 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7695 
	/* Create (reset) the CRTC state */
7697 	if (acrtc->base.funcs->reset)
7698 		acrtc->base.funcs->reset(&acrtc->base);
7699 
7700 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7701 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7702 
7703 	acrtc->crtc_id = crtc_index;
7704 	acrtc->base.enabled = false;
7705 	acrtc->otg_inst = -1;
7706 
7707 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7708 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7709 				   true, MAX_COLOR_LUT_ENTRIES);
7710 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7711 
7712 	return 0;
7713 
7714 fail:
7715 	kfree(acrtc);
7716 	kfree(cursor_plane);
7717 	return res;
7718 }
7719 
7720 
7721 static int to_drm_connector_type(enum signal_type st)
7722 {
7723 	switch (st) {
7724 	case SIGNAL_TYPE_HDMI_TYPE_A:
7725 		return DRM_MODE_CONNECTOR_HDMIA;
7726 	case SIGNAL_TYPE_EDP:
7727 		return DRM_MODE_CONNECTOR_eDP;
7728 	case SIGNAL_TYPE_LVDS:
7729 		return DRM_MODE_CONNECTOR_LVDS;
7730 	case SIGNAL_TYPE_RGB:
7731 		return DRM_MODE_CONNECTOR_VGA;
7732 	case SIGNAL_TYPE_DISPLAY_PORT:
7733 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7734 		return DRM_MODE_CONNECTOR_DisplayPort;
7735 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7736 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7737 		return DRM_MODE_CONNECTOR_DVID;
7738 	case SIGNAL_TYPE_VIRTUAL:
7739 		return DRM_MODE_CONNECTOR_VIRTUAL;
7740 
7741 	default:
7742 		return DRM_MODE_CONNECTOR_Unknown;
7743 	}
7744 }
7745 
7746 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7747 {
7748 	struct drm_encoder *encoder;
7749 
7750 	/* There is only one encoder per connector */
7751 	drm_connector_for_each_possible_encoder(connector, encoder)
7752 		return encoder;
7753 
7754 	return NULL;
7755 }
7756 
7757 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7758 {
7759 	struct drm_encoder *encoder;
7760 	struct amdgpu_encoder *amdgpu_encoder;
7761 
7762 	encoder = amdgpu_dm_connector_to_encoder(connector);
7763 
7764 	if (encoder == NULL)
7765 		return;
7766 
7767 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7768 
7769 	amdgpu_encoder->native_mode.clock = 0;
7770 
7771 	if (!list_empty(&connector->probed_modes)) {
7772 		struct drm_display_mode *preferred_mode = NULL;
7773 
7774 		list_for_each_entry(preferred_mode,
7775 				    &connector->probed_modes,
7776 				    head) {
7777 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7778 				amdgpu_encoder->native_mode = *preferred_mode;
7779 
7780 			break;
7781 		}
7782 
7783 	}
7784 }
7785 
7786 static struct drm_display_mode *
7787 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7788 			     char *name,
7789 			     int hdisplay, int vdisplay)
7790 {
7791 	struct drm_device *dev = encoder->dev;
7792 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7793 	struct drm_display_mode *mode = NULL;
7794 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7795 
7796 	mode = drm_mode_duplicate(dev, native_mode);
7797 
7798 	if (mode == NULL)
7799 		return NULL;
7800 
7801 	mode->hdisplay = hdisplay;
7802 	mode->vdisplay = vdisplay;
7803 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7804 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7805 
7806 	return mode;
7807 
7808 }
7809 
7810 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7811 						 struct drm_connector *connector)
7812 {
7813 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7814 	struct drm_display_mode *mode = NULL;
7815 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7816 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7817 				to_amdgpu_dm_connector(connector);
7818 	int i;
7819 	int n;
7820 	struct mode_size {
7821 		char name[DRM_DISPLAY_MODE_LEN];
7822 		int w;
7823 		int h;
7824 	} common_modes[] = {
7825 		{  "640x480",  640,  480},
7826 		{  "800x600",  800,  600},
7827 		{ "1024x768", 1024,  768},
7828 		{ "1280x720", 1280,  720},
7829 		{ "1280x800", 1280,  800},
7830 		{"1280x1024", 1280, 1024},
7831 		{ "1440x900", 1440,  900},
7832 		{"1680x1050", 1680, 1050},
7833 		{"1600x1200", 1600, 1200},
7834 		{"1920x1080", 1920, 1080},
7835 		{"1920x1200", 1920, 1200}
7836 	};
7837 
7838 	n = ARRAY_SIZE(common_modes);
7839 
7840 	for (i = 0; i < n; i++) {
7841 		struct drm_display_mode *curmode = NULL;
7842 		bool mode_existed = false;
7843 
7844 		if (common_modes[i].w > native_mode->hdisplay ||
7845 		    common_modes[i].h > native_mode->vdisplay ||
7846 		   (common_modes[i].w == native_mode->hdisplay &&
7847 		    common_modes[i].h == native_mode->vdisplay))
7848 			continue;
7849 
7850 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7851 			if (common_modes[i].w == curmode->hdisplay &&
7852 			    common_modes[i].h == curmode->vdisplay) {
7853 				mode_existed = true;
7854 				break;
7855 			}
7856 		}
7857 
7858 		if (mode_existed)
7859 			continue;
7860 
7861 		mode = amdgpu_dm_create_common_mode(encoder,
7862 				common_modes[i].name, common_modes[i].w,
7863 				common_modes[i].h);
7864 		drm_mode_probed_add(connector, mode);
7865 		amdgpu_dm_connector->num_modes++;
7866 	}
7867 }
7868 
7869 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7870 {
7871 	struct drm_encoder *encoder;
7872 	struct amdgpu_encoder *amdgpu_encoder;
7873 	const struct drm_display_mode *native_mode;
7874 
7875 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7876 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7877 		return;
7878 
7879 	encoder = amdgpu_dm_connector_to_encoder(connector);
7880 	if (!encoder)
7881 		return;
7882 
7883 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7884 
7885 	native_mode = &amdgpu_encoder->native_mode;
7886 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7887 		return;
7888 
7889 	drm_connector_set_panel_orientation_with_quirk(connector,
7890 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7891 						       native_mode->hdisplay,
7892 						       native_mode->vdisplay);
7893 }
7894 
7895 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7896 					      struct edid *edid)
7897 {
7898 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7899 			to_amdgpu_dm_connector(connector);
7900 
7901 	if (edid) {
7902 		/* empty probed_modes */
7903 		INIT_LIST_HEAD(&connector->probed_modes);
7904 		amdgpu_dm_connector->num_modes =
7905 				drm_add_edid_modes(connector, edid);
7906 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can contain
		 * more than one preferred mode, and a mode later in the
		 * probed list may have a higher, preferred resolution.
		 * For example, 3840x2160 may be the preferred timing in
		 * the base EDID while a 4096x2160 preferred resolution
		 * appears in a DID extension block later.
		 */
7915 		drm_mode_sort(&connector->probed_modes);
7916 		amdgpu_dm_get_native_mode(connector);
7917 
7918 		/* Freesync capabilities are reset by calling
7919 		 * drm_add_edid_modes() and need to be
7920 		 * restored here.
7921 		 */
7922 		amdgpu_dm_update_freesync_caps(connector, edid);
7923 
7924 		amdgpu_set_panel_orientation(connector);
7925 	} else {
7926 		amdgpu_dm_connector->num_modes = 0;
7927 	}
7928 }
7929 
7930 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7931 			      struct drm_display_mode *mode)
7932 {
7933 	struct drm_display_mode *m;
7934 
	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7936 		if (drm_mode_equal(m, mode))
7937 			return true;
7938 	}
7939 
7940 	return false;
7941 }
7942 
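/*
 * Synthesize extra fixed-refresh modes at common video rates that fall inside
 * the panel's freesync range by stretching the vertical total of the highest
 * refresh rate probed mode. Returns the number of modes added.
 */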
7943 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7944 {
7945 	const struct drm_display_mode *m;
7946 	struct drm_display_mode *new_mode;
7947 	uint i;
7948 	uint32_t new_modes_count = 0;
7949 
7950 	/* Standard FPS values
7951 	 *
7952 	 * 23.976   - TV/NTSC
7953 	 * 24 	    - Cinema
7954 	 * 25 	    - TV/PAL
7955 	 * 29.97    - TV/NTSC
7956 	 * 30 	    - TV/NTSC
7957 	 * 48 	    - Cinema HFR
7958 	 * 50 	    - TV/PAL
7959 	 * 60 	    - Commonly used
7960 	 * 48,72,96 - Multiples of 24
7961 	 */
7962 	static const uint32_t common_rates[] = {
7963 		23976, 24000, 25000, 29970, 30000,
7964 		48000, 50000, 60000, 72000, 96000
7965 	};
7966 
7967 	/*
7968 	 * Find mode with highest refresh rate with the same resolution
7969 	 * as the preferred mode. Some monitors report a preferred mode
7970 	 * with lower resolution than the highest refresh rate supported.
7971 	 */
7972 
7973 	m = get_highest_refresh_rate_mode(aconnector, true);
7974 	if (!m)
7975 		return 0;
7976 
7977 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7978 		uint64_t target_vtotal, target_vtotal_diff;
7979 		uint64_t num, den;
7980 
7981 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7982 			continue;
7983 
7984 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7985 		    common_rates[i] > aconnector->max_vfreq * 1000)
7986 			continue;
7987 
7988 		num = (unsigned long long)m->clock * 1000 * 1000;
7989 		den = common_rates[i] * (unsigned long long)m->htotal;
7990 		target_vtotal = div_u64(num, den);
7991 		target_vtotal_diff = target_vtotal - m->vtotal;
7992 
7993 		/* Check for illegal modes */
7994 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7995 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7996 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7997 			continue;
7998 
7999 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8000 		if (!new_mode)
8001 			goto out;
8002 
8003 		new_mode->vtotal += (u16)target_vtotal_diff;
8004 		new_mode->vsync_start += (u16)target_vtotal_diff;
8005 		new_mode->vsync_end += (u16)target_vtotal_diff;
8006 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8007 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8008 
8009 		if (!is_duplicate_mode(aconnector, new_mode)) {
8010 			drm_mode_probed_add(&aconnector->base, new_mode);
8011 			new_modes_count += 1;
8012 		} else
8013 			drm_mode_destroy(aconnector->base.dev, new_mode);
8014 	}
8015  out:
8016 	return new_modes_count;
8017 }
8018 
8019 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8020 						   struct edid *edid)
8021 {
8022 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8023 		to_amdgpu_dm_connector(connector);
8024 
8025 	if (!(amdgpu_freesync_vid_mode && edid))
8026 		return;
8027 
8028 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8029 		amdgpu_dm_connector->num_modes +=
8030 			add_fs_modes(amdgpu_dm_connector);
8031 }
8032 
8033 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8034 {
8035 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8036 			to_amdgpu_dm_connector(connector);
8037 	struct drm_encoder *encoder;
8038 	struct edid *edid = amdgpu_dm_connector->edid;
8039 
8040 	encoder = amdgpu_dm_connector_to_encoder(connector);
8041 
8042 	if (!drm_edid_is_valid(edid)) {
8043 		amdgpu_dm_connector->num_modes =
8044 				drm_add_modes_noedid(connector, 640, 480);
8045 	} else {
8046 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8047 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8048 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8049 	}
8050 	amdgpu_dm_fbc_init(connector);
8051 
8052 	return amdgpu_dm_connector->num_modes;
8053 }
8054 
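/*
 * Finish common connector initialization: set defaults and HPD polling, and
 * attach the scaling, underscan, max bpc, ABM, HDR metadata, VRR and content
 * protection properties appropriate for the connector type.
 */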
8055 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8056 				     struct amdgpu_dm_connector *aconnector,
8057 				     int connector_type,
8058 				     struct dc_link *link,
8059 				     int link_index)
8060 {
8061 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8062 
8063 	/*
8064 	 * Some of the properties below require access to state, like bpc.
8065 	 * Allocate some default initial connector state with our reset helper.
8066 	 */
8067 	if (aconnector->base.funcs->reset)
8068 		aconnector->base.funcs->reset(&aconnector->base);
8069 
8070 	aconnector->connector_id = link_index;
8071 	aconnector->dc_link = link;
8072 	aconnector->base.interlace_allowed = false;
8073 	aconnector->base.doublescan_allowed = false;
8074 	aconnector->base.stereo_allowed = false;
8075 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8076 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8077 	aconnector->audio_inst = -1;
8078 	mutex_init(&aconnector->hpd_lock);
8079 
8080 	/*
	 * Configure HPD hot-plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
8083 	 */
8084 	switch (connector_type) {
8085 	case DRM_MODE_CONNECTOR_HDMIA:
8086 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8087 		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
8089 		break;
8090 	case DRM_MODE_CONNECTOR_DisplayPort:
8091 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8092 		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
8094 		break;
8095 	case DRM_MODE_CONNECTOR_DVID:
8096 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8097 		break;
8098 	default:
8099 		break;
8100 	}
8101 
8102 	drm_object_attach_property(&aconnector->base.base,
8103 				dm->ddev->mode_config.scaling_mode_property,
8104 				DRM_MODE_SCALE_NONE);
8105 
8106 	drm_object_attach_property(&aconnector->base.base,
8107 				adev->mode_info.underscan_property,
8108 				UNDERSCAN_OFF);
8109 	drm_object_attach_property(&aconnector->base.base,
8110 				adev->mode_info.underscan_hborder_property,
8111 				0);
8112 	drm_object_attach_property(&aconnector->base.base,
8113 				adev->mode_info.underscan_vborder_property,
8114 				0);
8115 
8116 	if (!aconnector->mst_port)
8117 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8118 
	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
8120 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8121 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8122 
8123 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8124 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8125 		drm_object_attach_property(&aconnector->base.base,
8126 				adev->mode_info.abm_level_property, 0);
8127 	}
8128 
8129 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8130 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8131 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8132 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8133 
8134 		if (!aconnector->mst_port)
8135 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8136 
8137 #ifdef CONFIG_DRM_AMD_DC_HDCP
8138 		if (adev->dm.hdcp_workqueue)
8139 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8140 #endif
8141 	}
8142 }
8143 
8144 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8145 			      struct i2c_msg *msgs, int num)
8146 {
8147 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8148 	struct ddc_service *ddc_service = i2c->ddc_service;
8149 	struct i2c_command cmd;
8150 	int i;
8151 	int result = -EIO;
8152 
8153 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8154 
8155 	if (!cmd.payloads)
8156 		return result;
8157 
8158 	cmd.number_of_payloads = num;
8159 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8160 	cmd.speed = 100;
8161 
8162 	for (i = 0; i < num; i++) {
8163 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8164 		cmd.payloads[i].address = msgs[i].addr;
8165 		cmd.payloads[i].length = msgs[i].len;
8166 		cmd.payloads[i].data = msgs[i].buf;
8167 	}
8168 
8169 	if (dc_submit_i2c(
8170 			ddc_service->ctx->dc,
8171 			ddc_service->ddc_pin->hw_info.ddc_channel,
8172 			&cmd))
8173 		result = num;
8174 
8175 	kfree(cmd.payloads);
8176 	return result;
8177 }
8178 
8179 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8180 {
8181 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8182 }
8183 
8184 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8185 	.master_xfer = amdgpu_dm_i2c_xfer,
8186 	.functionality = amdgpu_dm_i2c_func,
8187 };
8188 
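/*
 * Allocate an amdgpu_i2c_adapter whose transfers are forwarded to DC through
 * the link's DDC service (see amdgpu_dm_i2c_xfer()).
 */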
8189 static struct amdgpu_i2c_adapter *
8190 create_i2c(struct ddc_service *ddc_service,
8191 	   int link_index,
8192 	   int *res)
8193 {
8194 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8195 	struct amdgpu_i2c_adapter *i2c;
8196 
8197 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8198 	if (!i2c)
8199 		return NULL;
8200 	i2c->base.owner = THIS_MODULE;
8201 	i2c->base.class = I2C_CLASS_DDC;
8202 	i2c->base.dev.parent = &adev->pdev->dev;
8203 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8204 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8205 	i2c_set_adapdata(&i2c->base, i2c);
8206 	i2c->ddc_service = ddc_service;
8207 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8208 
8209 	return i2c;
8210 }
8211 
8212 
8213 /*
8214  * Note: this function assumes that dc_link_detect() was called for the
8215  * dc_link which will be represented by this aconnector.
8216  */
8217 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8218 				    struct amdgpu_dm_connector *aconnector,
8219 				    uint32_t link_index,
8220 				    struct amdgpu_encoder *aencoder)
8221 {
8222 	int res = 0;
8223 	int connector_type;
8224 	struct dc *dc = dm->dc;
8225 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8226 	struct amdgpu_i2c_adapter *i2c;
8227 
8228 	link->priv = aconnector;
8229 
8230 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8231 
8232 	i2c = create_i2c(link->ddc, link->link_index, &res);
8233 	if (!i2c) {
8234 		DRM_ERROR("Failed to create i2c adapter data\n");
8235 		return -ENOMEM;
8236 	}
8237 
8238 	aconnector->i2c = i2c;
8239 	res = i2c_add_adapter(&i2c->base);
8240 
8241 	if (res) {
8242 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8243 		goto out_free;
8244 	}
8245 
8246 	connector_type = to_drm_connector_type(link->connector_signal);
8247 
8248 	res = drm_connector_init_with_ddc(
8249 			dm->ddev,
8250 			&aconnector->base,
8251 			&amdgpu_dm_connector_funcs,
8252 			connector_type,
8253 			&i2c->base);
8254 
8255 	if (res) {
8256 		DRM_ERROR("connector_init failed\n");
8257 		aconnector->connector_id = -1;
8258 		goto out_free;
8259 	}
8260 
8261 	drm_connector_helper_add(
8262 			&aconnector->base,
8263 			&amdgpu_dm_connector_helper_funcs);
8264 
8265 	amdgpu_dm_connector_init_helper(
8266 		dm,
8267 		aconnector,
8268 		connector_type,
8269 		link,
8270 		link_index);
8271 
8272 	drm_connector_attach_encoder(
8273 		&aconnector->base, &aencoder->base);
8274 
8275 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8276 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8277 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8278 
8279 out_free:
8280 	if (res) {
8281 		kfree(i2c);
8282 		aconnector->i2c = NULL;
8283 	}
8284 	return res;
8285 }
8286 
8287 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8288 {
8289 	switch (adev->mode_info.num_crtc) {
8290 	case 1:
8291 		return 0x1;
8292 	case 2:
8293 		return 0x3;
8294 	case 3:
8295 		return 0x7;
8296 	case 4:
8297 		return 0xf;
8298 	case 5:
8299 		return 0x1f;
8300 	case 6:
8301 	default:
8302 		return 0x3f;
8303 	}
8304 }
8305 
8306 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8307 				  struct amdgpu_encoder *aencoder,
8308 				  uint32_t link_index)
8309 {
8310 	struct amdgpu_device *adev = drm_to_adev(dev);
8311 
8312 	int res = drm_encoder_init(dev,
8313 				   &aencoder->base,
8314 				   &amdgpu_dm_encoder_funcs,
8315 				   DRM_MODE_ENCODER_TMDS,
8316 				   NULL);
8317 
8318 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8319 
8320 	if (!res)
8321 		aencoder->encoder_id = link_index;
8322 	else
8323 		aencoder->encoder_id = -1;
8324 
8325 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8326 
8327 	return res;
8328 }
8329 
8330 static void manage_dm_interrupts(struct amdgpu_device *adev,
8331 				 struct amdgpu_crtc *acrtc,
8332 				 bool enable)
8333 {
8334 	/*
8335 	 * We have no guarantee that the frontend index maps to the same
8336 	 * backend index - some even map to more than one.
8337 	 *
8338 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8339 	 */
8340 	int irq_type =
8341 		amdgpu_display_crtc_idx_to_irq_type(
8342 			adev,
8343 			acrtc->crtc_id);
8344 
8345 	if (enable) {
8346 		drm_crtc_vblank_on(&acrtc->base);
8347 		amdgpu_irq_get(
8348 			adev,
8349 			&adev->pageflip_irq,
8350 			irq_type);
8351 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8352 		amdgpu_irq_get(
8353 			adev,
8354 			&adev->vline0_irq,
8355 			irq_type);
8356 #endif
8357 	} else {
8358 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8359 		amdgpu_irq_put(
8360 			adev,
8361 			&adev->vline0_irq,
8362 			irq_type);
8363 #endif
8364 		amdgpu_irq_put(
8365 			adev,
8366 			&adev->pageflip_irq,
8367 			irq_type);
8368 		drm_crtc_vblank_off(&acrtc->base);
8369 	}
8370 }
8371 
8372 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8373 				      struct amdgpu_crtc *acrtc)
8374 {
8375 	int irq_type =
8376 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8377 
	/*
	 * This reads the current state for the IRQ and forcibly reapplies
8380 	 * the setting to hardware.
8381 	 */
8382 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8383 }
8384 
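/*
 * Return true when the scaling mode or the underscan settings differ between
 * the old and new connector states.
 */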
8385 static bool
8386 is_scaling_state_different(const struct dm_connector_state *dm_state,
8387 			   const struct dm_connector_state *old_dm_state)
8388 {
8389 	if (dm_state->scaling != old_dm_state->scaling)
8390 		return true;
8391 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8392 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8393 			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8395 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8396 			return true;
8397 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8398 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8399 		return true;
8400 	return false;
8401 }
8402 
8403 #ifdef CONFIG_DRM_AMD_DC_HDCP
8404 static bool is_content_protection_different(struct drm_connector_state *state,
8405 					    const struct drm_connector_state *old_state,
8406 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8407 {
8408 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8409 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8410 
8411 	/* Handle: Type0/1 change */
8412 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8413 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8414 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8415 		return true;
8416 	}
8417 
	/* CP is being re-enabled, ignore this.
8419 	 *
8420 	 * Handles:	ENABLED -> DESIRED
8421 	 */
8422 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8423 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8424 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8425 		return false;
8426 	}
8427 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the
	 * restored state will be ENABLED.
	 *
	 * Handles:	UNDESIRED -> ENABLED
	 */
8432 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8433 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8434 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8435 
8436 	/* Stream removed and re-enabled
8437 	 *
8438 	 * Can sometimes overlap with the HPD case,
8439 	 * thus set update_hdcp to false to avoid
8440 	 * setting HDCP multiple times.
8441 	 *
8442 	 * Handles:	DESIRED -> DESIRED (Special case)
8443 	 */
8444 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8445 		state->crtc && state->crtc->enabled &&
8446 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8447 		dm_con_state->update_hdcp = false;
8448 		return true;
8449 	}
8450 
8451 	/* Hot-plug, headless s3, dpms
8452 	 *
8453 	 * Only start HDCP if the display is connected/enabled.
8454 	 * update_hdcp flag will be set to false until the next
8455 	 * HPD comes in.
8456 	 *
8457 	 * Handles:	DESIRED -> DESIRED (Special case)
8458 	 */
8459 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8460 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8461 		dm_con_state->update_hdcp = false;
8462 		return true;
8463 	}
8464 
8465 	/*
8466 	 * Handles:	UNDESIRED -> UNDESIRED
8467 	 *		DESIRED -> DESIRED
8468 	 *		ENABLED -> ENABLED
8469 	 */
8470 	if (old_state->content_protection == state->content_protection)
8471 		return false;
8472 
8473 	/*
8474 	 * Handles:	UNDESIRED -> DESIRED
8475 	 *		DESIRED -> UNDESIRED
8476 	 *		ENABLED -> UNDESIRED
8477 	 */
8478 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8479 		return true;
8480 
8481 	/*
8482 	 * Handles:	DESIRED -> ENABLED
8483 	 */
8484 	return false;
8485 }
8486 
8487 #endif
8488 static void remove_stream(struct amdgpu_device *adev,
8489 			  struct amdgpu_crtc *acrtc,
8490 			  struct dc_stream_state *stream)
8491 {
8492 	/* this is the update mode case */
8493 
8494 	acrtc->otg_inst = -1;
8495 	acrtc->enabled = false;
8496 }
8497 
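/*
 * Translate the cursor plane's DRM state into a dc_cursor_position. Negative
 * coordinates are clamped to zero and folded into the hotspot so the cursor
 * can hang partially off-screen; a cursor that is fully off-screen or has no
 * framebuffer is left disabled.
 */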
8498 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8499 			       struct dc_cursor_position *position)
8500 {
8501 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8502 	int x, y;
8503 	int xorigin = 0, yorigin = 0;
8504 
8505 	if (!crtc || !plane->state->fb)
8506 		return 0;
8507 
8508 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8509 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8510 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8511 			  __func__,
8512 			  plane->state->crtc_w,
8513 			  plane->state->crtc_h);
8514 		return -EINVAL;
8515 	}
8516 
8517 	x = plane->state->crtc_x;
8518 	y = plane->state->crtc_y;
8519 
8520 	if (x <= -amdgpu_crtc->max_cursor_width ||
8521 	    y <= -amdgpu_crtc->max_cursor_height)
8522 		return 0;
8523 
8524 	if (x < 0) {
8525 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8526 		x = 0;
8527 	}
8528 	if (y < 0) {
8529 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8530 		y = 0;
8531 	}
8532 	position->enable = true;
8533 	position->translate_by_source = true;
8534 	position->x = x;
8535 	position->y = y;
8536 	position->x_hotspot = xorigin;
8537 	position->y_hotspot = yorigin;
8538 
8539 	return 0;
8540 }
8541 
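/*
 * Program the DC cursor attributes and position for the CRTC the cursor plane
 * is (or was) attached to, or disable the cursor when it has no valid
 * position.
 */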
8542 static void handle_cursor_update(struct drm_plane *plane,
8543 				 struct drm_plane_state *old_plane_state)
8544 {
8545 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8546 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8547 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8548 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8549 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8550 	uint64_t address = afb ? afb->address : 0;
8551 	struct dc_cursor_position position = {0};
8552 	struct dc_cursor_attributes attributes;
8553 	int ret;
8554 
8555 	if (!plane->state->fb && !old_plane_state->fb)
8556 		return;
8557 
8558 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8559 		      __func__,
8560 		      amdgpu_crtc->crtc_id,
8561 		      plane->state->crtc_w,
8562 		      plane->state->crtc_h);
8563 
8564 	ret = get_cursor_position(plane, crtc, &position);
8565 	if (ret)
8566 		return;
8567 
8568 	if (!position.enable) {
8569 		/* turn off cursor */
8570 		if (crtc_state && crtc_state->stream) {
8571 			mutex_lock(&adev->dm.dc_lock);
8572 			dc_stream_set_cursor_position(crtc_state->stream,
8573 						      &position);
8574 			mutex_unlock(&adev->dm.dc_lock);
8575 		}
8576 		return;
8577 	}
8578 
8579 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8580 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8581 
8582 	memset(&attributes, 0, sizeof(attributes));
8583 	attributes.address.high_part = upper_32_bits(address);
8584 	attributes.address.low_part  = lower_32_bits(address);
8585 	attributes.width             = plane->state->crtc_w;
8586 	attributes.height            = plane->state->crtc_h;
8587 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8588 	attributes.rotation_angle    = 0;
8589 	attributes.attribute_flags.value = 0;
8590 
8591 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8592 
8593 	if (crtc_state->stream) {
8594 		mutex_lock(&adev->dm.dc_lock);
8595 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8596 							 &attributes))
8597 			DRM_ERROR("DC failed to set cursor attributes\n");
8598 
8599 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8600 						   &position))
8601 			DRM_ERROR("DC failed to set cursor position\n");
8602 		mutex_unlock(&adev->dm.dc_lock);
8603 	}
8604 }
8605 
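/*
 * Hand the pending pageflip event over to the pageflip interrupt handler by
 * moving it from the CRTC state into the amdgpu_crtc and marking the flip as
 * submitted. Caller must hold the DRM event_lock.
 */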
8606 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8607 {
8608 
8609 	assert_spin_locked(&acrtc->base.dev->event_lock);
8610 	WARN_ON(acrtc->event);
8611 
8612 	acrtc->event = acrtc->base.state->event;
8613 
8614 	/* Set the flip status */
8615 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8616 
8617 	/* Mark this event as consumed */
8618 	acrtc->base.state->event = NULL;
8619 
8620 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8621 		     acrtc->crtc_id);
8622 }
8623 
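/*
 * Rebuild the VRR parameters and infopacket for the stream around a page flip
 * and mirror them into the CRTC's dm_irq_params so the interrupt handlers see
 * a consistent view.
 */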
8624 static void update_freesync_state_on_stream(
8625 	struct amdgpu_display_manager *dm,
8626 	struct dm_crtc_state *new_crtc_state,
8627 	struct dc_stream_state *new_stream,
8628 	struct dc_plane_state *surface,
8629 	u32 flip_timestamp_in_us)
8630 {
8631 	struct mod_vrr_params vrr_params;
8632 	struct dc_info_packet vrr_infopacket = {0};
8633 	struct amdgpu_device *adev = dm->adev;
8634 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8635 	unsigned long flags;
8636 	bool pack_sdp_v1_3 = false;
8637 
8638 	if (!new_stream)
8639 		return;
8640 
8641 	/*
8642 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8643 	 * For now it's sufficient to just guard against these conditions.
8644 	 */
8645 
8646 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8647 		return;
8648 
8649 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
8651 
8652 	if (surface) {
8653 		mod_freesync_handle_preflip(
8654 			dm->freesync_module,
8655 			surface,
8656 			new_stream,
8657 			flip_timestamp_in_us,
8658 			&vrr_params);
8659 
8660 		if (adev->family < AMDGPU_FAMILY_AI &&
8661 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8662 			mod_freesync_handle_v_update(dm->freesync_module,
8663 						     new_stream, &vrr_params);
8664 
8665 			/* Need to call this before the frame ends. */
8666 			dc_stream_adjust_vmin_vmax(dm->dc,
8667 						   new_crtc_state->stream,
8668 						   &vrr_params.adjust);
8669 		}
8670 	}
8671 
8672 	mod_freesync_build_vrr_infopacket(
8673 		dm->freesync_module,
8674 		new_stream,
8675 		&vrr_params,
8676 		PACKET_TYPE_VRR,
8677 		TRANSFER_FUNC_UNKNOWN,
8678 		&vrr_infopacket,
8679 		pack_sdp_v1_3);
8680 
8681 	new_crtc_state->freesync_timing_changed |=
8682 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8683 			&vrr_params.adjust,
8684 			sizeof(vrr_params.adjust)) != 0);
8685 
8686 	new_crtc_state->freesync_vrr_info_changed |=
8687 		(memcmp(&new_crtc_state->vrr_infopacket,
8688 			&vrr_infopacket,
8689 			sizeof(vrr_infopacket)) != 0);
8690 
8691 	acrtc->dm_irq_params.vrr_params = vrr_params;
8692 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8693 
8694 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8695 	new_stream->vrr_infopacket = vrr_infopacket;
8696 
8697 	if (new_crtc_state->freesync_vrr_info_changed)
8698 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8699 			      new_crtc_state->base.crtc->base.id,
8700 			      (int)new_crtc_state->base.vrr_enabled,
8701 			      (int)vrr_params.state);
8702 
8703 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8704 }
8705 
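/*
 * Compute the freesync/VRR configuration for the new CRTC state and copy it
 * into dm_irq_params for access from the DM interrupt handlers.
 */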
8706 static void update_stream_irq_parameters(
8707 	struct amdgpu_display_manager *dm,
8708 	struct dm_crtc_state *new_crtc_state)
8709 {
8710 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8711 	struct mod_vrr_params vrr_params;
8712 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8713 	struct amdgpu_device *adev = dm->adev;
8714 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8715 	unsigned long flags;
8716 
8717 	if (!new_stream)
8718 		return;
8719 
8720 	/*
8721 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8722 	 * For now it's sufficient to just guard against these conditions.
8723 	 */
8724 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8725 		return;
8726 
8727 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8728 	vrr_params = acrtc->dm_irq_params.vrr_params;
8729 
8730 	if (new_crtc_state->vrr_supported &&
8731 	    config.min_refresh_in_uhz &&
8732 	    config.max_refresh_in_uhz) {
8733 		/*
8734 		 * if freesync compatible mode was set, config.state will be set
8735 		 * in atomic check
8736 		 */
8737 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8738 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8739 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8740 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8741 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8742 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8743 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8744 		} else {
8745 			config.state = new_crtc_state->base.vrr_enabled ?
8746 						     VRR_STATE_ACTIVE_VARIABLE :
8747 						     VRR_STATE_INACTIVE;
8748 		}
8749 	} else {
8750 		config.state = VRR_STATE_UNSUPPORTED;
8751 	}
8752 
8753 	mod_freesync_build_vrr_params(dm->freesync_module,
8754 				      new_stream,
8755 				      &config, &vrr_params);
8756 
8757 	new_crtc_state->freesync_timing_changed |=
8758 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8759 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8760 
8761 	new_crtc_state->freesync_config = config;
8762 	/* Copy state for access from DM IRQ handler */
8763 	acrtc->dm_irq_params.freesync_config = config;
8764 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8765 	acrtc->dm_irq_params.vrr_params = vrr_params;
8766 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8767 }
8768 
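/*
 * Hold a vblank reference and the vupdate interrupt while VRR is active, and
 * drop them again when VRR turns off, so that vblank/pflip timestamps stay
 * valid across the transition.
 */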
8769 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8770 					    struct dm_crtc_state *new_state)
8771 {
8772 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8773 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8774 
8775 	if (!old_vrr_active && new_vrr_active) {
8776 		/* Transition VRR inactive -> active:
8777 		 * While VRR is active, we must not disable vblank irq, as a
8778 		 * reenable after disable would compute bogus vblank/pflip
8779 		 * timestamps if it likely happened inside display front-porch.
8780 		 *
8781 		 * We also need vupdate irq for the actual core vblank handling
8782 		 * at end of vblank.
8783 		 */
8784 		dm_set_vupdate_irq(new_state->base.crtc, true);
8785 		drm_crtc_vblank_get(new_state->base.crtc);
8786 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8787 				 __func__, new_state->base.crtc->base.id);
8788 	} else if (old_vrr_active && !new_vrr_active) {
8789 		/* Transition VRR active -> inactive:
8790 		 * Allow vblank irq disable again for fixed refresh rate.
8791 		 */
8792 		dm_set_vupdate_irq(new_state->base.crtc, false);
8793 		drm_crtc_vblank_put(new_state->base.crtc);
8794 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8795 				 __func__, new_state->base.crtc->base.id);
8796 	}
8797 }
8798 
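/* Commit the cursor update for every cursor plane in the atomic state. */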
8799 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8800 {
8801 	struct drm_plane *plane;
8802 	struct drm_plane_state *old_plane_state;
8803 	int i;
8804 
8805 	/*
8806 	 * TODO: Make this per-stream so we don't issue redundant updates for
8807 	 * commits with multiple streams.
8808 	 */
8809 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8810 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8811 			handle_cursor_update(plane, old_plane_state);
8812 }
8813 
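/*
 * Build a dc_surface_update bundle for all planes attached to the given CRTC,
 * throttle page flips against the target vblank, and commit the plane and
 * stream updates to DC, including freesync, PSR and pageflip interrupt
 * handling.
 */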
8814 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8815 				    struct dc_state *dc_state,
8816 				    struct drm_device *dev,
8817 				    struct amdgpu_display_manager *dm,
8818 				    struct drm_crtc *pcrtc,
8819 				    bool wait_for_vblank)
8820 {
8821 	uint32_t i;
8822 	uint64_t timestamp_ns;
8823 	struct drm_plane *plane;
8824 	struct drm_plane_state *old_plane_state, *new_plane_state;
8825 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8826 	struct drm_crtc_state *new_pcrtc_state =
8827 			drm_atomic_get_new_crtc_state(state, pcrtc);
8828 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8829 	struct dm_crtc_state *dm_old_crtc_state =
8830 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8831 	int planes_count = 0, vpos, hpos;
8832 	long r;
8833 	unsigned long flags;
8834 	struct amdgpu_bo *abo;
8835 	uint32_t target_vblank, last_flip_vblank;
8836 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8837 	bool pflip_present = false;
8838 	struct {
8839 		struct dc_surface_update surface_updates[MAX_SURFACES];
8840 		struct dc_plane_info plane_infos[MAX_SURFACES];
8841 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8842 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8843 		struct dc_stream_update stream_update;
8844 	} *bundle;
8845 
8846 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8847 
8848 	if (!bundle) {
8849 		dm_error("Failed to allocate update bundle\n");
8850 		goto cleanup;
8851 	}
8852 
8853 	/*
8854 	 * Disable the cursor first if we're disabling all the planes.
8855 	 * It'll remain on the screen after the planes are re-enabled
8856 	 * if we don't.
8857 	 */
8858 	if (acrtc_state->active_planes == 0)
8859 		amdgpu_dm_commit_cursors(state);
8860 
8861 	/* update planes when needed */
8862 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8863 		struct drm_crtc *crtc = new_plane_state->crtc;
8864 		struct drm_crtc_state *new_crtc_state;
8865 		struct drm_framebuffer *fb = new_plane_state->fb;
8866 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8867 		bool plane_needs_flip;
8868 		struct dc_plane_state *dc_plane;
8869 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8870 
8871 		/* Cursor plane is handled after stream updates */
8872 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8873 			continue;
8874 
8875 		if (!fb || !crtc || pcrtc != crtc)
8876 			continue;
8877 
8878 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8879 		if (!new_crtc_state->active)
8880 			continue;
8881 
8882 		dc_plane = dm_new_plane_state->dc_state;
8883 
8884 		bundle->surface_updates[planes_count].surface = dc_plane;
8885 		if (new_pcrtc_state->color_mgmt_changed) {
8886 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8887 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8888 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8889 		}
8890 
8891 		fill_dc_scaling_info(new_plane_state,
8892 				     &bundle->scaling_infos[planes_count]);
8893 
8894 		bundle->surface_updates[planes_count].scaling_info =
8895 			&bundle->scaling_infos[planes_count];
8896 
8897 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8898 
8899 		pflip_present = pflip_present || plane_needs_flip;
8900 
8901 		if (!plane_needs_flip) {
8902 			planes_count += 1;
8903 			continue;
8904 		}
8905 
8906 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8907 
		/*
		 * Wait for all fences on this FB. Do a limited wait to avoid
		 * deadlock during GPU reset, when this fence will not signal
		 * but we hold the reservation lock for the BO.
		 */
8913 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8914 					  msecs_to_jiffies(5000));
8915 		if (unlikely(r <= 0))
8916 			DRM_ERROR("Waiting for fences timed out!");
8917 
8918 		fill_dc_plane_info_and_addr(
8919 			dm->adev, new_plane_state,
8920 			afb->tiling_flags,
8921 			&bundle->plane_infos[planes_count],
8922 			&bundle->flip_addrs[planes_count].address,
8923 			afb->tmz_surface, false);
8924 
8925 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8926 				 new_plane_state->plane->index,
8927 				 bundle->plane_infos[planes_count].dcc.enable);
8928 
8929 		bundle->surface_updates[planes_count].plane_info =
8930 			&bundle->plane_infos[planes_count];
8931 
		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
8936 		bundle->flip_addrs[planes_count].flip_immediate =
8937 			crtc->state->async_flip &&
8938 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8939 
8940 		timestamp_ns = ktime_get_ns();
8941 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8942 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8943 		bundle->surface_updates[planes_count].surface = dc_plane;
8944 
8945 		if (!bundle->surface_updates[planes_count].surface) {
8946 			DRM_ERROR("No surface for CRTC: id=%d\n",
8947 					acrtc_attach->crtc_id);
8948 			continue;
8949 		}
8950 
8951 		if (plane == pcrtc->primary)
8952 			update_freesync_state_on_stream(
8953 				dm,
8954 				acrtc_state,
8955 				acrtc_state->stream,
8956 				dc_plane,
8957 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8958 
8959 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8960 				 __func__,
8961 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8962 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8963 
8964 		planes_count += 1;
8965 
8966 	}
8967 
8968 	if (pflip_present) {
8969 		if (!vrr_active) {
8970 			/* Use old throttling in non-vrr fixed refresh rate mode
8971 			 * to keep flip scheduling based on target vblank counts
8972 			 * working in a backwards compatible way, e.g., for
8973 			 * clients using the GLX_OML_sync_control extension or
8974 			 * DRI3/Present extension with defined target_msc.
8975 			 */
8976 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8977 		}
8978 		else {
8979 			/* For variable refresh rate mode only:
8980 			 * Get vblank of last completed flip to avoid > 1 vrr
8981 			 * flips per video frame by use of throttling, but allow
8982 			 * flip programming anywhere in the possibly large
8983 			 * variable vrr vblank interval for fine-grained flip
8984 			 * timing control and more opportunity to avoid stutter
8985 			 * on late submission of flips.
8986 			 */
8987 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8988 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8989 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8990 		}
8991 
8992 		target_vblank = last_flip_vblank + wait_for_vblank;
8993 
8994 		/*
8995 		 * Wait until we're out of the vertical blank period before the one
8996 		 * targeted by the flip
8997 		 */
8998 		while ((acrtc_attach->enabled &&
8999 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9000 							    0, &vpos, &hpos, NULL,
9001 							    NULL, &pcrtc->hwmode)
9002 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9003 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9004 			(int)(target_vblank -
9005 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9006 			usleep_range(1000, 1100);
9007 		}
9008 
		/**
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP), so in the transition
		 * case from 0 -> n planes we have to skip a hardware-generated
		 * event and rely on sending it from software.
		 */
9017 		if (acrtc_attach->base.state->event &&
9018 		    acrtc_state->active_planes > 0 &&
9019 		    !acrtc_state->force_dpms_off) {
9020 			drm_crtc_vblank_get(pcrtc);
9021 
9022 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9023 
9024 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9025 			prepare_flip_isr(acrtc_attach);
9026 
9027 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9028 		}
9029 
9030 		if (acrtc_state->stream) {
9031 			if (acrtc_state->freesync_vrr_info_changed)
9032 				bundle->stream_update.vrr_infopacket =
9033 					&acrtc_state->stream->vrr_infopacket;
9034 		}
9035 	}
9036 
9037 	/* Update the planes if changed or disable if we don't have any. */
9038 	if ((planes_count || acrtc_state->active_planes == 0) &&
9039 		acrtc_state->stream) {
9040 #if defined(CONFIG_DRM_AMD_DC_DCN)
9041 		/*
9042 		 * If PSR or idle optimizations are enabled then flush out
9043 		 * any pending work before hardware programming.
9044 		 */
9045 		if (dm->vblank_control_workqueue)
9046 			flush_workqueue(dm->vblank_control_workqueue);
9047 #endif
9048 
9049 		bundle->stream_update.stream = acrtc_state->stream;
9050 		if (new_pcrtc_state->mode_changed) {
9051 			bundle->stream_update.src = acrtc_state->stream->src;
9052 			bundle->stream_update.dst = acrtc_state->stream->dst;
9053 		}
9054 
9055 		if (new_pcrtc_state->color_mgmt_changed) {
9056 			/*
9057 			 * TODO: This isn't fully correct since we've actually
9058 			 * already modified the stream in place.
9059 			 */
9060 			bundle->stream_update.gamut_remap =
9061 				&acrtc_state->stream->gamut_remap_matrix;
9062 			bundle->stream_update.output_csc_transform =
9063 				&acrtc_state->stream->csc_color_matrix;
9064 			bundle->stream_update.out_transfer_func =
9065 				acrtc_state->stream->out_transfer_func;
9066 		}
9067 
9068 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9069 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9070 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9071 
9072 		/*
9073 		 * If FreeSync state on the stream has changed then we need to
9074 		 * re-adjust the min/max bounds now that DC doesn't handle this
9075 		 * as part of commit.
9076 		 */
9077 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9078 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9079 			dc_stream_adjust_vmin_vmax(
9080 				dm->dc, acrtc_state->stream,
9081 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9082 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9083 		}
9084 		mutex_lock(&dm->dc_lock);
9085 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9086 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9087 			amdgpu_dm_psr_disable(acrtc_state->stream);
9088 
9089 		dc_commit_updates_for_stream(dm->dc,
9090 						     bundle->surface_updates,
9091 						     planes_count,
9092 						     acrtc_state->stream,
9093 						     &bundle->stream_update,
9094 						     dc_state);
9095 
9096 		/**
9097 		 * Enable or disable the interrupts on the backend.
9098 		 *
9099 		 * Most pipes are put into power gating when unused.
9100 		 *
9101 		 * When power gating is enabled on a pipe we lose the
9102 		 * interrupt enablement state when power gating is disabled.
9103 		 *
9104 		 * So we need to update the IRQ control state in hardware
9105 		 * whenever the pipe turns on (since it could be previously
9106 		 * power gated) or off (since some pipes can't be power gated
9107 		 * on some ASICs).
9108 		 */
9109 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9110 			dm_update_pflip_irq_state(drm_to_adev(dev),
9111 						  acrtc_attach);
9112 
9113 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9114 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9115 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9116 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9117 
9118 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9119 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9120 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9121 			struct amdgpu_dm_connector *aconn =
9122 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9123 
9124 			if (aconn->psr_skip_count > 0)
9125 				aconn->psr_skip_count--;
9126 
9127 			/* Allow PSR when skip count is 0. */
9128 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9129 		} else {
9130 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9131 		}
9132 
9133 		mutex_unlock(&dm->dc_lock);
9134 	}
9135 
9136 	/*
9137 	 * Update cursor state *after* programming all the planes.
9138 	 * This avoids redundant programming in the case where we're going
9139 	 * to be disabling a single plane - those pipes are being disabled.
9140 	 */
9141 	if (acrtc_state->active_planes)
9142 		amdgpu_dm_commit_cursors(state);
9143 
9144 cleanup:
9145 	kfree(bundle);
9146 }
9147 
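/*
 * Notify the audio component about ELD changes: first for connectors whose
 * audio device went away, then for connectors that gained an enabled stream.
 */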
9148 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9149 				   struct drm_atomic_state *state)
9150 {
9151 	struct amdgpu_device *adev = drm_to_adev(dev);
9152 	struct amdgpu_dm_connector *aconnector;
9153 	struct drm_connector *connector;
9154 	struct drm_connector_state *old_con_state, *new_con_state;
9155 	struct drm_crtc_state *new_crtc_state;
9156 	struct dm_crtc_state *new_dm_crtc_state;
9157 	const struct dc_stream_status *status;
9158 	int i, inst;
9159 
	/* Notify audio device removals. */
9161 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9162 		if (old_con_state->crtc != new_con_state->crtc) {
9163 			/* CRTC changes require notification. */
9164 			goto notify;
9165 		}
9166 
9167 		if (!new_con_state->crtc)
9168 			continue;
9169 
9170 		new_crtc_state = drm_atomic_get_new_crtc_state(
9171 			state, new_con_state->crtc);
9172 
9173 		if (!new_crtc_state)
9174 			continue;
9175 
9176 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9177 			continue;
9178 
9179 	notify:
9180 		aconnector = to_amdgpu_dm_connector(connector);
9181 
9182 		mutex_lock(&adev->dm.audio_lock);
9183 		inst = aconnector->audio_inst;
9184 		aconnector->audio_inst = -1;
9185 		mutex_unlock(&adev->dm.audio_lock);
9186 
9187 		amdgpu_dm_audio_eld_notify(adev, inst);
9188 	}
9189 
9190 	/* Notify audio device additions. */
9191 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9192 		if (!new_con_state->crtc)
9193 			continue;
9194 
9195 		new_crtc_state = drm_atomic_get_new_crtc_state(
9196 			state, new_con_state->crtc);
9197 
9198 		if (!new_crtc_state)
9199 			continue;
9200 
9201 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9202 			continue;
9203 
9204 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9205 		if (!new_dm_crtc_state->stream)
9206 			continue;
9207 
9208 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9209 		if (!status)
9210 			continue;
9211 
9212 		aconnector = to_amdgpu_dm_connector(connector);
9213 
9214 		mutex_lock(&adev->dm.audio_lock);
9215 		inst = status->audio_inst;
9216 		aconnector->audio_inst = inst;
9217 		mutex_unlock(&adev->dm.audio_lock);
9218 
9219 		amdgpu_dm_audio_eld_notify(adev, inst);
9220 	}
9221 }
9222 
/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
9231 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9232 						struct dc_stream_state *stream_state)
9233 {
9234 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9235 }
9236 
9237 /**
9238  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9239  * @state: The atomic state to commit
9240  *
9241  * This will tell DC to commit the constructed DC state from atomic_check,
9242  * programming the hardware. Any failures here implies a hardware failure, since
9243  * atomic check should have filtered anything non-kosher.
9244  */
9245 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9246 {
9247 	struct drm_device *dev = state->dev;
9248 	struct amdgpu_device *adev = drm_to_adev(dev);
9249 	struct amdgpu_display_manager *dm = &adev->dm;
9250 	struct dm_atomic_state *dm_state;
9251 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9252 	uint32_t i, j;
9253 	struct drm_crtc *crtc;
9254 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9255 	unsigned long flags;
9256 	bool wait_for_vblank = true;
9257 	struct drm_connector *connector;
9258 	struct drm_connector_state *old_con_state, *new_con_state;
9259 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9260 	int crtc_disable_count = 0;
9261 	bool mode_set_reset_required = false;
9262 
9263 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9264 
9265 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9266 
9267 	dm_state = dm_atomic_get_new_state(state);
9268 	if (dm_state && dm_state->context) {
9269 		dc_state = dm_state->context;
9270 	} else {
9271 		/* No state changes, retain current state. */
9272 		dc_state_temp = dc_create_state(dm->dc);
9273 		ASSERT(dc_state_temp);
9274 		dc_state = dc_state_temp;
9275 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9276 	}
9277 
9278 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9279 				       new_crtc_state, i) {
9280 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9281 
9282 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9283 
9284 		if (old_crtc_state->active &&
9285 		    (!new_crtc_state->active ||
9286 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9287 			manage_dm_interrupts(adev, acrtc, false);
9288 			dc_stream_release(dm_old_crtc_state->stream);
9289 		}
9290 	}
9291 
9292 	drm_atomic_helper_calc_timestamping_constants(state);
9293 
9294 	/* update changed items */
9295 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9296 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9297 
9298 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9299 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9300 
9301 		DRM_DEBUG_ATOMIC(
9302 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
9305 			acrtc->crtc_id,
9306 			new_crtc_state->enable,
9307 			new_crtc_state->active,
9308 			new_crtc_state->planes_changed,
9309 			new_crtc_state->mode_changed,
9310 			new_crtc_state->active_changed,
9311 			new_crtc_state->connectors_changed);
9312 
9313 		/* Disable cursor if disabling crtc */
9314 		if (old_crtc_state->active && !new_crtc_state->active) {
9315 			struct dc_cursor_position position;
9316 
9317 			memset(&position, 0, sizeof(position));
9318 			mutex_lock(&dm->dc_lock);
9319 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9320 			mutex_unlock(&dm->dc_lock);
9321 		}
9322 
9323 		/* Copy all transient state flags into dc state */
9324 		if (dm_new_crtc_state->stream) {
9325 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9326 							    dm_new_crtc_state->stream);
9327 		}
9328 
		/* Handle the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
9332 
9333 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9334 
9335 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9336 
9337 			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery.
				 * In that case userspace tries to set a mode
				 * on a display which is in fact disconnected.
				 * dc_sink is NULL on the aconnector here.
				 * We expect a mode reset to come soon.
				 *
				 * This can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
9353 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9354 						__func__, acrtc->base.base.id);
9355 				continue;
9356 			}
9357 
9358 			if (dm_old_crtc_state->stream)
9359 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9360 
9361 			pm_runtime_get_noresume(dev->dev);
9362 
9363 			acrtc->enabled = true;
9364 			acrtc->hw_mode = new_crtc_state->mode;
9365 			crtc->hwmode = new_crtc_state->mode;
9366 			mode_set_reset_required = true;
9367 		} else if (modereset_required(new_crtc_state)) {
9368 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9369 			/* i.e. reset mode */
9370 			if (dm_old_crtc_state->stream)
9371 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9372 
9373 			mode_set_reset_required = true;
9374 		}
9375 	} /* for_each_crtc_in_state() */
9376 
9377 	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
9379 		if (mode_set_reset_required) {
9380 #if defined(CONFIG_DRM_AMD_DC_DCN)
9381 			if (dm->vblank_control_workqueue)
9382 				flush_workqueue(dm->vblank_control_workqueue);
9383 #endif
9384 			amdgpu_dm_psr_disable_all(dm);
9385 		}
9386 
9387 		dm_enable_per_frame_crtc_master_sync(dc_state);
9388 		mutex_lock(&dm->dc_lock);
9389 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9390 #if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
9394 #endif
9395 		mutex_unlock(&dm->dc_lock);
9396 	}
9397 
9398 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9399 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9400 
9401 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9402 
9403 		if (dm_new_crtc_state->stream != NULL) {
9404 			const struct dc_stream_status *status =
9405 					dc_stream_get_status(dm_new_crtc_state->stream);
9406 
9407 			if (!status)
9408 				status = dc_stream_get_status_from_state(dc_state,
9409 									 dm_new_crtc_state->stream);
9410 			if (!status)
9411 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9412 			else
9413 				acrtc->otg_inst = status->primary_otg_inst;
9414 		}
9415 	}
9416 #ifdef CONFIG_DRM_AMD_DC_HDCP
9417 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9418 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9419 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9420 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9421 
9422 		new_crtc_state = NULL;
9423 
9424 		if (acrtc)
9425 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9426 
9427 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9428 
9429 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9430 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9431 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9432 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9433 			dm_new_con_state->update_hdcp = true;
9434 			continue;
9435 		}
9436 
9437 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9438 			hdcp_update_display(
9439 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9440 				new_con_state->hdcp_content_type,
9441 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9442 	}
9443 #endif
9444 
9445 	/* Handle connector state changes */
9446 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9447 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9448 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9449 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9450 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9451 		struct dc_stream_update stream_update;
9452 		struct dc_info_packet hdr_packet;
9453 		struct dc_stream_status *status = NULL;
9454 		bool abm_changed, hdr_changed, scaling_changed;
9455 
9456 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9457 		memset(&stream_update, 0, sizeof(stream_update));
9458 
9459 		if (acrtc) {
9460 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9461 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9462 		}
9463 
9464 		/* Skip any modesets/resets */
9465 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9466 			continue;
9467 
9468 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9469 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9470 
9471 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9472 							     dm_old_con_state);
9473 
9474 		abm_changed = dm_new_crtc_state->abm_level !=
9475 			      dm_old_crtc_state->abm_level;
9476 
9477 		hdr_changed =
9478 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9479 
9480 		if (!scaling_changed && !abm_changed && !hdr_changed)
9481 			continue;
9482 
9483 		stream_update.stream = dm_new_crtc_state->stream;
9484 		if (scaling_changed) {
9485 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9486 					dm_new_con_state, dm_new_crtc_state->stream);
9487 
9488 			stream_update.src = dm_new_crtc_state->stream->src;
9489 			stream_update.dst = dm_new_crtc_state->stream->dst;
9490 		}
9491 
9492 		if (abm_changed) {
9493 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9494 
9495 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9496 		}
9497 
9498 		if (hdr_changed) {
9499 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9500 			stream_update.hdr_static_metadata = &hdr_packet;
9501 		}
9502 
9503 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9504 
9505 		if (WARN_ON(!status))
9506 			continue;
9507 
9508 		WARN_ON(!status->plane_count);
9509 
9510 		/*
9511 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9512 		 * Here we create an empty update on each plane.
9513 		 * To fix this, DC should permit updating only stream properties.
9514 		 */
9515 		for (j = 0; j < status->plane_count; j++)
9516 			dummy_updates[j].surface = status->plane_states[0];
9517 
9518 
9519 		mutex_lock(&dm->dc_lock);
9520 		dc_commit_updates_for_stream(dm->dc,
9521 						     dummy_updates,
9522 						     status->plane_count,
9523 						     dm_new_crtc_state->stream,
9524 						     &stream_update,
9525 						     dc_state);
9526 		mutex_unlock(&dm->dc_lock);
9527 	}
9528 
9529 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9530 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9531 				      new_crtc_state, i) {
9532 		if (old_crtc_state->active && !new_crtc_state->active)
9533 			crtc_disable_count++;
9534 
9535 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9536 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9537 
9538 		/* For freesync config update on crtc state and params for irq */
9539 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9540 
9541 		/* Handle vrr on->off / off->on transitions */
9542 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9543 						dm_new_crtc_state);
9544 	}
9545 
	/**
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. This is intentionally deferred until after the front end
	 * state has been modified, so that the OTG is on and the IRQ handlers
	 * don't access stale or invalid state.
	 */
9552 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9553 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9554 #ifdef CONFIG_DEBUG_FS
9555 		bool configure_crc = false;
9556 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9557 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9558 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9559 #endif
9560 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9561 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9562 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9563 #endif
9564 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9565 
9566 		if (new_crtc_state->active &&
9567 		    (!old_crtc_state->active ||
9568 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9569 			dc_stream_retain(dm_new_crtc_state->stream);
9570 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9571 			manage_dm_interrupts(adev, acrtc, true);
9572 
9573 #ifdef CONFIG_DEBUG_FS
9574 			/**
9575 			 * Frontend may have changed so reapply the CRC capture
9576 			 * settings for the stream.
9577 			 */
9578 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9579 
9580 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9581 				configure_crc = true;
9582 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9583 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9584 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9585 					acrtc->dm_irq_params.crc_window.update_win = true;
9586 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9587 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9588 					crc_rd_wrk->crtc = crtc;
9589 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9590 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9591 				}
9592 #endif
9593 			}
9594 
9595 			if (configure_crc)
9596 				if (amdgpu_dm_crtc_configure_crc_source(
9597 					crtc, dm_new_crtc_state, cur_crc_src))
9598 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9599 #endif
9600 		}
9601 	}
9602 
9603 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9604 		if (new_crtc_state->async_flip)
9605 			wait_for_vblank = false;
9606 
9607 	/* update planes when needed per crtc*/
9608 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9609 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9610 
9611 		if (dm_new_crtc_state->stream)
9612 			amdgpu_dm_commit_planes(state, dc_state, dev,
9613 						dm, crtc, wait_for_vblank);
9614 	}
9615 
9616 	/* Update audio instances for each connector. */
9617 	amdgpu_dm_commit_audio(dev, state);
9618 
9619 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9620 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9621 	/* restore the backlight level */
9622 	for (i = 0; i < dm->num_of_edps; i++) {
9623 		if (dm->backlight_dev[i] &&
9624 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9625 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9626 	}
9627 #endif
	/*
	 * Send a vblank event for all events not handled in the flip path, and
	 * mark the events as consumed for drm_atomic_helper_commit_hw_done().
	 */
9632 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9633 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9634 
9635 		if (new_crtc_state->event)
9636 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9637 
9638 		new_crtc_state->event = NULL;
9639 	}
9640 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9641 
9642 	/* Signal HW programming completion */
9643 	drm_atomic_helper_commit_hw_done(state);
9644 
9645 	if (wait_for_vblank)
9646 		drm_atomic_helper_wait_for_flip_done(dev, state);
9647 
9648 	drm_atomic_helper_cleanup_planes(dev, state);
9649 
9650 	/* return the stolen vga memory back to VRAM */
9651 	if (!adev->mman.keep_stolen_vga_memory)
9652 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9653 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9654 
9655 	/*
9656 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9657 	 * so we can put the GPU into runtime suspend if we're not driving any
9658 	 * displays anymore
9659 	 */
9660 	for (i = 0; i < crtc_disable_count; i++)
9661 		pm_runtime_put_autosuspend(dev->dev);
9662 	pm_runtime_mark_last_busy(dev->dev);
9663 
9664 	if (dc_state_temp)
9665 		dc_release_state(dc_state_temp);
9666 }
9667 
9668 
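/*
 * Build a minimal atomic state containing the connector, its CRTC and the
 * primary plane, mark the CRTC mode as changed, and commit it to force a
 * modeset that restores the previous display setting.
 */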
9669 static int dm_force_atomic_commit(struct drm_connector *connector)
9670 {
9671 	int ret = 0;
9672 	struct drm_device *ddev = connector->dev;
9673 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9674 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9675 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9676 	struct drm_connector_state *conn_state;
9677 	struct drm_crtc_state *crtc_state;
9678 	struct drm_plane_state *plane_state;
9679 
9680 	if (!state)
9681 		return -ENOMEM;
9682 
9683 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9684 
9685 	/* Construct an atomic state to restore previous display setting */
9686 
9687 	/*
9688 	 * Attach connectors to drm_atomic_state
9689 	 */
9690 	conn_state = drm_atomic_get_connector_state(state, connector);
9691 
9692 	ret = PTR_ERR_OR_ZERO(conn_state);
9693 	if (ret)
9694 		goto out;
9695 
9696 	/* Attach crtc to drm_atomic_state*/
9697 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9698 
9699 	ret = PTR_ERR_OR_ZERO(crtc_state);
9700 	if (ret)
9701 		goto out;
9702 
9703 	/* force a restore */
9704 	crtc_state->mode_changed = true;
9705 
9706 	/* Attach plane to drm_atomic_state */
9707 	plane_state = drm_atomic_get_plane_state(state, plane);
9708 
9709 	ret = PTR_ERR_OR_ZERO(plane_state);
9710 	if (ret)
9711 		goto out;
9712 
9713 	/* Call commit internally with the state we just constructed */
9714 	ret = drm_atomic_commit(state);
9715 
9716 out:
9717 	drm_atomic_state_put(state);
9718 	if (ret)
9719 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9720 
9721 	return ret;
9722 }
9723 
/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
9729 void dm_restore_drm_connector_state(struct drm_device *dev,
9730 				    struct drm_connector *connector)
9731 {
9732 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9733 	struct amdgpu_crtc *disconnected_acrtc;
9734 	struct dm_crtc_state *acrtc_state;
9735 
9736 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9737 		return;
9738 
9739 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9740 	if (!disconnected_acrtc)
9741 		return;
9742 
9743 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9744 	if (!acrtc_state->stream)
9745 		return;
9746 
	/*
	 * If the previous sink is not released and differs from the current
	 * one, we deduce we are in a state where we cannot rely on a usermode
	 * call to turn on the display, so we do it here.
	 */
9752 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9753 		dm_force_atomic_commit(&aconnector->base);
9754 }
9755 
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
9760 static int do_aquire_global_lock(struct drm_device *dev,
9761 				 struct drm_atomic_state *state)
9762 {
9763 	struct drm_crtc *crtc;
9764 	struct drm_crtc_commit *commit;
9765 	long ret;
9766 
	/*
	 * Adding all modeset locks to the acquire_ctx ensures that when the
	 * framework releases it, the extra locks we take here are released
	 * too.
	 */
9772 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9773 	if (ret)
9774 		return ret;
9775 
9776 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9777 		spin_lock(&crtc->commit_lock);
9778 		commit = list_first_entry_or_null(&crtc->commit_list,
9779 				struct drm_crtc_commit, commit_entry);
9780 		if (commit)
9781 			drm_crtc_commit_get(commit);
9782 		spin_unlock(&crtc->commit_lock);
9783 
9784 		if (!commit)
9785 			continue;
9786 
9787 		/*
9788 		 * Make sure all pending HW programming completed and
9789 		 * page flips done
9790 		 */
9791 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9792 
9793 		if (ret > 0)
9794 			ret = wait_for_completion_interruptible_timeout(
9795 					&commit->flip_done, 10*HZ);
9796 
9797 		if (ret == 0)
9798 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9799 				  "timed out\n", crtc->base.id, crtc->name);
9800 
9801 		drm_crtc_commit_put(commit);
9802 	}
9803 
9804 	return ret < 0 ? ret : 0;
9805 }
9806 
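/*
 * Derive the mod_freesync_config for the CRTC from the connector's freesync
 * range and the requested VRR state.
 */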
9807 static void get_freesync_config_for_crtc(
9808 	struct dm_crtc_state *new_crtc_state,
9809 	struct dm_connector_state *new_con_state)
9810 {
9811 	struct mod_freesync_config config = {0};
9812 	struct amdgpu_dm_connector *aconnector =
9813 			to_amdgpu_dm_connector(new_con_state->base.connector);
9814 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9815 	int vrefresh = drm_mode_vrefresh(mode);
9816 	bool fs_vid_mode = false;
9817 
9818 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9819 					vrefresh >= aconnector->min_vfreq &&
9820 					vrefresh <= aconnector->max_vfreq;
9821 
9822 	if (new_crtc_state->vrr_supported) {
9823 		new_crtc_state->stream->ignore_msa_timing_param = true;
9824 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9825 
9826 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9827 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9828 		config.vsif_supported = true;
9829 		config.btr = true;
9830 
9831 		if (fs_vid_mode) {
9832 			config.state = VRR_STATE_ACTIVE_FIXED;
9833 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9834 			goto out;
9835 		} else if (new_crtc_state->base.vrr_enabled) {
9836 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9837 		} else {
9838 			config.state = VRR_STATE_INACTIVE;
9839 		}
9840 	}
9841 out:
9842 	new_crtc_state->freesync_config = config;
9843 }
9844 
9845 static void reset_freesync_config_for_crtc(
9846 	struct dm_crtc_state *new_crtc_state)
9847 {
9848 	new_crtc_state->vrr_supported = false;
9849 
9850 	memset(&new_crtc_state->vrr_infopacket, 0,
9851 	       sizeof(new_crtc_state->vrr_infopacket));
9852 }
9853 
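/*
 * Return true when only the vertical blanking differs between the two modes
 * (same pixel clock, horizontal timing and vsync width), i.e. the switch can
 * be done by adjusting the front porch without a full modeset.
 */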
9854 static bool
9855 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9856 				 struct drm_crtc_state *new_crtc_state)
9857 {
9858 	struct drm_display_mode old_mode, new_mode;
9859 
9860 	if (!old_crtc_state || !new_crtc_state)
9861 		return false;
9862 
9863 	old_mode = old_crtc_state->mode;
9864 	new_mode = new_crtc_state->mode;
9865 
9866 	if (old_mode.clock       == new_mode.clock &&
9867 	    old_mode.hdisplay    == new_mode.hdisplay &&
9868 	    old_mode.vdisplay    == new_mode.vdisplay &&
9869 	    old_mode.htotal      == new_mode.htotal &&
9870 	    old_mode.vtotal      != new_mode.vtotal &&
9871 	    old_mode.hsync_start == new_mode.hsync_start &&
9872 	    old_mode.vsync_start != new_mode.vsync_start &&
9873 	    old_mode.hsync_end   == new_mode.hsync_end &&
9874 	    old_mode.vsync_end   != new_mode.vsync_end &&
9875 	    old_mode.hskew       == new_mode.hskew &&
9876 	    old_mode.vscan       == new_mode.vscan &&
9877 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9878 	    (new_mode.vsync_end - new_mode.vsync_start))
9879 		return true;
9880 
9881 	return false;
9882 }
9883 
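/*
 * Switch the CRTC to fixed-rate freesync and record the mode's nominal
 * refresh rate in uHz, computed as pixel clock / (htotal * vtotal).
 */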
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9885 	uint64_t num, den, res;
9886 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9887 
9888 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9889 
9890 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9891 	den = (unsigned long long)new_crtc_state->mode.htotal *
9892 	      (unsigned long long)new_crtc_state->mode.vtotal;
9893 
9894 	res = div_u64(num, den);
9895 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9896 }
9897 
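/*
 * Create, validate or remove the DC stream for a CRTC as part of atomic
 * check, updating the DC context and flagging when full validation under the
 * global lock is required.
 */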
9898 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9899 				struct drm_atomic_state *state,
9900 				struct drm_crtc *crtc,
9901 				struct drm_crtc_state *old_crtc_state,
9902 				struct drm_crtc_state *new_crtc_state,
9903 				bool enable,
9904 				bool *lock_and_validation_needed)
9905 {
9906 	struct dm_atomic_state *dm_state = NULL;
9907 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9908 	struct dc_stream_state *new_stream;
9909 	int ret = 0;
9910 
	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
	 * dc_validation_set; it updates changed items.
	 */
9915 	struct amdgpu_crtc *acrtc = NULL;
9916 	struct amdgpu_dm_connector *aconnector = NULL;
9917 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9918 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9919 
9920 	new_stream = NULL;
9921 
9922 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9923 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9924 	acrtc = to_amdgpu_crtc(crtc);
9925 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9926 
9927 	/* TODO This hack should go away */
9928 	if (aconnector && enable) {
9929 		/* Make sure fake sink is created in plug-in scenario */
9930 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9931 							    &aconnector->base);
9932 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9933 							    &aconnector->base);
9934 
9935 		if (IS_ERR(drm_new_conn_state)) {
9936 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9937 			goto fail;
9938 		}
9939 
9940 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9941 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9942 
9943 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9944 			goto skip_modeset;
9945 
9946 		new_stream = create_validate_stream_for_sink(aconnector,
9947 							     &new_crtc_state->mode,
9948 							     dm_new_conn_state,
9949 							     dm_old_crtc_state->stream);
9950 
		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */
9957 
9958 		if (!new_stream) {
9959 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9960 					__func__, acrtc->base.base.id);
9961 			ret = -ENOMEM;
9962 			goto fail;
9963 		}
9964 
9965 		/*
9966 		 * TODO: Check VSDB bits to decide whether this should
9967 		 * be enabled or not.
9968 		 */
9969 		new_stream->triggered_crtc_reset.enabled =
9970 			dm->force_timing_sync;
9971 
9972 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9973 
9974 		ret = fill_hdr_info_packet(drm_new_conn_state,
9975 					   &new_stream->hdr_static_metadata);
9976 		if (ret)
9977 			goto fail;
9978 
9979 		/*
9980 		 * If we already removed the old stream from the context
9981 		 * (and set the new stream to NULL) then we can't reuse
9982 		 * the old stream even if the stream and scaling are unchanged.
9983 		 * We'll hit the BUG_ON and black screen.
9984 		 *
9985 		 * TODO: Refactor this function to allow this check to work
9986 		 * in all conditions.
9987 		 */
9988 		if (amdgpu_freesync_vid_mode &&
9989 		    dm_new_crtc_state->stream &&
9990 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9991 			goto skip_modeset;
9992 
9993 		if (dm_new_crtc_state->stream &&
9994 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9995 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9996 			new_crtc_state->mode_changed = false;
9997 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9998 					 new_crtc_state->mode_changed);
9999 		}
10000 	}
10001 
10002 	/* mode_changed flag may get updated above, need to check again */
10003 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10004 		goto skip_modeset;
10005 
10006 	DRM_DEBUG_ATOMIC(
10007 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10008 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10009 		"connectors_changed:%d\n",
10010 		acrtc->crtc_id,
10011 		new_crtc_state->enable,
10012 		new_crtc_state->active,
10013 		new_crtc_state->planes_changed,
10014 		new_crtc_state->mode_changed,
10015 		new_crtc_state->active_changed,
10016 		new_crtc_state->connectors_changed);
10017 
10018 	/* Remove stream for any changed/disabled CRTC */
10019 	if (!enable) {
10020 
10021 		if (!dm_old_crtc_state->stream)
10022 			goto skip_modeset;
10023 
10024 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10025 		    is_timing_unchanged_for_freesync(new_crtc_state,
10026 						     old_crtc_state)) {
10027 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER(
				"Mode change not required for front porch change, "
				"setting mode_changed to %d\n",
				new_crtc_state->mode_changed);
10032 
10033 			set_freesync_fixed_config(dm_new_crtc_state);
10034 
10035 			goto skip_modeset;
10036 		} else if (amdgpu_freesync_vid_mode && aconnector &&
10037 			   is_freesync_video_mode(&new_crtc_state->mode,
10038 						  aconnector)) {
10039 			struct drm_display_mode *high_mode;
10040 
10041 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
				set_freesync_fixed_config(dm_new_crtc_state);
10045 		}
10046 
10047 		ret = dm_atomic_get_state(state, &dm_state);
10048 		if (ret)
10049 			goto fail;
10050 
10051 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10052 				crtc->base.id);
10053 
10054 		/* i.e. reset mode */
10055 		if (dc_remove_stream_from_ctx(
10056 				dm->dc,
10057 				dm_state->context,
10058 				dm_old_crtc_state->stream) != DC_OK) {
10059 			ret = -EINVAL;
10060 			goto fail;
10061 		}
10062 
10063 		dc_stream_release(dm_old_crtc_state->stream);
10064 		dm_new_crtc_state->stream = NULL;
10065 
10066 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10067 
10068 		*lock_and_validation_needed = true;
10069 
	} else { /* Add stream for any updated/enabled CRTC */
10071 		/*
10072 		 * Quick fix to prevent NULL pointer on new_stream when
10073 		 * added MST connectors not found in existing crtc_state in the chained mode
10074 		 * TODO: need to dig out the root cause of that
10075 		 */
10076 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10077 			goto skip_modeset;
10078 
10079 		if (modereset_required(new_crtc_state))
10080 			goto skip_modeset;
10081 
10082 		if (modeset_required(new_crtc_state, new_stream,
10083 				     dm_old_crtc_state->stream)) {
10084 
10085 			WARN_ON(dm_new_crtc_state->stream);
10086 
10087 			ret = dm_atomic_get_state(state, &dm_state);
10088 			if (ret)
10089 				goto fail;
10090 
10091 			dm_new_crtc_state->stream = new_stream;
10092 
10093 			dc_stream_retain(new_stream);
10094 
10095 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10096 					 crtc->base.id);
10097 
10098 			if (dc_add_stream_to_ctx(
10099 					dm->dc,
10100 					dm_state->context,
10101 					dm_new_crtc_state->stream) != DC_OK) {
10102 				ret = -EINVAL;
10103 				goto fail;
10104 			}
10105 
10106 			*lock_and_validation_needed = true;
10107 		}
10108 	}
10109 
10110 skip_modeset:
10111 	/* Release extra reference */
10112 	if (new_stream)
		dc_stream_release(new_stream);
10114 
10115 	/*
10116 	 * We want to do dc stream updates that do not require a
10117 	 * full modeset below.
10118 	 */
10119 	if (!(enable && aconnector && new_crtc_state->active))
10120 		return 0;
10121 	/*
10122 	 * Given above conditions, the dc state cannot be NULL because:
10123 	 * 1. We're in the process of enabling CRTCs (just been added
10124 	 *    to the dc context, or already is on the context)
10125 	 * 2. Has a valid connector attached, and
10126 	 * 3. Is currently active and enabled.
10127 	 * => The dc stream state currently exists.
10128 	 */
10129 	BUG_ON(dm_new_crtc_state->stream == NULL);
10130 
10131 	/* Scaling or underscan settings */
10132 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10133 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10134 		update_stream_scaling_settings(
10135 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10136 
10137 	/* ABM settings */
10138 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10139 
10140 	/*
10141 	 * Color management settings. We also update color properties
10142 	 * when a modeset is needed, to ensure it gets reprogrammed.
10143 	 */
10144 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10145 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10146 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10147 		if (ret)
10148 			goto fail;
10149 	}
10150 
10151 	/* Update Freesync settings. */
10152 	get_freesync_config_for_crtc(dm_new_crtc_state,
10153 				     dm_new_conn_state);
10154 
10155 	return ret;
10156 
10157 fail:
10158 	if (new_stream)
10159 		dc_stream_release(new_stream);
10160 	return ret;
10161 }
10162 
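/*
 * Decide whether a plane update requires removing and re-adding the DC plane
 * state. Any modeset, CRTC change, color management change, or change to
 * another non-cursor plane on the same CRTC (scaling, rotation, blending,
 * alpha, colorspace, pixel format, tiling/DCC) forces a reset.
 */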
10163 static bool should_reset_plane(struct drm_atomic_state *state,
10164 			       struct drm_plane *plane,
10165 			       struct drm_plane_state *old_plane_state,
10166 			       struct drm_plane_state *new_plane_state)
10167 {
10168 	struct drm_plane *other;
10169 	struct drm_plane_state *old_other_state, *new_other_state;
10170 	struct drm_crtc_state *new_crtc_state;
10171 	int i;
10172 
10173 	/*
10174 	 * TODO: Remove this hack once the checks below are sufficient
10175 	 * enough to determine when we need to reset all the planes on
10176 	 * the stream.
10177 	 */
10178 	if (state->allow_modeset)
10179 		return true;
10180 
10181 	/* Exit early if we know that we're adding or removing the plane. */
10182 	if (old_plane_state->crtc != new_plane_state->crtc)
10183 		return true;
10184 
10185 	/* old crtc == new_crtc == NULL, plane not in context. */
10186 	if (!new_plane_state->crtc)
10187 		return false;
10188 
10189 	new_crtc_state =
10190 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10191 
10192 	if (!new_crtc_state)
10193 		return true;
10194 
10195 	/* CRTC Degamma changes currently require us to recreate planes. */
10196 	if (new_crtc_state->color_mgmt_changed)
10197 		return true;
10198 
10199 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10200 		return true;
10201 
10202 	/*
10203 	 * If there are any new primary or overlay planes being added or
10204 	 * removed then the z-order can potentially change. To ensure
10205 	 * correct z-order and pipe acquisition the current DC architecture
10206 	 * requires us to remove and recreate all existing planes.
10207 	 *
10208 	 * TODO: Come up with a more elegant solution for this.
10209 	 */
10210 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
10213 			continue;
10214 
10215 		if (old_other_state->crtc != new_plane_state->crtc &&
10216 		    new_other_state->crtc != new_plane_state->crtc)
10217 			continue;
10218 
10219 		if (old_other_state->crtc != new_other_state->crtc)
10220 			return true;
10221 
10222 		/* Src/dst size and scaling updates. */
10223 		if (old_other_state->src_w != new_other_state->src_w ||
10224 		    old_other_state->src_h != new_other_state->src_h ||
10225 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10226 		    old_other_state->crtc_h != new_other_state->crtc_h)
10227 			return true;
10228 
10229 		/* Rotation / mirroring updates. */
10230 		if (old_other_state->rotation != new_other_state->rotation)
10231 			return true;
10232 
10233 		/* Blending updates. */
10234 		if (old_other_state->pixel_blend_mode !=
10235 		    new_other_state->pixel_blend_mode)
10236 			return true;
10237 
10238 		/* Alpha updates. */
10239 		if (old_other_state->alpha != new_other_state->alpha)
10240 			return true;
10241 
10242 		/* Colorspace changes. */
10243 		if (old_other_state->color_range != new_other_state->color_range ||
10244 		    old_other_state->color_encoding != new_other_state->color_encoding)
10245 			return true;
10246 
10247 		/* Framebuffer checks fall at the end. */
10248 		if (!old_other_state->fb || !new_other_state->fb)
10249 			continue;
10250 
10251 		/* Pixel format changes can require bandwidth updates. */
10252 		if (old_other_state->fb->format != new_other_state->fb->format)
10253 			return true;
10254 
10255 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10256 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10257 
10258 		/* Tiling and DCC changes also require bandwidth updates. */
10259 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10260 		    old_afb->base.modifier != new_afb->base.modifier)
10261 			return true;
10262 	}
10263 
10264 	return false;
10265 }
10266 
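/*
 * Validate a framebuffer attached to the cursor plane: it must fit within the
 * hardware cursor limits, be unscaled and uncropped, use a pitch of 64, 128
 * or 256 pixels that matches its width, and be linear (untiled) when no
 * modifier is supplied.
 */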
10267 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10268 			      struct drm_plane_state *new_plane_state,
10269 			      struct drm_framebuffer *fb)
10270 {
10271 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10272 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10273 	unsigned int pitch;
10274 	bool linear;
10275 
10276 	if (fb->width > new_acrtc->max_cursor_width ||
10277 	    fb->height > new_acrtc->max_cursor_height) {
10278 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10279 				 new_plane_state->fb->width,
10280 				 new_plane_state->fb->height);
10281 		return -EINVAL;
10282 	}
10283 	if (new_plane_state->src_w != fb->width << 16 ||
10284 	    new_plane_state->src_h != fb->height << 16) {
10285 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10286 		return -EINVAL;
10287 	}
10288 
10289 	/* Pitch in pixels */
10290 	pitch = fb->pitches[0] / fb->format->cpp[0];
10291 
10292 	if (fb->width != pitch) {
10293 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10294 				 fb->width, pitch);
10295 		return -EINVAL;
10296 	}
10297 
10298 	switch (pitch) {
10299 	case 64:
10300 	case 128:
10301 	case 256:
10302 		/* FB pitch is supported by cursor plane */
10303 		break;
10304 	default:
10305 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10306 		return -EINVAL;
10307 	}
10308 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10311 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10312 		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10316 		} else {
10317 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10318 		}
10319 		if (!linear) {
10320 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10321 			return -EINVAL;
10322 		}
10323 	}
10324 
10325 	return 0;
10326 }
10327 
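/*
 * Validate one plane against the requested atomic state: cursor planes only
 * get a sanity check of position and FB, other planes are removed from or
 * added to the DC context as needed, with a full surface update forced on
 * every change.
 */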
10328 static int dm_update_plane_state(struct dc *dc,
10329 				 struct drm_atomic_state *state,
10330 				 struct drm_plane *plane,
10331 				 struct drm_plane_state *old_plane_state,
10332 				 struct drm_plane_state *new_plane_state,
10333 				 bool enable,
10334 				 bool *lock_and_validation_needed)
10335 {
10336 
10337 	struct dm_atomic_state *dm_state = NULL;
10338 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10339 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10340 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10341 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10342 	struct amdgpu_crtc *new_acrtc;
10343 	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
10348 	old_plane_crtc = old_plane_state->crtc;
10349 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10350 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10351 
10352 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10353 		if (!enable || !new_plane_crtc ||
10354 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10355 			return 0;
10356 
10357 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10358 
10359 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10360 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10361 			return -EINVAL;
10362 		}
10363 
10364 		if (new_plane_state->fb) {
10365 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10366 						 new_plane_state->fb);
10367 			if (ret)
10368 				return ret;
10369 		}
10370 
10371 		return 0;
10372 	}
10373 
10374 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10375 					 new_plane_state);
10376 
10377 	/* Remove any changed/removed planes */
10378 	if (!enable) {
10379 		if (!needs_reset)
10380 			return 0;
10381 
10382 		if (!old_plane_crtc)
10383 			return 0;
10384 
10385 		old_crtc_state = drm_atomic_get_old_crtc_state(
10386 				state, old_plane_crtc);
10387 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10388 
10389 		if (!dm_old_crtc_state->stream)
10390 			return 0;
10391 
10392 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10393 				plane->base.id, old_plane_crtc->base.id);
10394 
10395 		ret = dm_atomic_get_state(state, &dm_state);
10396 		if (ret)
10397 			return ret;
10398 
10399 		if (!dc_remove_plane_from_context(
10400 				dc,
10401 				dm_old_crtc_state->stream,
10402 				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
10410 		dm_new_plane_state->dc_state = NULL;
10411 
10412 		*lock_and_validation_needed = true;
10413 
10414 	} else { /* Add new planes */
10415 		struct dc_plane_state *dc_new_plane_state;
10416 
10417 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10418 			return 0;
10419 
10420 		if (!new_plane_crtc)
10421 			return 0;
10422 
10423 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10424 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10425 
10426 		if (!dm_new_crtc_state->stream)
10427 			return 0;
10428 
10429 		if (!needs_reset)
10430 			return 0;
10431 
10432 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10433 		if (ret)
10434 			return ret;
10435 
10436 		WARN_ON(dm_new_plane_state->dc_state);
10437 
10438 		dc_new_plane_state = dc_create_plane_state(dc);
10439 		if (!dc_new_plane_state)
10440 			return -ENOMEM;
10441 
10442 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10443 				 plane->base.id, new_plane_crtc->base.id);
10444 
10445 		ret = fill_dc_plane_attributes(
10446 			drm_to_adev(new_plane_crtc->dev),
10447 			dc_new_plane_state,
10448 			new_plane_state,
10449 			new_crtc_state);
10450 		if (ret) {
10451 			dc_plane_state_release(dc_new_plane_state);
10452 			return ret;
10453 		}
10454 
10455 		ret = dm_atomic_get_state(state, &dm_state);
10456 		if (ret) {
10457 			dc_plane_state_release(dc_new_plane_state);
10458 			return ret;
10459 		}
10460 
10461 		/*
10462 		 * Any atomic check errors that occur after this will
10463 		 * not need a release. The plane state will be attached
10464 		 * to the stream, and therefore part of the atomic
10465 		 * state. It'll be released when the atomic state is
10466 		 * cleaned.
10467 		 */
10468 		if (!dc_add_plane_to_context(
10469 				dc,
10470 				dm_new_crtc_state->stream,
10471 				dc_new_plane_state,
10472 				dm_state->context)) {
10473 
10474 			dc_plane_state_release(dc_new_plane_state);
10475 			return -EINVAL;
10476 		}
10477 
10478 		dm_new_plane_state->dc_state = dc_new_plane_state;
10479 
10480 		/* Tell DC to do a full surface update every time there
10481 		 * is a plane change. Inefficient, but works for now.
10482 		 */
10483 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10484 
10485 		*lock_and_validation_needed = true;
10486 	}
10487 
10488 
10489 	return ret;
10490 }
10491 
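/*
 * Reject atomic states where the cursor plane would be scaled differently
 * from the primary plane. Scale factors are compared in 1/1000 units, e.g.
 * a 64x64 cursor FB displayed at 64x64 gives a scale of 1000 (1:1).
 */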
10492 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10493 				struct drm_crtc *crtc,
10494 				struct drm_crtc_state *new_crtc_state)
10495 {
10496 	struct drm_plane_state *new_cursor_state, *new_primary_state;
10497 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10498 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check the cursor plane's
	 * blending properties match the primary plane's.
	 */
10503 
10504 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10505 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10506 	if (!new_cursor_state || !new_primary_state ||
10507 	    !new_cursor_state->fb || !new_primary_state->fb) {
10508 		return 0;
10509 	}
10510 
10511 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10512 			 (new_cursor_state->src_w >> 16);
10513 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10514 			 (new_cursor_state->src_h >> 16);
10515 
10516 	primary_scale_w = new_primary_state->crtc_w * 1000 /
10517 			 (new_primary_state->src_w >> 16);
10518 	primary_scale_h = new_primary_state->crtc_h * 1000 /
10519 			 (new_primary_state->src_h >> 16);
10520 
10521 	if (cursor_scale_w != primary_scale_w ||
10522 	    cursor_scale_h != primary_scale_h) {
10523 		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10524 		return -EINVAL;
10525 	}
10526 
10527 	return 0;
10528 }
10529 
10530 #if defined(CONFIG_DRM_AMD_DC_DCN)
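/*
 * When a modeset touches a CRTC driving an MST connector, pull every other
 * CRTC that shares the same MST topology (and may need DSC reconfiguration)
 * into the atomic state.
 */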
10531 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10532 {
10533 	struct drm_connector *connector;
10534 	struct drm_connector_state *conn_state;
10535 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
10538 		if (conn_state->crtc != crtc)
10539 			continue;
10540 
10541 		aconnector = to_amdgpu_dm_connector(connector);
10542 		if (!aconnector->port || !aconnector->mst_port)
10543 			aconnector = NULL;
10544 		else
10545 			break;
10546 	}
10547 
10548 	if (!aconnector)
10549 		return 0;
10550 
10551 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10552 }
10553 #endif
10554 
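/*
 * The hardware cursor is drawn on the topmost enabled pipe. When an overlay
 * plane is active it must fully cover the primary plane, otherwise the cursor
 * could not be displayed consistently over the whole primary surface.
 */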
10555 static int validate_overlay(struct drm_atomic_state *state)
10556 {
10557 	int i;
10558 	struct drm_plane *plane;
10559 	struct drm_plane_state *new_plane_state;
10560 	struct drm_plane_state *primary_state, *overlay_state = NULL;
10561 
10562 	/* Check if primary plane is contained inside overlay */
10563 	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10564 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10565 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10566 				return 0;
10567 
10568 			overlay_state = new_plane_state;
10569 			continue;
10570 		}
10571 	}
10572 
10573 	/* check if we're making changes to the overlay plane */
10574 	if (!overlay_state)
10575 		return 0;
10576 
10577 	/* check if overlay plane is enabled */
10578 	if (!overlay_state->crtc)
10579 		return 0;
10580 
10581 	/* find the primary plane for the CRTC that the overlay is enabled on */
10582 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10583 	if (IS_ERR(primary_state))
10584 		return PTR_ERR(primary_state);
10585 
10586 	/* check if primary plane is enabled */
10587 	if (!primary_state->crtc)
10588 		return 0;
10589 
10590 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10591 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10592 	    primary_state->crtc_y < overlay_state->crtc_y ||
10593 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10594 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10595 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10596 		return -EINVAL;
10597 	}
10598 
10599 	return 0;
10600 }
10601 
10602 /**
10603  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10604  * @dev: The DRM device
10605  * @state: The atomic state to commit
10606  *
10607  * Validate that the given atomic state is programmable by DC into hardware.
10608  * This involves constructing a &struct dc_state reflecting the new hardware
10609  * state we wish to commit, then querying DC to see if it is programmable. It's
10610  * important not to modify the existing DC state. Otherwise, atomic_check
10611  * may unexpectedly commit hardware changes.
10612  *
10613  * When validating the DC state, it's important that the right locks are
10614  * acquired. For full updates case which removes/adds/updates streams on one
10615  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10616  * that any such full update commit will wait for completion of any outstanding
10617  * flip using DRMs synchronization events.
10618  *
10619  * Note that DM adds the affected connectors for all CRTCs in state, when that
10620  * might not seem necessary. This is because DC stream creation requires the
10621  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10622  * be possible but non-trivial - a possible TODO item.
10623  *
 * Return: -Error code if validation failed, 0 otherwise.
10625  */
10626 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10627 				  struct drm_atomic_state *state)
10628 {
10629 	struct amdgpu_device *adev = drm_to_adev(dev);
10630 	struct dm_atomic_state *dm_state = NULL;
10631 	struct dc *dc = adev->dm.dc;
10632 	struct drm_connector *connector;
10633 	struct drm_connector_state *old_con_state, *new_con_state;
10634 	struct drm_crtc *crtc;
10635 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10636 	struct drm_plane *plane;
10637 	struct drm_plane_state *old_plane_state, *new_plane_state;
10638 	enum dc_status status;
10639 	int ret, i;
10640 	bool lock_and_validation_needed = false;
10641 	struct dm_crtc_state *dm_old_crtc_state;
10642 #if defined(CONFIG_DRM_AMD_DC_DCN)
10643 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10644 #endif
10645 
10646 	trace_amdgpu_dm_atomic_check_begin(state);
10647 
10648 	ret = drm_atomic_helper_check_modeset(dev, state);
10649 	if (ret)
10650 		goto fail;
10651 
10652 	/* Check connector changes */
10653 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10654 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10655 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10656 
10657 		/* Skip connectors that are disabled or part of modeset already. */
10658 		if (!old_con_state->crtc && !new_con_state->crtc)
10659 			continue;
10660 
10661 		if (!new_con_state->crtc)
10662 			continue;
10663 
10664 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10665 		if (IS_ERR(new_crtc_state)) {
10666 			ret = PTR_ERR(new_crtc_state);
10667 			goto fail;
10668 		}
10669 
10670 		if (dm_old_con_state->abm_level !=
10671 		    dm_new_con_state->abm_level)
10672 			new_crtc_state->connectors_changed = true;
10673 	}
10674 
10675 #if defined(CONFIG_DRM_AMD_DC_DCN)
10676 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10677 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10678 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10679 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10680 				if (ret)
10681 					goto fail;
10682 			}
10683 		}
10684 	}
10685 #endif
10686 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10687 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10688 
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
10693 			continue;
10694 
10695 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10696 		if (ret)
10697 			goto fail;
10698 
10699 		if (!new_crtc_state->enable)
10700 			continue;
10701 
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;
10705 
10706 		ret = drm_atomic_add_affected_planes(state, crtc);
10707 		if (ret)
10708 			goto fail;
10709 
10710 		if (dm_old_crtc_state->dsc_force_changed)
10711 			new_crtc_state->mode_changed = true;
10712 	}
10713 
10714 	/*
10715 	 * Add all primary and overlay planes on the CRTC to the state
10716 	 * whenever a plane is enabled to maintain correct z-ordering
10717 	 * and to enable fast surface updates.
10718 	 */
10719 	drm_for_each_crtc(crtc, dev) {
10720 		bool modified = false;
10721 
10722 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10723 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10724 				continue;
10725 
10726 			if (new_plane_state->crtc == crtc ||
10727 			    old_plane_state->crtc == crtc) {
10728 				modified = true;
10729 				break;
10730 			}
10731 		}
10732 
10733 		if (!modified)
10734 			continue;
10735 
10736 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10737 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10738 				continue;
10739 
10740 			new_plane_state =
10741 				drm_atomic_get_plane_state(state, plane);
10742 
10743 			if (IS_ERR(new_plane_state)) {
10744 				ret = PTR_ERR(new_plane_state);
10745 				goto fail;
10746 			}
10747 		}
10748 	}
10749 
	/* Remove existing planes if they are modified */
10751 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10752 		ret = dm_update_plane_state(dc, state, plane,
10753 					    old_plane_state,
10754 					    new_plane_state,
10755 					    false,
10756 					    &lock_and_validation_needed);
10757 		if (ret)
10758 			goto fail;
10759 	}
10760 
10761 	/* Disable all crtcs which require disable */
10762 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10763 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10764 					   old_crtc_state,
10765 					   new_crtc_state,
10766 					   false,
10767 					   &lock_and_validation_needed);
10768 		if (ret)
10769 			goto fail;
10770 	}
10771 
10772 	/* Enable all crtcs which require enable */
10773 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10774 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10775 					   old_crtc_state,
10776 					   new_crtc_state,
10777 					   true,
10778 					   &lock_and_validation_needed);
10779 		if (ret)
10780 			goto fail;
10781 	}
10782 
10783 	ret = validate_overlay(state);
10784 	if (ret)
10785 		goto fail;
10786 
10787 	/* Add new/modified planes */
10788 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10789 		ret = dm_update_plane_state(dc, state, plane,
10790 					    old_plane_state,
10791 					    new_plane_state,
10792 					    true,
10793 					    &lock_and_validation_needed);
10794 		if (ret)
10795 			goto fail;
10796 	}
10797 
10798 	/* Run this here since we want to validate the streams we created */
10799 	ret = drm_atomic_helper_check_planes(dev, state);
10800 	if (ret)
10801 		goto fail;
10802 
10803 	/* Check cursor planes scaling */
10804 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10805 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10806 		if (ret)
10807 			goto fail;
10808 	}
10809 
10810 	if (state->legacy_cursor_update) {
10811 		/*
10812 		 * This is a fast cursor update coming from the plane update
10813 		 * helper, check if it can be done asynchronously for better
10814 		 * performance.
10815 		 */
10816 		state->async_update =
10817 			!drm_atomic_helper_async_check(dev, state);
10818 
10819 		/*
10820 		 * Skip the remaining global validation if this is an async
10821 		 * update. Cursor updates can be done without affecting
10822 		 * state or bandwidth calcs and this avoids the performance
10823 		 * penalty of locking the private state object and
10824 		 * allocating a new dc_state.
10825 		 */
10826 		if (state->async_update)
10827 			return 0;
10828 	}
10829 
	/* Check scaling and underscan changes */
	/*
	 * TODO Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to
	 * decide how to handle.
	 */
10835 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10836 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10837 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10838 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10839 
10840 		/* Skip any modesets/resets */
10841 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10842 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10843 			continue;
10844 
		/* Skip anything that is not a scaling or underscan change */
10846 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10847 			continue;
10848 
10849 		lock_and_validation_needed = true;
10850 	}
10851 
10852 	/**
10853 	 * Streams and planes are reset when there are changes that affect
10854 	 * bandwidth. Anything that affects bandwidth needs to go through
10855 	 * DC global validation to ensure that the configuration can be applied
10856 	 * to hardware.
10857 	 *
10858 	 * We have to currently stall out here in atomic_check for outstanding
10859 	 * commits to finish in this case because our IRQ handlers reference
10860 	 * DRM state directly - we can end up disabling interrupts too early
10861 	 * if we don't.
10862 	 *
10863 	 * TODO: Remove this stall and drop DM state private objects.
10864 	 */
10865 	if (lock_and_validation_needed) {
10866 		ret = dm_atomic_get_state(state, &dm_state);
10867 		if (ret)
10868 			goto fail;
10869 
10870 		ret = do_aquire_global_lock(dev, state);
10871 		if (ret)
10872 			goto fail;
10873 
10874 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			ret = -EINVAL;
			goto fail;
		}
10877 
10878 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10879 		if (ret)
10880 			goto fail;
10881 #endif
10882 
10883 		/*
10884 		 * Perform validation of MST topology in the state:
10885 		 * We need to perform MST atomic check before calling
10886 		 * dc_validate_global_state(), or there is a chance
10887 		 * to get stuck in an infinite loop and hang eventually.
10888 		 */
10889 		ret = drm_dp_mst_atomic_check(state);
10890 		if (ret)
10891 			goto fail;
10892 		status = dc_validate_global_state(dc, dm_state->context, false);
10893 		if (status != DC_OK) {
10894 			drm_dbg_atomic(dev,
10895 				       "DC global validation failure: %s (%d)",
10896 				       dc_status_to_str(status), status);
10897 			ret = -EINVAL;
10898 			goto fail;
10899 		}
10900 	} else {
10901 		/*
10902 		 * The commit is a fast update. Fast updates shouldn't change
10903 		 * the DC context, affect global validation, and can have their
10904 		 * commit work done in parallel with other commits not touching
10905 		 * the same resource. If we have a new DC context as part of
10906 		 * the DM atomic state from validation we need to free it and
10907 		 * retain the existing one instead.
10908 		 *
10909 		 * Furthermore, since the DM atomic state only contains the DC
10910 		 * context and can safely be annulled, we can free the state
10911 		 * and clear the associated private object now to free
10912 		 * some memory and avoid a possible use-after-free later.
10913 		 */
10914 
10915 		for (i = 0; i < state->num_private_objs; i++) {
10916 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10917 
10918 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
10920 
10921 				dm_atomic_destroy_state(obj,
10922 						state->private_objs[i].state);
10923 
10924 				/* If i is not at the end of the array then the
10925 				 * last element needs to be moved to where i was
10926 				 * before the array can safely be truncated.
10927 				 */
10928 				if (i != j)
10929 					state->private_objs[i] =
10930 						state->private_objs[j];
10931 
10932 				state->private_objs[j].ptr = NULL;
10933 				state->private_objs[j].state = NULL;
10934 				state->private_objs[j].old_state = NULL;
10935 				state->private_objs[j].new_state = NULL;
10936 
10937 				state->num_private_objs = j;
10938 				break;
10939 			}
10940 		}
10941 	}
10942 
10943 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10945 		struct dm_crtc_state *dm_new_crtc_state =
10946 			to_dm_crtc_state(new_crtc_state);
10947 
10948 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10949 							 UPDATE_TYPE_FULL :
10950 							 UPDATE_TYPE_FAST;
10951 	}
10952 
10953 	/* Must be success */
10954 	WARN_ON(ret);
10955 
10956 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10957 
10958 	return ret;
10959 
10960 fail:
10961 	if (ret == -EDEADLK)
10962 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10963 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10964 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10965 	else
10966 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10967 
10968 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10969 
10970 	return ret;
10971 }
10972 
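/*
 * Read DP_DOWN_STREAM_PORT_COUNT over DPCD and report whether the sink can
 * ignore the MSA timing parameters (DP_MSA_TIMING_PAR_IGNORED), which DM uses
 * to decide whether to look for a FreeSync range in the EDID.
 */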
10973 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10974 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10975 {
10976 	uint8_t dpcd_data;
10977 	bool capable = false;
10978 
10979 	if (amdgpu_dm_connector->dc_link &&
10980 		dm_helpers_dp_read_dpcd(
10981 				NULL,
10982 				amdgpu_dm_connector->dc_link,
10983 				DP_DOWN_STREAM_PORT_COUNT,
10984 				&dpcd_data,
10985 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10987 	}
10988 
10989 	return capable;
10990 }
10991 
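/*
 * Hand one chunk of a CEA extension block to DMUB via the EDID_CEA command
 * and interpret the reply: an ACK only confirms the chunk was consumed, while
 * an AMD_VSDB reply carries the parsed FreeSync range for @vsdb.
 */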
10992 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10993 		unsigned int offset,
10994 		unsigned int total_length,
10995 		uint8_t *data,
10996 		unsigned int length,
10997 		struct amdgpu_hdmi_vsdb_info *vsdb)
10998 {
10999 	bool res;
11000 	union dmub_rb_cmd cmd;
11001 	struct dmub_cmd_send_edid_cea *input;
11002 	struct dmub_cmd_edid_cea_output *output;
11003 
11004 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11005 		return false;
11006 
11007 	memset(&cmd, 0, sizeof(cmd));
11008 
11009 	input = &cmd.edid_cea.data.input;
11010 
11011 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11012 	cmd.edid_cea.header.sub_type = 0;
11013 	cmd.edid_cea.header.payload_bytes =
11014 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11015 	input->offset = offset;
11016 	input->length = length;
11017 	input->total_length = total_length;
11018 	memcpy(input->payload, data, length);
11019 
11020 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11021 	if (!res) {
11022 		DRM_ERROR("EDID CEA parser failed\n");
11023 		return false;
11024 	}
11025 
11026 	output = &cmd.edid_cea.data.output;
11027 
11028 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11029 		if (!output->ack.success) {
11030 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11031 					output->ack.offset);
11032 		}
11033 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11034 		if (!output->amd_vsdb.vsdb_found)
11035 			return false;
11036 
11037 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11038 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11039 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11040 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11041 	} else {
11042 		DRM_WARN("Unknown EDID CEA parser results\n");
11043 		return false;
11044 	}
11045 
11046 	return true;
11047 }
11048 
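/*
 * Parse a CEA extension block for the AMD VSDB using the DMCU firmware
 * parser: stream the block 8 bytes at a time, wait for an ack on each chunk,
 * and read back the FreeSync min/max refresh rates once the whole block has
 * been sent.
 */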
11049 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11050 		uint8_t *edid_ext, int len,
11051 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11052 {
11053 	int i;
11054 
11055 	/* send extension block to DMCU for parsing */
11056 	for (i = 0; i < len; i += 8) {
11057 		bool res;
11058 		int offset;
11059 
11060 		/* send 8 bytes a time */
11061 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11062 			return false;
11063 
		if (i + 8 == len) {
11065 			/* EDID block sent completed, expect result */
11066 			int version, min_rate, max_rate;
11067 
11068 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11069 			if (res) {
11070 				/* amd vsdb found */
11071 				vsdb_info->freesync_supported = 1;
11072 				vsdb_info->amd_vsdb_version = version;
11073 				vsdb_info->min_refresh_rate_hz = min_rate;
11074 				vsdb_info->max_refresh_rate_hz = max_rate;
11075 				return true;
11076 			}
11077 			/* not amd vsdb */
11078 			return false;
11079 		}
11080 
		/* check for ack */
11082 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11083 		if (!res)
11084 			return false;
11085 	}
11086 
11087 	return false;
11088 }
11089 
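/*
 * Same as parse_edid_cea_dmcu() but using the DMUB-based parser; the VSDB
 * information is filled in chunk by chunk by dm_edid_parser_send_cea().
 */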
11090 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11091 		uint8_t *edid_ext, int len,
11092 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11093 {
11094 	int i;
11095 
	/* send extension block to DMUB for parsing */
11097 	for (i = 0; i < len; i += 8) {
11098 		/* send 8 bytes a time */
11099 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11100 			return false;
11101 	}
11102 
11103 	return vsdb_info->freesync_supported;
11104 }
11105 
11106 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11107 		uint8_t *edid_ext, int len,
11108 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11109 {
11110 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11111 
11112 	if (adev->dm.dmub_srv)
11113 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11114 	else
11115 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11116 }
11117 
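/*
 * Locate the CEA extension of @edid and have the display firmware parse it
 * for the AMD (FreeSync) VSDB. Returns the index of the CEA extension on
 * success, or -ENODEV if no suitable extension or VSDB is found.
 */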
11118 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11119 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11120 {
11121 	uint8_t *edid_ext = NULL;
11122 	int i;
11123 	bool valid_vsdb_found = false;
11124 
11125 	/*----- drm_find_cea_extension() -----*/
11126 	/* No EDID or EDID extensions */
11127 	if (edid == NULL || edid->extensions == 0)
11128 		return -ENODEV;
11129 
11130 	/* Find CEA extension */
11131 	for (i = 0; i < edid->extensions; i++) {
11132 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11133 		if (edid_ext[0] == CEA_EXT)
11134 			break;
11135 	}
11136 
11137 	if (i == edid->extensions)
11138 		return -ENODEV;
11139 
11140 	/*----- cea_db_offsets() -----*/
11141 	if (edid_ext[0] != CEA_EXT)
11142 		return -ENODEV;
11143 
11144 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11145 
11146 	return valid_vsdb_found ? i : -ENODEV;
11147 }
11148 
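/**
 * amdgpu_dm_update_freesync_caps - update the connector's FreeSync state
 * @connector: DRM connector to update
 * @edid: parsed EDID of the sink, or NULL when the sink is gone
 *
 * Determine whether the sink supports a variable refresh rate: for DP/eDP
 * sinks the range is read from the EDID monitor range descriptor (when the
 * sink can ignore MSA timings), for HDMI sinks from the AMD VSDB in the CEA
 * extension. The result is cached in the connector state and exposed through
 * the "vrr_capable" connector property.
 */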
11149 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11150 					struct edid *edid)
11151 {
11152 	int i = 0;
11153 	struct detailed_timing *timing;
11154 	struct detailed_non_pixel *data;
11155 	struct detailed_data_monitor_range *range;
11156 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11157 			to_amdgpu_dm_connector(connector);
11158 	struct dm_connector_state *dm_con_state = NULL;
11159 	struct dc_sink *sink;
11160 
11161 	struct drm_device *dev = connector->dev;
11162 	struct amdgpu_device *adev = drm_to_adev(dev);
11163 	bool freesync_capable = false;
11164 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11165 
11166 	if (!connector->state) {
11167 		DRM_ERROR("%s - Connector has no state", __func__);
11168 		goto update;
11169 	}
11170 
11171 	sink = amdgpu_dm_connector->dc_sink ?
11172 		amdgpu_dm_connector->dc_sink :
11173 		amdgpu_dm_connector->dc_em_sink;
11174 
11175 	if (!edid || !sink) {
11176 		dm_con_state = to_dm_connector_state(connector->state);
11177 
11178 		amdgpu_dm_connector->min_vfreq = 0;
11179 		amdgpu_dm_connector->max_vfreq = 0;
11180 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11181 		connector->display_info.monitor_range.min_vfreq = 0;
11182 		connector->display_info.monitor_range.max_vfreq = 0;
11183 		freesync_capable = false;
11184 
11185 		goto update;
11186 	}
11187 
11188 	dm_con_state = to_dm_connector_state(connector->state);
11189 
11190 	if (!adev->dm.freesync_module)
11191 		goto update;
11192 
11193 
	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    sink->sink_signal == SIGNAL_TYPE_EDP) {
11196 		bool edid_check_required = false;
11197 
11198 		if (edid) {
11199 			edid_check_required = is_dp_capable_without_timing_msa(
11200 						adev->dm.dc,
11201 						amdgpu_dm_connector);
11202 		}
11203 
		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
11206 			for (i = 0; i < 4; i++) {
11207 
11208 				timing	= &edid->detailed_timings[i];
11209 				data	= &timing->data.other_data;
11210 				range	= &data->data.range;
11211 				/*
11212 				 * Check if monitor has continuous frequency mode
11213 				 */
11214 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11215 					continue;
11216 				/*
11217 				 * Check for flag range limits only. If flag == 1 then
11218 				 * no additional timing information provided.
11219 				 * Default GTF, GTF Secondary curve and CVT are not
11220 				 * supported
11221 				 */
11222 				if (range->flags != 1)
11223 					continue;
11224 
11225 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11226 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11227 				amdgpu_dm_connector->pixel_clock_mhz =
11228 					range->pixel_clock_mhz * 10;
11229 
11230 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11231 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11232 
11233 				break;
11234 			}
11235 
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
11241 		}
11242 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11243 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11244 		if (i >= 0 && vsdb_info.freesync_supported) {
11245 			timing  = &edid->detailed_timings[i];
11246 			data    = &timing->data.other_data;
11247 
11248 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11249 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11250 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11251 				freesync_capable = true;
11252 
11253 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11254 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11255 		}
11256 	}
11257 
11258 update:
11259 	if (dm_con_state)
11260 		dm_con_state->freesync_capable = freesync_capable;
11261 
11262 	if (connector->vrr_capable_property)
11263 		drm_connector_set_vrr_capable_property(connector,
11264 						       freesync_capable);
11265 }
11266 
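/*
 * Apply the current force_timing_sync setting to every stream in the active
 * DC state and retrigger per-frame CRTC master synchronization.
 */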
11267 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11268 {
11269 	struct amdgpu_device *adev = drm_to_adev(dev);
11270 	struct dc *dc = adev->dm.dc;
11271 	int i;
11272 
11273 	mutex_lock(&adev->dm.dc_lock);
11274 	if (dc->current_state) {
11275 		for (i = 0; i < dc->current_state->stream_count; ++i)
11276 			dc->current_state->streams[i]
11277 				->triggered_crtc_reset.enabled =
11278 				adev->dm.force_timing_sync;
11279 
11280 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11281 		dc_trigger_sync(dc, dc->current_state);
11282 	}
11283 	mutex_unlock(&adev->dm.dc_lock);
11284 }
11285 
11286 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11287 		       uint32_t value, const char *func_name)
11288 {
11289 #ifdef DM_CHECK_ADDR_0
11290 	if (address == 0) {
11291 		DC_ERR("invalid register write. address = 0");
11292 		return;
11293 	}
11294 #endif
11295 	cgs_write_register(ctx->cgs_device, address, value);
11296 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11297 }
11298 
11299 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11300 			  const char *func_name)
11301 {
11302 	uint32_t value;
11303 #ifdef DM_CHECK_ADDR_0
11304 	if (address == 0) {
11305 		DC_ERR("invalid register read; address = 0\n");
11306 		return 0;
11307 	}
11308 #endif
11309 
11310 	if (ctx->dmub_srv &&
11311 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11312 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11313 		ASSERT(false);
11314 		return 0;
11315 	}
11316 
11317 	value = cgs_read_register(ctx->cgs_device, address);
11318 
11319 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11320 
11321 	return value;
11322 }
11323 
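/*
 * Submit a DPCD AUX transaction to DMUB and block (up to 10 seconds) for the
 * notification carrying the result. On success the reply command and, for
 * reads, the returned data are copied back into @payload; the length of the
 * AUX reply is returned, or -1 on timeout.
 */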
11324 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
11325 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
11326 {
11327 	struct amdgpu_device *adev = ctx->driver_context;
11328 	int ret = 0;
11329 
11330 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
11331 	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
11332 	if (ret == 0) {
11333 		*operation_result = AUX_RET_ERROR_TIMEOUT;
11334 		return -1;
11335 	}
11336 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
11337 
11338 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11339 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
11340 
		/* For a read, copy the returned data into the payload */
		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
		    (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
			       adev->dm.dmub_notify->aux_reply.length);
11346 	}
11347 
11348 	return adev->dm.dmub_notify->aux_reply.length;
11349 }
11350