1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/dp/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
85 
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93 
94 #include "soc15_common.h"
95 #endif
96 
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
100 
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
119 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
121 
122 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
123 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
124 
125 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
126 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
127 
128 /* Number of bytes in PSP header for firmware. */
129 #define PSP_HEADER_BYTES 0x100
130 
131 /* Number of bytes in PSP footer for firmware. */
132 #define PSP_FOOTER_BYTES 0x100
133 
134 /**
135  * DOC: overview
136  *
137  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
138  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
139  * requests into DC requests, and DC responses into DRM responses.
140  *
141  * The root control structure is &struct amdgpu_display_manager.
142  */
143 
144 /* basic init/fini API */
145 static int amdgpu_dm_init(struct amdgpu_device *adev);
146 static void amdgpu_dm_fini(struct amdgpu_device *adev);
147 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
148 
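/*
 * Map the DP dongle type reported in the link's DPCD caps to the DRM
 * subconnector type exposed through the "subconnector" property.
 */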
149 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
150 {
151 	switch (link->dpcd_caps.dongle_type) {
152 	case DISPLAY_DONGLE_NONE:
153 		return DRM_MODE_SUBCONNECTOR_Native;
154 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
155 		return DRM_MODE_SUBCONNECTOR_VGA;
156 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
157 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
158 		return DRM_MODE_SUBCONNECTOR_DVID;
159 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
160 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
161 		return DRM_MODE_SUBCONNECTOR_HDMIA;
162 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
163 	default:
164 		return DRM_MODE_SUBCONNECTOR_Unknown;
165 	}
166 }
167 
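/*
 * Refresh the DP "subconnector" property for a connector. Only DisplayPort
 * connectors are updated; the value falls back to "Unknown" when no sink is
 * attached.
 */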
168 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
169 {
170 	struct dc_link *link = aconnector->dc_link;
171 	struct drm_connector *connector = &aconnector->base;
172 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
173 
174 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
175 		return;
176 
177 	if (aconnector->dc_sink)
178 		subconnector = get_subconnector_type(link);
179 
180 	drm_object_property_set_value(&connector->base,
181 			connector->dev->mode_config.dp_subconnector_property,
182 			subconnector);
183 }
184 
185 /*
186  * initializes drm_device display related structures, based on the information
187  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
188  * drm_encoder, drm_mode_config
189  *
190  * Returns 0 on success
191  */
192 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
193 /* removes and deallocates the drm structures, created by the above function */
194 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
195 
196 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
197 				struct drm_plane *plane,
198 				unsigned long possible_crtcs,
199 				const struct dc_plane_cap *plane_cap);
200 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
201 			       struct drm_plane *plane,
202 			       uint32_t link_index);
203 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
204 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
205 				    uint32_t link_index,
206 				    struct amdgpu_encoder *amdgpu_encoder);
207 static int amdgpu_dm_encoder_init(struct drm_device *dev,
208 				  struct amdgpu_encoder *aencoder,
209 				  uint32_t link_index);
210 
211 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
212 
213 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
214 
215 static int amdgpu_dm_atomic_check(struct drm_device *dev,
216 				  struct drm_atomic_state *state);
217 
218 static void handle_cursor_update(struct drm_plane *plane,
219 				 struct drm_plane_state *old_plane_state);
220 
221 static const struct drm_format_info *
222 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
223 
224 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
225 static void handle_hpd_rx_irq(void *param);
226 
227 static bool
228 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
229 				 struct drm_crtc_state *new_crtc_state);
230 /**
231  * dm_vblank_get_counter() - Get counter for number of vertical blanks
232  * @adev: desired amdgpu device
233  * @crtc: index of the CRTC to get the counter from
234  *
235  * Return: Counter for vertical blanks, or 0 if the CRTC index is out of
236  * range or no stream is attached to the CRTC.
237  */
243 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
244 {
245 	if (crtc >= adev->mode_info.num_crtc)
246 		return 0;
247 	else {
248 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
249 
250 		if (acrtc->dm_irq_params.stream == NULL) {
251 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
252 				  crtc);
253 			return 0;
254 		}
255 
256 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
257 	}
258 }
259 
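/*
 * Read back the current scanout position of a CRTC from DC and pack it into
 * the register-style layout the base driver expects: *position holds the
 * vertical position in the low 16 bits and the horizontal position in the
 * high 16 bits; *vbl holds vblank start (low) and vblank end (high).
 */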
260 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
261 				  u32 *vbl, u32 *position)
262 {
263 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
264 
265 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
266 		return -EINVAL;
267 	else {
268 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
269 
270 		if (acrtc->dm_irq_params.stream ==  NULL) {
271 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
272 				  crtc);
273 			return 0;
274 		}
275 
276 		/*
277 		 * TODO rework base driver to use values directly.
278 		 * for now parse it back into reg-format
279 		 */
280 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
281 					 &v_blank_start,
282 					 &v_blank_end,
283 					 &h_position,
284 					 &v_position);
285 
286 		*position = v_position | (h_position << 16);
287 		*vbl = v_blank_start | (v_blank_end << 16);
288 	}
289 
290 	return 0;
291 }
292 
293 static bool dm_is_idle(void *handle)
294 {
295 	/* XXX todo */
296 	return true;
297 }
298 
299 static int dm_wait_for_idle(void *handle)
300 {
301 	/* XXX todo */
302 	return 0;
303 }
304 
305 static bool dm_check_soft_reset(void *handle)
306 {
307 	return false;
308 }
309 
310 static int dm_soft_reset(void *handle)
311 {
312 	/* XXX todo */
313 	return 0;
314 }
315 
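/*
 * Look up the amdgpu_crtc that owns a given OTG instance. Falls back to the
 * first CRTC (with a warning) when the instance is -1.
 */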
316 static struct amdgpu_crtc *
317 get_crtc_by_otg_inst(struct amdgpu_device *adev,
318 		     int otg_inst)
319 {
320 	struct drm_device *dev = adev_to_drm(adev);
321 	struct drm_crtc *crtc;
322 	struct amdgpu_crtc *amdgpu_crtc;
323 
324 	if (WARN_ON(otg_inst == -1))
325 		return adev->mode_info.crtcs[0];
326 
327 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
328 		amdgpu_crtc = to_amdgpu_crtc(crtc);
329 
330 		if (amdgpu_crtc->otg_inst == otg_inst)
331 			return amdgpu_crtc;
332 	}
333 
334 	return NULL;
335 }
336 
337 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
338 {
339 	return acrtc->dm_irq_params.freesync_config.state ==
340 		       VRR_STATE_ACTIVE_VARIABLE ||
341 	       acrtc->dm_irq_params.freesync_config.state ==
342 		       VRR_STATE_ACTIVE_FIXED;
343 }
344 
345 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
346 {
347 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
348 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
349 }
350 
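/*
 * Decide whether DC needs a vmin/vmax timing adjustment: either fixed-rate
 * VRR is requested, or the VRR active state changed between the old and new
 * CRTC states.
 */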
351 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
352 					      struct dm_crtc_state *new_state)
353 {
354 	return new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED ||
355 	       amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state);
356 }
361 
362 /**
363  * dm_pflip_high_irq() - Handle pageflip interrupt
364  * @interrupt_params: interrupt parameters, used to look up the CRTC instance
365  *
366  * Handles the pageflip interrupt by notifying all interested parties
367  * that the pageflip has been completed.
368  */
369 static void dm_pflip_high_irq(void *interrupt_params)
370 {
371 	struct amdgpu_crtc *amdgpu_crtc;
372 	struct common_irq_params *irq_params = interrupt_params;
373 	struct amdgpu_device *adev = irq_params->adev;
374 	unsigned long flags;
375 	struct drm_pending_vblank_event *e;
376 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
377 	bool vrr_active;
378 
379 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
380 
381 	/* IRQ could occur when in initial stage */
382 	/* TODO work and BO cleanup */
383 	if (amdgpu_crtc == NULL) {
384 		DC_LOG_PFLIP("CRTC is null, returning.\n");
385 		return;
386 	}
387 
388 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
389 
390 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
391 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
392 						 amdgpu_crtc->pflip_status,
393 						 AMDGPU_FLIP_SUBMITTED,
394 						 amdgpu_crtc->crtc_id,
395 						 amdgpu_crtc);
396 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
397 		return;
398 	}
399 
400 	/* page flip completed. */
401 	e = amdgpu_crtc->event;
402 	amdgpu_crtc->event = NULL;
403 
404 	WARN_ON(!e);
405 
406 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
407 
408 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
409 	if (!vrr_active ||
410 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
411 				      &v_blank_end, &hpos, &vpos) ||
412 	    (vpos < v_blank_start)) {
413 		/* Update to correct count and vblank timestamp if racing with
414 		 * vblank irq. This also updates to the correct vblank timestamp
415 		 * even in VRR mode, as scanout is past the front-porch atm.
416 		 */
417 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
418 
419 		/* Wake up userspace by sending the pageflip event with proper
420 		 * count and timestamp of vblank of flip completion.
421 		 */
422 		if (e) {
423 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
424 
425 			/* Event sent, so done with vblank for this flip */
426 			drm_crtc_vblank_put(&amdgpu_crtc->base);
427 		}
428 	} else if (e) {
429 		/* VRR active and inside front-porch: vblank count and
430 		 * timestamp for pageflip event will only be up to date after
431 		 * drm_crtc_handle_vblank() has been executed from late vblank
432 		 * irq handler after start of back-porch (vline 0). We queue the
433 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
434 		 * updated timestamp and count, once it runs after us.
435 		 *
436 		 * We need to open-code this instead of using the helper
437 		 * drm_crtc_arm_vblank_event(), as that helper would
438 		 * call drm_crtc_accurate_vblank_count(), which we must
439 		 * not call in VRR mode while we are in front-porch!
440 		 */
441 
442 		/* sequence will be replaced by real count during send-out. */
443 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
444 		e->pipe = amdgpu_crtc->crtc_id;
445 
446 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
447 		e = NULL;
448 	}
449 
450 	/* Keep track of vblank of this flip for flip throttling. We use the
451 	 * cooked hw counter, as that one incremented at start of this vblank
452 	 * of pageflip completion, so last_flip_vblank is the forbidden count
453 	 * for queueing new pageflips if vsync + VRR is enabled.
454 	 */
455 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
456 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
457 
458 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
459 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
460 
461 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
462 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
463 		     vrr_active, (int) !e);
464 }
465 
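/**
 * dm_vupdate_high_irq() - Handles the VUPDATE interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC instance
 *
 * Tracks the measured refresh rate and, in VRR mode, performs the core vblank
 * handling deferred from the VBLANK interrupt, plus BTR processing for
 * pre-DCE12 ASICs.
 */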
466 static void dm_vupdate_high_irq(void *interrupt_params)
467 {
468 	struct common_irq_params *irq_params = interrupt_params;
469 	struct amdgpu_device *adev = irq_params->adev;
470 	struct amdgpu_crtc *acrtc;
471 	struct drm_device *drm_dev;
472 	struct drm_vblank_crtc *vblank;
473 	ktime_t frame_duration_ns, previous_timestamp;
474 	unsigned long flags;
475 	int vrr_active;
476 
477 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
478 
479 	if (acrtc) {
480 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
481 		drm_dev = acrtc->base.dev;
482 		vblank = &drm_dev->vblank[acrtc->base.index];
483 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
484 		frame_duration_ns = vblank->time - previous_timestamp;
485 
486 		if (frame_duration_ns > 0) {
487 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
488 						frame_duration_ns,
489 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
490 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
491 		}
492 
493 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
494 			      acrtc->crtc_id,
495 			      vrr_active);
496 
497 		/* Core vblank handling is done here after end of front-porch in
498 		 * vrr mode, as vblank timestamping will give valid results
499 		 * now that it is done after the front-porch. This will also deliver
500 		 * page-flip completion events that have been queued to us
501 		 * if a pageflip happened inside front-porch.
502 		 */
503 		if (vrr_active) {
504 			drm_crtc_handle_vblank(&acrtc->base);
505 
506 			/* BTR processing for pre-DCE12 ASICs */
507 			if (acrtc->dm_irq_params.stream &&
508 			    adev->family < AMDGPU_FAMILY_AI) {
509 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
510 				mod_freesync_handle_v_update(
511 				    adev->dm.freesync_module,
512 				    acrtc->dm_irq_params.stream,
513 				    &acrtc->dm_irq_params.vrr_params);
514 
515 				dc_stream_adjust_vmin_vmax(
516 				    adev->dm.dc,
517 				    acrtc->dm_irq_params.stream,
518 				    &acrtc->dm_irq_params.vrr_params.adjust);
519 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
520 			}
521 		}
522 	}
523 }
524 
525 /**
526  * dm_crtc_high_irq() - Handles CRTC interrupt
527  * @interrupt_params: used for determining the CRTC instance
528  *
529  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
530  * event handler.
531  */
532 static void dm_crtc_high_irq(void *interrupt_params)
533 {
534 	struct common_irq_params *irq_params = interrupt_params;
535 	struct amdgpu_device *adev = irq_params->adev;
536 	struct amdgpu_crtc *acrtc;
537 	unsigned long flags;
538 	int vrr_active;
539 
540 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
541 	if (!acrtc)
542 		return;
543 
544 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
545 
546 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
547 		      vrr_active, acrtc->dm_irq_params.active_planes);
548 
549 	/*
550 	 * Core vblank handling at start of front-porch is only possible
551 	 * in non-vrr mode, as only there vblank timestamping will give
552 	 * valid results while done in front-porch. Otherwise defer it
553 	 * to dm_vupdate_high_irq after end of front-porch.
554 	 */
555 	if (!vrr_active)
556 		drm_crtc_handle_vblank(&acrtc->base);
557 
558 	/*
559 	 * Following stuff must happen at start of vblank, for crc
560 	 * computation and below-the-range btr support in vrr mode.
561 	 */
562 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
563 
564 	/* BTR updates need to happen before VUPDATE on Vega and above. */
565 	if (adev->family < AMDGPU_FAMILY_AI)
566 		return;
567 
568 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
569 
570 	if (acrtc->dm_irq_params.stream &&
571 	    acrtc->dm_irq_params.vrr_params.supported &&
572 	    acrtc->dm_irq_params.freesync_config.state ==
573 		    VRR_STATE_ACTIVE_VARIABLE) {
574 		mod_freesync_handle_v_update(adev->dm.freesync_module,
575 					     acrtc->dm_irq_params.stream,
576 					     &acrtc->dm_irq_params.vrr_params);
577 
578 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
579 					   &acrtc->dm_irq_params.vrr_params.adjust);
580 	}
581 
582 	/*
583 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
584 	 * In that case, pageflip completion interrupts won't fire and pageflip
585 	 * completion events won't get delivered. Prevent this by sending
586 	 * pending pageflip events from here if a flip is still pending.
587 	 *
588 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
589 	 * avoid race conditions between flip programming and completion,
590 	 * which could cause too early flip completion events.
591 	 */
592 	if (adev->family >= AMDGPU_FAMILY_RV &&
593 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
594 	    acrtc->dm_irq_params.active_planes == 0) {
595 		if (acrtc->event) {
596 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
597 			acrtc->event = NULL;
598 			drm_crtc_vblank_put(&acrtc->base);
599 		}
600 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
601 	}
602 
603 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
604 }
605 
606 #if defined(CONFIG_DRM_AMD_DC_DCN)
607 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
608 /**
609  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
610  * DCN generation ASICs
611  * @interrupt_params: interrupt parameters
612  *
613  * Used to set crc window/read out crc value at vertical line 0 position
614  */
615 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
616 {
617 	struct common_irq_params *irq_params = interrupt_params;
618 	struct amdgpu_device *adev = irq_params->adev;
619 	struct amdgpu_crtc *acrtc;
620 
621 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
622 
623 	if (!acrtc)
624 		return;
625 
626 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
627 }
628 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
629 
630 /**
631  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
632  * @adev: amdgpu_device pointer
633  * @notify: dmub notification structure
634  *
635  * Dmub AUX or SET_CONFIG command completion processing callback
636  * Copies the dmub notification to DM, to be read by the AUX-command-issuing
637  * thread, and signals the event to wake up that thread.
638  */
639 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
640 					struct dmub_notification *notify)
641 {
642 	if (adev->dm.dmub_notify)
643 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
644 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
645 		complete(&adev->dm.dmub_aux_transfer_done);
646 }
647 
648 /**
649  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
650  * @adev: amdgpu_device pointer
651  * @notify: dmub notification structure
652  *
653  * Dmub Hpd interrupt processing callback. Gets the display index through the
654  * link index and calls the helper to do the processing.
655  */
656 static void dmub_hpd_callback(struct amdgpu_device *adev,
657 			      struct dmub_notification *notify)
658 {
659 	struct amdgpu_dm_connector *aconnector;
660 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
661 	struct drm_connector *connector;
662 	struct drm_connector_list_iter iter;
663 	struct dc_link *link;
664 	uint8_t link_index = 0;
665 	struct drm_device *dev;
666 
667 	if (adev == NULL)
668 		return;
669 
670 	if (notify == NULL) {
671 		DRM_ERROR("DMUB HPD callback notification was NULL");
672 		return;
673 	}
674 
675 	if (notify->link_index >= adev->dm.dc->link_count) {
676 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
677 		return;
678 	}
679 
680 	link_index = notify->link_index;
681 	link = adev->dm.dc->links[link_index];
682 	dev = adev->dm.ddev;
683 
684 	drm_connector_list_iter_begin(dev, &iter);
685 	drm_for_each_connector_iter(connector, &iter) {
686 		aconnector = to_amdgpu_dm_connector(connector);
687 		if (link && aconnector->dc_link == link) {
688 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
689 			hpd_aconnector = aconnector;
690 			break;
691 		}
692 	}
693 	drm_connector_list_iter_end(&iter);
694 
695 	if (hpd_aconnector) {
696 		if (notify->type == DMUB_NOTIFICATION_HPD)
697 			handle_hpd_irq_helper(hpd_aconnector);
698 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
699 			handle_hpd_rx_irq(hpd_aconnector);
700 	}
701 }
702 
703 /**
704  * register_dmub_notify_callback - Sets callback for DMUB notify
705  * @adev: amdgpu_device pointer
706  * @type: Type of dmub notification
707  * @callback: Dmub interrupt callback function
708  * @dmub_int_thread_offload: offload indicator
709  *
710  * API to register a dmub callback handler for a dmub notification.
711  * Also sets an indicator for whether the callback processing is to be
712  * offloaded to the dmub interrupt handling thread.
713  * Return: true if successfully registered, false if the callback is NULL or the type is out of range
714  */
715 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
716 					  enum dmub_notification_type type,
717 					  dmub_notify_interrupt_callback_t callback,
718 					  bool dmub_int_thread_offload)
719 {
720 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
721 		adev->dm.dmub_callback[type] = callback;
722 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
723 	} else
724 		return false;
725 
726 	return true;
727 }
728 
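/*
 * Deferred work handler for DMUB HPD notifications that were offloaded from
 * the outbox interrupt; invokes the registered callback for the notification
 * type and frees the work item.
 */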
729 static void dm_handle_hpd_work(struct work_struct *work)
730 {
731 	struct dmub_hpd_work *dmub_hpd_wrk;
732 
733 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
734 
735 	if (!dmub_hpd_wrk->dmub_notify) {
736 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
737 		return;
738 	}
739 
740 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
741 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
742 		dmub_hpd_wrk->dmub_notify);
743 	}
744 
745 	kfree(dmub_hpd_wrk->dmub_notify);
746 	kfree(dmub_hpd_wrk);
747 
748 }
749 
750 #define DMUB_TRACE_MAX_READ 64
751 /**
752  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
753  * @interrupt_params: used for determining the Outbox instance
754  *
755  * Handles the Outbox interrupt by dispatching registered DMUB notification
756  * callbacks and draining the DMCUB trace buffer.
757  */
758 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
759 {
760 	struct dmub_notification notify;
761 	struct common_irq_params *irq_params = interrupt_params;
762 	struct amdgpu_device *adev = irq_params->adev;
763 	struct amdgpu_display_manager *dm = &adev->dm;
764 	struct dmcub_trace_buf_entry entry = { 0 };
765 	uint32_t count = 0;
766 	struct dmub_hpd_work *dmub_hpd_wrk;
767 	struct dc_link *plink = NULL;
768 
769 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
770 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
771 
772 		do {
773 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
774 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
775 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
776 				continue;
777 			}
778 			if (!dm->dmub_callback[notify.type]) {
779 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
780 				continue;
781 			}
782 			if (dm->dmub_thread_offload[notify.type]) {
783 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
784 				if (!dmub_hpd_wrk) {
785 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
786 					return;
787 				}
788 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
789 				if (!dmub_hpd_wrk->dmub_notify) {
790 					kfree(dmub_hpd_wrk);
791 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
792 					return;
793 				}
794 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
795 				if (dmub_hpd_wrk->dmub_notify)
796 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
797 				dmub_hpd_wrk->adev = adev;
798 				if (notify.type == DMUB_NOTIFICATION_HPD) {
799 					plink = adev->dm.dc->links[notify.link_index];
800 					if (plink) {
801 						plink->hpd_status =
802 							notify.hpd_status == DP_HPD_PLUG;
803 					}
804 				}
805 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
806 			} else {
807 				dm->dmub_callback[notify.type](adev, &notify);
808 			}
809 		} while (notify.pending_notification);
810 	}
811 
812 
813 	do {
814 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
815 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
816 							entry.param0, entry.param1);
817 
818 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
819 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
820 		} else
821 			break;
822 
823 		count++;
824 
825 	} while (count <= DMUB_TRACE_MAX_READ);
826 
827 	if (count > DMUB_TRACE_MAX_READ)
828 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
829 }
830 #endif /* CONFIG_DRM_AMD_DC_DCN */
831 
832 static int dm_set_clockgating_state(void *handle,
833 		  enum amd_clockgating_state state)
834 {
835 	return 0;
836 }
837 
838 static int dm_set_powergating_state(void *handle,
839 		  enum amd_powergating_state state)
840 {
841 	return 0;
842 }
843 
844 /* Prototypes of private functions */
845 static int dm_early_init(void *handle);
846 
847 /* Allocate memory for FBC compressed data  */
848 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
849 {
850 	struct drm_device *dev = connector->dev;
851 	struct amdgpu_device *adev = drm_to_adev(dev);
852 	struct dm_compressor_info *compressor = &adev->dm.compressor;
853 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
854 	struct drm_display_mode *mode;
855 	unsigned long max_size = 0;
856 
857 	if (adev->dm.dc->fbc_compressor == NULL)
858 		return;
859 
860 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
861 		return;
862 
863 	if (compressor->bo_ptr)
864 		return;
865 
866 
867 	list_for_each_entry(mode, &connector->modes, head) {
868 		if (max_size < mode->htotal * mode->vtotal)
869 			max_size = mode->htotal * mode->vtotal;
870 	}
871 
872 	if (max_size) {
873 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
874 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
875 			    &compressor->gpu_addr, &compressor->cpu_addr);
876 
877 		if (r)
878 			DRM_ERROR("DM: Failed to initialize FBC\n");
879 		else {
880 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
881 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
882 		}
883 
884 	}
885 
886 }
887 
888 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
889 					  int pipe, bool *enabled,
890 					  unsigned char *buf, int max_bytes)
891 {
892 	struct drm_device *dev = dev_get_drvdata(kdev);
893 	struct amdgpu_device *adev = drm_to_adev(dev);
894 	struct drm_connector *connector;
895 	struct drm_connector_list_iter conn_iter;
896 	struct amdgpu_dm_connector *aconnector;
897 	int ret = 0;
898 
899 	*enabled = false;
900 
901 	mutex_lock(&adev->dm.audio_lock);
902 
903 	drm_connector_list_iter_begin(dev, &conn_iter);
904 	drm_for_each_connector_iter(connector, &conn_iter) {
905 		aconnector = to_amdgpu_dm_connector(connector);
906 		if (aconnector->audio_inst != port)
907 			continue;
908 
909 		*enabled = true;
910 		ret = drm_eld_size(connector->eld);
911 		memcpy(buf, connector->eld, min(max_bytes, ret));
912 
913 		break;
914 	}
915 	drm_connector_list_iter_end(&conn_iter);
916 
917 	mutex_unlock(&adev->dm.audio_lock);
918 
919 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
920 
921 	return ret;
922 }
923 
924 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
925 	.get_eld = amdgpu_dm_audio_component_get_eld,
926 };
927 
928 static int amdgpu_dm_audio_component_bind(struct device *kdev,
929 				       struct device *hda_kdev, void *data)
930 {
931 	struct drm_device *dev = dev_get_drvdata(kdev);
932 	struct amdgpu_device *adev = drm_to_adev(dev);
933 	struct drm_audio_component *acomp = data;
934 
935 	acomp->ops = &amdgpu_dm_audio_component_ops;
936 	acomp->dev = kdev;
937 	adev->dm.audio_component = acomp;
938 
939 	return 0;
940 }
941 
942 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
943 					  struct device *hda_kdev, void *data)
944 {
945 	struct drm_device *dev = dev_get_drvdata(kdev);
946 	struct amdgpu_device *adev = drm_to_adev(dev);
947 	struct drm_audio_component *acomp = data;
948 
949 	acomp->ops = NULL;
950 	acomp->dev = NULL;
951 	adev->dm.audio_component = NULL;
952 }
953 
954 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
955 	.bind	= amdgpu_dm_audio_component_bind,
956 	.unbind	= amdgpu_dm_audio_component_unbind,
957 };
958 
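/*
 * Register the display audio component (used by the HDA audio driver) and
 * initialize the audio pin state for every audio endpoint exposed by DC.
 */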
959 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
960 {
961 	int i, ret;
962 
963 	if (!amdgpu_audio)
964 		return 0;
965 
966 	adev->mode_info.audio.enabled = true;
967 
968 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
969 
970 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
971 		adev->mode_info.audio.pin[i].channels = -1;
972 		adev->mode_info.audio.pin[i].rate = -1;
973 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
974 		adev->mode_info.audio.pin[i].status_bits = 0;
975 		adev->mode_info.audio.pin[i].category_code = 0;
976 		adev->mode_info.audio.pin[i].connected = false;
977 		adev->mode_info.audio.pin[i].id =
978 			adev->dm.dc->res_pool->audios[i]->inst;
979 		adev->mode_info.audio.pin[i].offset = 0;
980 	}
981 
982 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
983 	if (ret < 0)
984 		return ret;
985 
986 	adev->dm.audio_registered = true;
987 
988 	return 0;
989 }
990 
991 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
992 {
993 	if (!amdgpu_audio)
994 		return;
995 
996 	if (!adev->mode_info.audio.enabled)
997 		return;
998 
999 	if (adev->dm.audio_registered) {
1000 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1001 		adev->dm.audio_registered = false;
1002 	}
1003 
1004 	/* TODO: Disable audio? */
1005 
1006 	adev->mode_info.audio.enabled = false;
1007 }
1008 
1009 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1010 {
1011 	struct drm_audio_component *acomp = adev->dm.audio_component;
1012 
1013 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1014 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1015 
1016 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1017 						 pin, -1);
1018 	}
1019 }
1020 
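/*
 * dm_dmub_hw_init() - Bring up the DMCUB firmware service.
 *
 * Copies the DMUB firmware and VBIOS into the framebuffer windows, resets and
 * re-initializes the DMCUB hardware through the DMUB service, waits for the
 * firmware auto-load, and hooks the resulting DMUB server into DC. Returns 0
 * on success (including when DMUB is not supported on the ASIC) or a negative
 * error code.
 */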
1021 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1022 {
1023 	const struct dmcub_firmware_header_v1_0 *hdr;
1024 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1025 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1026 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1027 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1028 	struct abm *abm = adev->dm.dc->res_pool->abm;
1029 	struct dmub_srv_hw_params hw_params;
1030 	enum dmub_status status;
1031 	const unsigned char *fw_inst_const, *fw_bss_data;
1032 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1033 	bool has_hw_support;
1034 
1035 	if (!dmub_srv)
1036 		/* DMUB isn't supported on the ASIC. */
1037 		return 0;
1038 
1039 	if (!fb_info) {
1040 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1041 		return -EINVAL;
1042 	}
1043 
1044 	if (!dmub_fw) {
1045 		/* Firmware required for DMUB support. */
1046 		DRM_ERROR("No firmware provided for DMUB.\n");
1047 		return -EINVAL;
1048 	}
1049 
1050 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1051 	if (status != DMUB_STATUS_OK) {
1052 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1053 		return -EINVAL;
1054 	}
1055 
1056 	if (!has_hw_support) {
1057 		DRM_INFO("DMUB unsupported on ASIC\n");
1058 		return 0;
1059 	}
1060 
1061 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1062 	status = dmub_srv_hw_reset(dmub_srv);
1063 	if (status != DMUB_STATUS_OK)
1064 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1065 
1066 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1067 
1068 	fw_inst_const = dmub_fw->data +
1069 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1070 			PSP_HEADER_BYTES;
1071 
1072 	fw_bss_data = dmub_fw->data +
1073 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1074 		      le32_to_cpu(hdr->inst_const_bytes);
1075 
1076 	/* Copy firmware and bios info into FB memory. */
1077 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1078 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1079 
1080 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1081 
1082 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1083 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1084 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1085 	 * will be done by dm_dmub_hw_init
1086 	 */
1087 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1088 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1089 				fw_inst_const_size);
1090 	}
1091 
1092 	if (fw_bss_data_size)
1093 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1094 		       fw_bss_data, fw_bss_data_size);
1095 
1096 	/* Copy firmware bios info into FB memory. */
1097 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1098 	       adev->bios_size);
1099 
1100 	/* Reset regions that need to be reset. */
1101 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1102 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1103 
1104 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1105 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1106 
1107 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1108 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1109 
1110 	/* Initialize hardware. */
1111 	memset(&hw_params, 0, sizeof(hw_params));
1112 	hw_params.fb_base = adev->gmc.fb_start;
1113 	hw_params.fb_offset = adev->gmc.aper_base;
1114 
1115 	/* backdoor load firmware and trigger dmub running */
1116 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1117 		hw_params.load_inst_const = true;
1118 
1119 	if (dmcu)
1120 		hw_params.psp_version = dmcu->psp_version;
1121 
1122 	for (i = 0; i < fb_info->num_fb; ++i)
1123 		hw_params.fb[i] = &fb_info->fb[i];
1124 
1125 	switch (adev->ip_versions[DCE_HWIP][0]) {
1126 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1127 		hw_params.dpia_supported = true;
1128 #if defined(CONFIG_DRM_AMD_DC_DCN)
1129 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1130 #endif
1131 		break;
1132 	default:
1133 		break;
1134 	}
1135 
1136 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1137 	if (status != DMUB_STATUS_OK) {
1138 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1139 		return -EINVAL;
1140 	}
1141 
1142 	/* Wait for firmware load to finish. */
1143 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1144 	if (status != DMUB_STATUS_OK)
1145 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1146 
1147 	/* Init DMCU and ABM if available. */
1148 	if (dmcu && abm) {
1149 		dmcu->funcs->dmcu_init(dmcu);
1150 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1151 	}
1152 
1153 	if (!adev->dm.dc->ctx->dmub_srv)
1154 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1155 	if (!adev->dm.dc->ctx->dmub_srv) {
1156 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1157 		return -ENOMEM;
1158 	}
1159 
1160 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1161 		 adev->dm.dmcub_fw_version);
1162 
1163 	return 0;
1164 }
1165 
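/*
 * On resume, re-check DMCUB state: if the firmware is still initialized just
 * wait for auto-load to finish, otherwise perform the full dm_dmub_hw_init()
 * sequence again.
 */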
1166 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1167 {
1168 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1169 	enum dmub_status status;
1170 	bool init;
1171 
1172 	if (!dmub_srv) {
1173 		/* DMUB isn't supported on the ASIC. */
1174 		return;
1175 	}
1176 
1177 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1178 	if (status != DMUB_STATUS_OK)
1179 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1180 
1181 	if (status == DMUB_STATUS_OK && init) {
1182 		/* Wait for firmware load to finish. */
1183 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1184 		if (status != DMUB_STATUS_OK)
1185 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1186 	} else {
1187 		/* Perform the full hardware initialization. */
1188 		dm_dmub_hw_init(adev);
1189 	}
1190 }
1191 
1192 #if defined(CONFIG_DRM_AMD_DC_DCN)
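/*
 * Build the DC physical address space configuration (system aperture, AGP
 * window and GART page table location) from the GMC state; DC uses this to
 * set up its system memory address translation.
 */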
1193 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1194 {
1195 	uint64_t pt_base;
1196 	uint32_t logical_addr_low;
1197 	uint32_t logical_addr_high;
1198 	uint32_t agp_base, agp_bot, agp_top;
1199 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1200 
1201 	memset(pa_config, 0, sizeof(*pa_config));
1202 
1203 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1204 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1205 
1206 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1207 		/*
1208 		 * Raven2 has a HW issue that it is unable to use the vram which
1209 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1210 		 * workaround that increase system aperture high address (add 1)
1211 		 * to get rid of the VM fault and hardware hang.
1212 		 */
1213 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1214 	else
1215 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1216 
1217 	agp_base = 0;
1218 	agp_bot = adev->gmc.agp_start >> 24;
1219 	agp_top = adev->gmc.agp_end >> 24;
1220 
1221 
1222 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1223 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1224 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1225 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1226 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1227 	page_table_base.low_part = lower_32_bits(pt_base);
1228 
1229 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1230 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1231 
1232 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1233 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1234 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1235 
1236 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1237 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1238 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1239 
1240 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1241 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1242 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1243 
1244 	pa_config->is_hvm_enabled = 0;
1245 
1246 }
1247 #endif
1248 #if defined(CONFIG_DRM_AMD_DC_DCN)
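/*
 * Deferred vblank enable/disable work: tracks the number of CRTCs with
 * vblank interrupts enabled, allows DC idle optimizations (MALL) only when
 * none are active, and enables or disables PSR according to the OS vblank
 * requirements for the stream.
 */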
1249 static void vblank_control_worker(struct work_struct *work)
1250 {
1251 	struct vblank_control_work *vblank_work =
1252 		container_of(work, struct vblank_control_work, work);
1253 	struct amdgpu_display_manager *dm = vblank_work->dm;
1254 
1255 	mutex_lock(&dm->dc_lock);
1256 
1257 	if (vblank_work->enable)
1258 		dm->active_vblank_irq_count++;
1259 	else if (dm->active_vblank_irq_count)
1260 		dm->active_vblank_irq_count--;
1261 
1262 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1263 
1264 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1265 
1266 	/* Control PSR based on vblank requirements from OS */
1267 	if (vblank_work->stream && vblank_work->stream->link) {
1268 		if (vblank_work->enable) {
1269 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1270 				amdgpu_dm_psr_disable(vblank_work->stream);
1271 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1272 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1273 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1274 			amdgpu_dm_psr_enable(vblank_work->stream);
1275 		}
1276 	}
1277 
1278 	mutex_unlock(&dm->dc_lock);
1279 
1280 	dc_stream_release(vblank_work->stream);
1281 
1282 	kfree(vblank_work);
1283 }
1284 
1285 #endif
1286 
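/*
 * Deferred HPD_RX interrupt work: re-detects the sink and, outside of GPU
 * reset, handles DP automated test requests or link-loss recovery for the
 * link that raised the short-pulse interrupt.
 */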
1287 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1288 {
1289 	struct hpd_rx_irq_offload_work *offload_work;
1290 	struct amdgpu_dm_connector *aconnector;
1291 	struct dc_link *dc_link;
1292 	struct amdgpu_device *adev;
1293 	enum dc_connection_type new_connection_type = dc_connection_none;
1294 	unsigned long flags;
1295 
1296 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1297 	aconnector = offload_work->offload_wq->aconnector;
1298 
1299 	if (!aconnector) {
1300 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1301 		goto skip;
1302 	}
1303 
1304 	adev = drm_to_adev(aconnector->base.dev);
1305 	dc_link = aconnector->dc_link;
1306 
1307 	mutex_lock(&aconnector->hpd_lock);
1308 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1309 		DRM_ERROR("KMS: Failed to detect connector\n");
1310 	mutex_unlock(&aconnector->hpd_lock);
1311 
1312 	if (new_connection_type == dc_connection_none)
1313 		goto skip;
1314 
1315 	if (amdgpu_in_reset(adev))
1316 		goto skip;
1317 
1318 	mutex_lock(&adev->dm.dc_lock);
1319 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1320 		dc_link_dp_handle_automated_test(dc_link);
1321 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1322 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1323 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1324 		dc_link_dp_handle_link_loss(dc_link);
1325 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1326 		offload_work->offload_wq->is_handling_link_loss = false;
1327 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1328 	}
1329 	mutex_unlock(&adev->dm.dc_lock);
1330 
1331 skip:
1332 	kfree(offload_work);
1333 
1334 }
1335 
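/*
 * Allocate one single-threaded offload workqueue per link for deferred
 * HPD_RX interrupt handling. Returns NULL on allocation failure.
 */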
1336 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1337 {
1338 	int max_caps = dc->caps.max_links;
1339 	int i = 0;
1340 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1341 
1342 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1343 
1344 	if (!hpd_rx_offload_wq)
1345 		return NULL;
1346 
1347 
1348 	for (i = 0; i < max_caps; i++) {
1349 		hpd_rx_offload_wq[i].wq =
1350 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1351 
1352 		if (hpd_rx_offload_wq[i].wq == NULL) {
1353 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1354 			return NULL;
1355 		}
1356 
1357 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1358 	}
1359 
1360 	return hpd_rx_offload_wq;
1361 }
1362 
1363 struct amdgpu_stutter_quirk {
1364 	u16 chip_vendor;
1365 	u16 chip_device;
1366 	u16 subsys_vendor;
1367 	u16 subsys_device;
1368 	u8 revision;
1369 };
1370 
1371 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1372 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1373 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1374 	{ 0, 0, 0, 0, 0 },
1375 };
1376 
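/*
 * Check the device's PCI IDs and revision against the quirk list above;
 * matching boards have memory stutter mode disabled (see the bug report
 * referenced in the table).
 */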
1377 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1378 {
1379 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1380 
1381 	while (p && p->chip_device != 0) {
1382 		if (pdev->vendor == p->chip_vendor &&
1383 		    pdev->device == p->chip_device &&
1384 		    pdev->subsystem_vendor == p->subsys_vendor &&
1385 		    pdev->subsystem_device == p->subsys_device &&
1386 		    pdev->revision == p->revision) {
1387 			return true;
1388 		}
1389 		++p;
1390 	}
1391 	return false;
1392 }
1393 
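/*
 * amdgpu_dm_init() - Top-level DM initialization.
 *
 * Creates the DC instance from the ASIC/firmware configuration, initializes
 * DMUB, the freesync and color-management modules, HDCP and HPD offload
 * workqueues, registers DMUB notification callbacks where supported, and
 * finally builds the DRM objects (CRTCs, encoders, connectors) for KMS.
 */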
1394 static int amdgpu_dm_init(struct amdgpu_device *adev)
1395 {
1396 	struct dc_init_data init_data;
1397 #ifdef CONFIG_DRM_AMD_DC_HDCP
1398 	struct dc_callback_init init_params;
1399 #endif
1400 	int r;
1401 
1402 	adev->dm.ddev = adev_to_drm(adev);
1403 	adev->dm.adev = adev;
1404 
1405 	/* Zero all the fields */
1406 	memset(&init_data, 0, sizeof(init_data));
1407 #ifdef CONFIG_DRM_AMD_DC_HDCP
1408 	memset(&init_params, 0, sizeof(init_params));
1409 #endif
1410 
1411 	mutex_init(&adev->dm.dc_lock);
1412 	mutex_init(&adev->dm.audio_lock);
1413 #if defined(CONFIG_DRM_AMD_DC_DCN)
1414 	spin_lock_init(&adev->dm.vblank_lock);
1415 #endif
1416 
1417 	if (amdgpu_dm_irq_init(adev)) {
1418 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1419 		goto error;
1420 	}
1421 
1422 	init_data.asic_id.chip_family = adev->family;
1423 
1424 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1425 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1426 	init_data.asic_id.chip_id = adev->pdev->device;
1427 
1428 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1429 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1430 	init_data.asic_id.atombios_base_address =
1431 		adev->mode_info.atom_context->bios;
1432 
1433 	init_data.driver = adev;
1434 
1435 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1436 
1437 	if (!adev->dm.cgs_device) {
1438 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1439 		goto error;
1440 	}
1441 
1442 	init_data.cgs_device = adev->dm.cgs_device;
1443 
1444 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1445 
1446 	switch (adev->ip_versions[DCE_HWIP][0]) {
1447 	case IP_VERSION(2, 1, 0):
1448 		switch (adev->dm.dmcub_fw_version) {
1449 		case 0: /* development */
1450 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1451 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1452 			init_data.flags.disable_dmcu = false;
1453 			break;
1454 		default:
1455 			init_data.flags.disable_dmcu = true;
1456 		}
1457 		break;
1458 	case IP_VERSION(2, 0, 3):
1459 		init_data.flags.disable_dmcu = true;
1460 		break;
1461 	default:
1462 		break;
1463 	}
1464 
1465 	switch (adev->asic_type) {
1466 	case CHIP_CARRIZO:
1467 	case CHIP_STONEY:
1468 		init_data.flags.gpu_vm_support = true;
1469 		break;
1470 	default:
1471 		switch (adev->ip_versions[DCE_HWIP][0]) {
1472 		case IP_VERSION(1, 0, 0):
1473 		case IP_VERSION(1, 0, 1):
1474 			/* enable S/G on PCO and RV2 */
1475 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1476 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1477 				init_data.flags.gpu_vm_support = true;
1478 			break;
1479 		case IP_VERSION(2, 1, 0):
1480 		case IP_VERSION(3, 0, 1):
1481 		case IP_VERSION(3, 1, 2):
1482 		case IP_VERSION(3, 1, 3):
1483 		case IP_VERSION(3, 1, 5):
1484 			init_data.flags.gpu_vm_support = true;
1485 			break;
1486 		default:
1487 			break;
1488 		}
1489 		break;
1490 	}
1491 
1492 	if (init_data.flags.gpu_vm_support)
1493 		adev->mode_info.gpu_vm_support = true;
1494 
1495 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1496 		init_data.flags.fbc_support = true;
1497 
1498 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1499 		init_data.flags.multi_mon_pp_mclk_switch = true;
1500 
1501 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1502 		init_data.flags.disable_fractional_pwm = true;
1503 
1504 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1505 		init_data.flags.edp_no_power_sequencing = true;
1506 
1507 #ifdef CONFIG_DRM_AMD_DC_DCN
1508 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1509 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1510 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1511 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1512 #endif
1513 
1514 	init_data.flags.seamless_boot_edp_requested = false;
1515 
1516 	if (check_seamless_boot_capability(adev)) {
1517 		init_data.flags.seamless_boot_edp_requested = true;
1518 		init_data.flags.allow_seamless_boot_optimization = true;
1519 		DRM_INFO("Seamless boot condition check passed\n");
1520 	}
1521 
1522 	INIT_LIST_HEAD(&adev->dm.da_list);
1523 	/* Display Core create. */
1524 	adev->dm.dc = dc_create(&init_data);
1525 
1526 	if (adev->dm.dc) {
1527 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1528 	} else {
1529 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1530 		goto error;
1531 	}
1532 
1533 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1534 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1535 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1536 	}
1537 
1538 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1539 		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1540 	if (dm_should_disable_stutter(adev->pdev))
1541 		adev->dm.dc->debug.disable_stutter = true;
1542 
1543 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1544 		adev->dm.dc->debug.disable_stutter = true;
1545 
1546 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1547 		adev->dm.dc->debug.disable_dsc = true;
1548 		adev->dm.dc->debug.disable_dsc_edp = true;
1549 	}
1550 
1551 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1552 		adev->dm.dc->debug.disable_clock_gate = true;
1553 
1554 	r = dm_dmub_hw_init(adev);
1555 	if (r) {
1556 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1557 		goto error;
1558 	}
1559 
1560 	dc_hardware_init(adev->dm.dc);
1561 
1562 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1563 	if (!adev->dm.hpd_rx_offload_wq) {
1564 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1565 		goto error;
1566 	}
1567 
1568 #if defined(CONFIG_DRM_AMD_DC_DCN)
1569 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1570 		struct dc_phy_addr_space_config pa_config;
1571 
1572 		mmhub_read_system_context(adev, &pa_config);
1573 
1574 		// Call the DC init_memory func
1575 		dc_setup_system_context(adev->dm.dc, &pa_config);
1576 	}
1577 #endif
1578 
1579 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1580 	if (!adev->dm.freesync_module) {
1581 		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1582 	} else {
1583 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1584 				 adev->dm.freesync_module);
1585 	}
1586 
1587 	amdgpu_dm_init_color_mod();
1588 
1589 #if defined(CONFIG_DRM_AMD_DC_DCN)
1590 	if (adev->dm.dc->caps.max_links > 0) {
1591 		adev->dm.vblank_control_workqueue =
1592 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1593 		if (!adev->dm.vblank_control_workqueue)
1594 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1595 	}
1596 #endif
1597 
1598 #ifdef CONFIG_DRM_AMD_DC_HDCP
1599 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1600 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1601 
1602 		if (!adev->dm.hdcp_workqueue)
1603 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1604 		else
1605 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1606 
1607 		dc_init_callbacks(adev->dm.dc, &init_params);
1608 	}
1609 #endif
1610 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1611 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1612 #endif
1613 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1614 		init_completion(&adev->dm.dmub_aux_transfer_done);
1615 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1616 		if (!adev->dm.dmub_notify) {
1617 			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify");
1618 			goto error;
1619 		}
1620 
1621 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1622 		if (!adev->dm.delayed_hpd_wq) {
1623 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1624 			goto error;
1625 		}
1626 
1627 		amdgpu_dm_outbox_init(adev);
1628 #if defined(CONFIG_DRM_AMD_DC_DCN)
1629 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1630 			dmub_aux_setconfig_callback, false)) {
1631 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
1632 			goto error;
1633 		}
1634 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1635 			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1636 			goto error;
1637 		}
1638 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1639 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1640 			goto error;
1641 		}
1642 #endif /* CONFIG_DRM_AMD_DC_DCN */
1643 	}
1644 
1645 	if (amdgpu_dm_initialize_drm_device(adev)) {
1646 		DRM_ERROR(
1647 		"amdgpu: failed to initialize sw for display support.\n");
1648 		goto error;
1649 	}
1650 
1651 	/* create fake encoders for MST */
1652 	dm_dp_create_fake_mst_encoders(adev);
1653 
1654 	/* TODO: Add_display_info? */
1655 
1656 	/* TODO use dynamic cursor width */
1657 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1658 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1659 
1660 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank for display support.\n");
1663 		goto error;
1664 	}
1665 
1666 
1667 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1668 
1669 	return 0;
1670 error:
1671 	amdgpu_dm_fini(adev);
1672 
1673 	return -EINVAL;
1674 }
1675 
1676 static int amdgpu_dm_early_fini(void *handle)
1677 {
1678 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1679 
1680 	amdgpu_dm_audio_fini(adev);
1681 
1682 	return 0;
1683 }
1684 
1685 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1686 {
1687 	int i;
1688 
1689 #if defined(CONFIG_DRM_AMD_DC_DCN)
1690 	if (adev->dm.vblank_control_workqueue) {
1691 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1692 		adev->dm.vblank_control_workqueue = NULL;
1693 	}
1694 #endif
1695 
1696 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1697 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1698 	}
1699 
1700 	amdgpu_dm_destroy_drm_device(&adev->dm);
1701 
1702 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1703 	if (adev->dm.crc_rd_wrk) {
1704 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1705 		kfree(adev->dm.crc_rd_wrk);
1706 		adev->dm.crc_rd_wrk = NULL;
1707 	}
1708 #endif
1709 #ifdef CONFIG_DRM_AMD_DC_HDCP
1710 	if (adev->dm.hdcp_workqueue) {
1711 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1712 		adev->dm.hdcp_workqueue = NULL;
1713 	}
1714 
1715 	if (adev->dm.dc)
1716 		dc_deinit_callbacks(adev->dm.dc);
1717 #endif
1718 
1719 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1720 
1721 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1722 		kfree(adev->dm.dmub_notify);
1723 		adev->dm.dmub_notify = NULL;
1724 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1725 		adev->dm.delayed_hpd_wq = NULL;
1726 	}
1727 
1728 	if (adev->dm.dmub_bo)
1729 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1730 				      &adev->dm.dmub_bo_gpu_addr,
1731 				      &adev->dm.dmub_bo_cpu_addr);
1732 
1733 	if (adev->dm.hpd_rx_offload_wq) {
1734 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1735 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1736 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1737 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1738 			}
1739 		}
1740 
1741 		kfree(adev->dm.hpd_rx_offload_wq);
1742 		adev->dm.hpd_rx_offload_wq = NULL;
1743 	}
1744 
1745 	/* DC Destroy TODO: Replace destroy DAL */
1746 	if (adev->dm.dc)
1747 		dc_destroy(&adev->dm.dc);
1748 	/*
	 * TODO: pageflip, vblank interrupt
1750 	 *
1751 	 * amdgpu_dm_irq_fini(adev);
1752 	 */
1753 
1754 	if (adev->dm.cgs_device) {
1755 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1756 		adev->dm.cgs_device = NULL;
1757 	}
1758 	if (adev->dm.freesync_module) {
1759 		mod_freesync_destroy(adev->dm.freesync_module);
1760 		adev->dm.freesync_module = NULL;
1761 	}
1762 
1763 	mutex_destroy(&adev->dm.audio_lock);
1764 	mutex_destroy(&adev->dm.dc_lock);
1765 
1766 	return;
1767 }
1768 
1769 static int load_dmcu_fw(struct amdgpu_device *adev)
1770 {
1771 	const char *fw_name_dmcu = NULL;
1772 	int r;
1773 	const struct dmcu_firmware_header_v1_0 *hdr;
1774 
	switch (adev->asic_type) {
1776 #if defined(CONFIG_DRM_AMD_DC_SI)
1777 	case CHIP_TAHITI:
1778 	case CHIP_PITCAIRN:
1779 	case CHIP_VERDE:
1780 	case CHIP_OLAND:
1781 #endif
1782 	case CHIP_BONAIRE:
1783 	case CHIP_HAWAII:
1784 	case CHIP_KAVERI:
1785 	case CHIP_KABINI:
1786 	case CHIP_MULLINS:
1787 	case CHIP_TONGA:
1788 	case CHIP_FIJI:
1789 	case CHIP_CARRIZO:
1790 	case CHIP_STONEY:
1791 	case CHIP_POLARIS11:
1792 	case CHIP_POLARIS10:
1793 	case CHIP_POLARIS12:
1794 	case CHIP_VEGAM:
1795 	case CHIP_VEGA10:
1796 	case CHIP_VEGA12:
1797 	case CHIP_VEGA20:
1798 		return 0;
1799 	case CHIP_NAVI12:
1800 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1801 		break;
1802 	case CHIP_RAVEN:
1803 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1804 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1805 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1806 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1807 		else
1808 			return 0;
1809 		break;
1810 	default:
1811 		switch (adev->ip_versions[DCE_HWIP][0]) {
1812 		case IP_VERSION(2, 0, 2):
1813 		case IP_VERSION(2, 0, 3):
1814 		case IP_VERSION(2, 0, 0):
1815 		case IP_VERSION(2, 1, 0):
1816 		case IP_VERSION(3, 0, 0):
1817 		case IP_VERSION(3, 0, 2):
1818 		case IP_VERSION(3, 0, 3):
1819 		case IP_VERSION(3, 0, 1):
1820 		case IP_VERSION(3, 1, 2):
1821 		case IP_VERSION(3, 1, 3):
1822 		case IP_VERSION(3, 1, 5):
1823 		case IP_VERSION(3, 1, 6):
1824 			return 0;
1825 		default:
1826 			break;
1827 		}
1828 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1829 		return -EINVAL;
1830 	}
1831 
1832 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1833 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1834 		return 0;
1835 	}
1836 
1837 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1838 	if (r == -ENOENT) {
1839 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1840 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1841 		adev->dm.fw_dmcu = NULL;
1842 		return 0;
1843 	}
1844 	if (r) {
1845 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1846 			fw_name_dmcu);
1847 		return r;
1848 	}
1849 
1850 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1851 	if (r) {
1852 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1853 			fw_name_dmcu);
1854 		release_firmware(adev->dm.fw_dmcu);
1855 		adev->dm.fw_dmcu = NULL;
1856 		return r;
1857 	}
1858 
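	/*
	 * Register the DMCU image with the PSP loader as two regions: ERAM
	 * (the ucode body minus the interrupt vectors) and INTV (the
	 * interrupt vectors), each rounded up to a page.
	 */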
1859 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1860 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1861 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1862 	adev->firmware.fw_size +=
1863 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1864 
1865 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1866 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1867 	adev->firmware.fw_size +=
1868 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1869 
1870 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1871 
1872 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1873 
1874 	return 0;
1875 }
1876 
1877 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1878 {
1879 	struct amdgpu_device *adev = ctx;
1880 
1881 	return dm_read_reg(adev->dm.dc->ctx, address);
1882 }
1883 
1884 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1885 				     uint32_t value)
1886 {
1887 	struct amdgpu_device *adev = ctx;
1888 
1889 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1890 }
1891 
1892 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1893 {
1894 	struct dmub_srv_create_params create_params;
1895 	struct dmub_srv_region_params region_params;
1896 	struct dmub_srv_region_info region_info;
1897 	struct dmub_srv_fb_params fb_params;
1898 	struct dmub_srv_fb_info *fb_info;
1899 	struct dmub_srv *dmub_srv;
1900 	const struct dmcub_firmware_header_v1_0 *hdr;
1901 	const char *fw_name_dmub;
1902 	enum dmub_asic dmub_asic;
1903 	enum dmub_status status;
1904 	int r;
1905 
1906 	switch (adev->ip_versions[DCE_HWIP][0]) {
1907 	case IP_VERSION(2, 1, 0):
1908 		dmub_asic = DMUB_ASIC_DCN21;
1909 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1910 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1911 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1912 		break;
1913 	case IP_VERSION(3, 0, 0):
1914 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1915 			dmub_asic = DMUB_ASIC_DCN30;
1916 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1917 		} else {
1918 			dmub_asic = DMUB_ASIC_DCN30;
1919 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1920 		}
1921 		break;
1922 	case IP_VERSION(3, 0, 1):
1923 		dmub_asic = DMUB_ASIC_DCN301;
1924 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1925 		break;
1926 	case IP_VERSION(3, 0, 2):
1927 		dmub_asic = DMUB_ASIC_DCN302;
1928 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1929 		break;
1930 	case IP_VERSION(3, 0, 3):
1931 		dmub_asic = DMUB_ASIC_DCN303;
1932 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1933 		break;
1934 	case IP_VERSION(3, 1, 2):
1935 	case IP_VERSION(3, 1, 3):
1936 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1937 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1938 		break;
1939 	case IP_VERSION(3, 1, 5):
1940 		dmub_asic = DMUB_ASIC_DCN315;
1941 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1942 		break;
1943 	case IP_VERSION(3, 1, 6):
1944 		dmub_asic = DMUB_ASIC_DCN316;
1945 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1946 		break;
1947 	default:
1948 		/* ASIC doesn't support DMUB. */
1949 		return 0;
1950 	}
1951 
1952 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1953 	if (r) {
1954 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1955 		return 0;
1956 	}
1957 
1958 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1959 	if (r) {
1960 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1961 		return 0;
1962 	}
1963 
1964 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1965 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1966 
1967 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1968 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1969 			AMDGPU_UCODE_ID_DMCUB;
1970 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1971 			adev->dm.dmub_fw;
1972 		adev->firmware.fw_size +=
1973 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1974 
1975 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1976 			 adev->dm.dmcub_fw_version);
1977 	}
1978 
1979 
1980 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1981 	dmub_srv = adev->dm.dmub_srv;
1982 
1983 	if (!dmub_srv) {
1984 		DRM_ERROR("Failed to allocate DMUB service!\n");
1985 		return -ENOMEM;
1986 	}
1987 
1988 	memset(&create_params, 0, sizeof(create_params));
1989 	create_params.user_ctx = adev;
1990 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1991 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1992 	create_params.asic = dmub_asic;
1993 
1994 	/* Create the DMUB service. */
1995 	status = dmub_srv_create(dmub_srv, &create_params);
1996 	if (status != DMUB_STATUS_OK) {
1997 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1998 		return -EINVAL;
1999 	}
2000 
2001 	/* Calculate the size of all the regions for the DMUB service. */
2002 	memset(&region_params, 0, sizeof(region_params));
2003 
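	/* The firmware image wraps the instruction constants with a PSP
	 * header and footer; strip the header from the start address and
	 * exclude both from the region size.
	 */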
2004 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2005 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2006 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2007 	region_params.vbios_size = adev->bios_size;
2008 	region_params.fw_bss_data = region_params.bss_data_size ?
2009 		adev->dm.dmub_fw->data +
2010 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2011 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2012 	region_params.fw_inst_const =
2013 		adev->dm.dmub_fw->data +
2014 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2015 		PSP_HEADER_BYTES;
2016 
2017 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2018 					   &region_info);
2019 
2020 	if (status != DMUB_STATUS_OK) {
2021 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2022 		return -EINVAL;
2023 	}
2024 
2025 	/*
2026 	 * Allocate a framebuffer based on the total size of all the regions.
2027 	 * TODO: Move this into GART.
2028 	 */
2029 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2030 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2031 				    &adev->dm.dmub_bo_gpu_addr,
2032 				    &adev->dm.dmub_bo_cpu_addr);
2033 	if (r)
2034 		return r;
2035 
2036 	/* Rebase the regions on the framebuffer address. */
2037 	memset(&fb_params, 0, sizeof(fb_params));
2038 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2039 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2040 	fb_params.region_info = &region_info;
2041 
2042 	adev->dm.dmub_fb_info =
2043 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2044 	fb_info = adev->dm.dmub_fb_info;
2045 
2046 	if (!fb_info) {
2047 		DRM_ERROR(
2048 			"Failed to allocate framebuffer info for DMUB service!\n");
2049 		return -ENOMEM;
2050 	}
2051 
2052 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2053 	if (status != DMUB_STATUS_OK) {
2054 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2055 		return -EINVAL;
2056 	}
2057 
2058 	return 0;
2059 }
2060 
2061 static int dm_sw_init(void *handle)
2062 {
2063 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2064 	int r;
2065 
2066 	r = dm_dmub_sw_init(adev);
2067 	if (r)
2068 		return r;
2069 
2070 	return load_dmcu_fw(adev);
2071 }
2072 
2073 static int dm_sw_fini(void *handle)
2074 {
2075 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2076 
2077 	kfree(adev->dm.dmub_fb_info);
2078 	adev->dm.dmub_fb_info = NULL;
2079 
2080 	if (adev->dm.dmub_srv) {
2081 		dmub_srv_destroy(adev->dm.dmub_srv);
2082 		adev->dm.dmub_srv = NULL;
2083 	}
2084 
2085 	release_firmware(adev->dm.dmub_fw);
2086 	adev->dm.dmub_fw = NULL;
2087 
2088 	release_firmware(adev->dm.fw_dmcu);
2089 	adev->dm.fw_dmcu = NULL;
2090 
2091 	return 0;
2092 }
2093 
2094 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2095 {
2096 	struct amdgpu_dm_connector *aconnector;
2097 	struct drm_connector *connector;
2098 	struct drm_connector_list_iter iter;
2099 	int ret = 0;
2100 
2101 	drm_connector_list_iter_begin(dev, &iter);
2102 	drm_for_each_connector_iter(connector, &iter) {
2103 		aconnector = to_amdgpu_dm_connector(connector);
2104 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2105 		    aconnector->mst_mgr.aux) {
2106 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2107 					 aconnector,
2108 					 aconnector->base.base.id);
2109 
2110 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2111 			if (ret < 0) {
2112 				DRM_ERROR("DM_MST: Failed to start MST\n");
2113 				aconnector->dc_link->type =
2114 					dc_connection_single;
2115 				break;
2116 			}
2117 		}
2118 	}
2119 	drm_connector_list_iter_end(&iter);
2120 
2121 	return ret;
2122 }
2123 
2124 static int dm_late_init(void *handle)
2125 {
2126 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2127 
2128 	struct dmcu_iram_parameters params;
2129 	unsigned int linear_lut[16];
2130 	int i;
2131 	struct dmcu *dmcu = NULL;
2132 
2133 	dmcu = adev->dm.dc->res_pool->dmcu;
2134 
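	/* Build a 16-entry linear (identity) backlight LUT spanning 0..0xFFFF. */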
2135 	for (i = 0; i < 16; i++)
2136 		linear_lut[i] = 0xFFFF * i / 15;
2137 
2138 	params.set = 0;
2139 	params.backlight_ramping_override = false;
2140 	params.backlight_ramping_start = 0xCCCC;
2141 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2142 	params.backlight_lut_array_size = 16;
2143 	params.backlight_lut_array = linear_lut;
2144 
	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
2148 	params.min_abm_backlight = 0x28F;
	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
2153 	if (dmcu) {
2154 		if (!dmcu_load_iram(dmcu, params))
2155 			return -EINVAL;
2156 	} else if (adev->dm.dc->ctx->dmub_srv) {
2157 		struct dc_link *edp_links[MAX_NUM_EDP];
2158 		int edp_num;
2159 
2160 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2161 		for (i = 0; i < edp_num; i++) {
2162 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2163 				return -EINVAL;
2164 		}
2165 	}
2166 
2167 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2168 }
2169 
2170 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2171 {
2172 	struct amdgpu_dm_connector *aconnector;
2173 	struct drm_connector *connector;
2174 	struct drm_connector_list_iter iter;
2175 	struct drm_dp_mst_topology_mgr *mgr;
2176 	int ret;
2177 	bool need_hotplug = false;
2178 
2179 	drm_connector_list_iter_begin(dev, &iter);
2180 	drm_for_each_connector_iter(connector, &iter) {
2181 		aconnector = to_amdgpu_dm_connector(connector);
2182 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2183 		    aconnector->mst_port)
2184 			continue;
2185 
2186 		mgr = &aconnector->mst_mgr;
2187 
2188 		if (suspend) {
2189 			drm_dp_mst_topology_mgr_suspend(mgr);
2190 		} else {
2191 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2192 			if (ret < 0) {
2193 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2194 				need_hotplug = true;
2195 			}
2196 		}
2197 	}
2198 	drm_connector_list_iter_end(&iter);
2199 
2200 	if (need_hotplug)
2201 		drm_kms_helper_hotplug_event(dev);
2202 }
2203 
2204 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2205 {
2206 	int ret = 0;
2207 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed. The
	 * settings should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct
	 * then calls the pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not to Renoir.
	 */
2238 	switch (adev->ip_versions[DCE_HWIP][0]) {
2239 	case IP_VERSION(2, 0, 2):
2240 	case IP_VERSION(2, 0, 0):
2241 		break;
2242 	default:
2243 		return 0;
2244 	}
2245 
2246 	ret = amdgpu_dpm_write_watermarks_table(adev);
2247 	if (ret) {
2248 		DRM_ERROR("Failed to update WMTABLE!\n");
2249 		return ret;
2250 	}
2251 
2252 	return 0;
2253 }
2254 
2255 /**
2256  * dm_hw_init() - Initialize DC device
2257  * @handle: The base driver device containing the amdgpu_dm device.
2258  *
2259  * Initialize the &struct amdgpu_display_manager device. This involves calling
2260  * the initializers of each DM component, then populating the struct with them.
2261  *
2262  * Although the function implies hardware initialization, both hardware and
2263  * software are initialized here. Splitting them out to their relevant init
2264  * hooks is a future TODO item.
2265  *
2266  * Some notable things that are initialized here:
2267  *
2268  * - Display Core, both software and hardware
2269  * - DC modules that we need (freesync and color management)
2270  * - DRM software states
2271  * - Interrupt sources and handlers
2272  * - Vblank support
2273  * - Debug FS entries, if enabled
2274  */
2275 static int dm_hw_init(void *handle)
2276 {
2277 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2278 	/* Create DAL display manager */
2279 	amdgpu_dm_init(adev);
2280 	amdgpu_dm_hpd_init(adev);
2281 
2282 	return 0;
2283 }
2284 
2285 /**
2286  * dm_hw_fini() - Teardown DC device
2287  * @handle: The base driver device containing the amdgpu_dm device.
2288  *
2289  * Teardown components within &struct amdgpu_display_manager that require
2290  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2291  * were loaded. Also flush IRQ workqueues and disable them.
2292  */
2293 static int dm_hw_fini(void *handle)
2294 {
2295 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2296 
2297 	amdgpu_dm_hpd_fini(adev);
2298 
2299 	amdgpu_dm_irq_fini(adev);
2300 	amdgpu_dm_fini(adev);
2301 	return 0;
2302 }
2303 
2304 
2305 static int dm_enable_vblank(struct drm_crtc *crtc);
2306 static void dm_disable_vblank(struct drm_crtc *crtc);
2307 
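/*
 * Enable or disable the page-flip and vblank interrupts for every stream in
 * the given state that has active planes. Used to quiesce and restore display
 * interrupts around GPU reset.
 */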
2308 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2309 				 struct dc_state *state, bool enable)
2310 {
2311 	enum dc_irq_source irq_source;
2312 	struct amdgpu_crtc *acrtc;
2313 	int rc = -EBUSY;
2314 	int i = 0;
2315 
2316 	for (i = 0; i < state->stream_count; i++) {
2317 		acrtc = get_crtc_by_otg_inst(
2318 				adev, state->stream_status[i].primary_otg_inst);
2319 
2320 		if (acrtc && state->stream_status[i].plane_count != 0) {
2321 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2322 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2323 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2324 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2325 			if (rc)
2326 				DRM_WARN("Failed to %s pflip interrupts\n",
2327 					 enable ? "enable" : "disable");
2328 
2329 			if (enable) {
2330 				rc = dm_enable_vblank(&acrtc->base);
2331 				if (rc)
2332 					DRM_WARN("Failed to enable vblank interrupts\n");
2333 			} else {
2334 				dm_disable_vblank(&acrtc->base);
2335 			}
2336 
2337 		}
2338 	}
2339 
2340 }
2341 
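/*
 * Commit a copy of the current context with every stream (and its planes)
 * removed, bringing DC down to zero active streams. Used when suspending
 * during GPU reset.
 */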
2342 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2343 {
2344 	struct dc_state *context = NULL;
2345 	enum dc_status res = DC_ERROR_UNEXPECTED;
2346 	int i;
2347 	struct dc_stream_state *del_streams[MAX_PIPES];
2348 	int del_streams_count = 0;
2349 
2350 	memset(del_streams, 0, sizeof(del_streams));
2351 
2352 	context = dc_create_state(dc);
2353 	if (context == NULL)
2354 		goto context_alloc_fail;
2355 
2356 	dc_resource_state_copy_construct_current(dc, context);
2357 
2358 	/* First remove from context all streams */
2359 	for (i = 0; i < context->stream_count; i++) {
2360 		struct dc_stream_state *stream = context->streams[i];
2361 
2362 		del_streams[del_streams_count++] = stream;
2363 	}
2364 
2365 	/* Remove all planes for removed streams and then remove the streams */
2366 	for (i = 0; i < del_streams_count; i++) {
2367 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2368 			res = DC_FAIL_DETACH_SURFACES;
2369 			goto fail;
2370 		}
2371 
2372 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2373 		if (res != DC_OK)
2374 			goto fail;
2375 	}
2376 
2377 	res = dc_commit_state(dc, context);
2378 
2379 fail:
2380 	dc_release_state(context);
2381 
2382 context_alloc_fail:
2383 	return res;
2384 }
2385 
2386 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2387 {
2388 	int i;
2389 
2390 	if (dm->hpd_rx_offload_wq) {
2391 		for (i = 0; i < dm->dc->caps.max_links; i++)
2392 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2393 	}
2394 }
2395 
2396 static int dm_suspend(void *handle)
2397 {
2398 	struct amdgpu_device *adev = handle;
2399 	struct amdgpu_display_manager *dm = &adev->dm;
2400 	int ret = 0;
2401 
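	/*
	 * On the GPU-reset path, cache the current DC state and commit zero
	 * streams instead of going through the DRM atomic suspend helpers;
	 * dm_resume() re-applies the cached state. dc_lock is intentionally
	 * held across the reset and released in dm_resume().
	 */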
2402 	if (amdgpu_in_reset(adev)) {
2403 		mutex_lock(&dm->dc_lock);
2404 
2405 #if defined(CONFIG_DRM_AMD_DC_DCN)
2406 		dc_allow_idle_optimizations(adev->dm.dc, false);
2407 #endif
2408 
2409 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2410 
2411 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2412 
2413 		amdgpu_dm_commit_zero_streams(dm->dc);
2414 
2415 		amdgpu_dm_irq_suspend(adev);
2416 
2417 		hpd_rx_irq_work_suspend(dm);
2418 
2419 		return ret;
2420 	}
2421 
2422 	WARN_ON(adev->dm.cached_state);
2423 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2424 
2425 	s3_handle_mst(adev_to_drm(adev), true);
2426 
2427 	amdgpu_dm_irq_suspend(adev);
2428 
2429 	hpd_rx_irq_work_suspend(dm);
2430 
2431 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2432 
2433 	return 0;
2434 }
2435 
2436 struct amdgpu_dm_connector *
2437 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2438 					     struct drm_crtc *crtc)
2439 {
2440 	uint32_t i;
2441 	struct drm_connector_state *new_con_state;
2442 	struct drm_connector *connector;
2443 	struct drm_crtc *crtc_from_state;
2444 
2445 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2446 		crtc_from_state = new_con_state->crtc;
2447 
2448 		if (crtc_from_state == crtc)
2449 			return to_amdgpu_dm_connector(connector);
2450 	}
2451 
2452 	return NULL;
2453 }
2454 
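/*
 * Fake a link detection for forced connectors: build a sink that matches the
 * connector signal type and read the local EDID, without touching the
 * physical link.
 */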
2455 static void emulated_link_detect(struct dc_link *link)
2456 {
2457 	struct dc_sink_init_data sink_init_data = { 0 };
2458 	struct display_sink_capability sink_caps = { 0 };
2459 	enum dc_edid_status edid_status;
2460 	struct dc_context *dc_ctx = link->ctx;
2461 	struct dc_sink *sink = NULL;
2462 	struct dc_sink *prev_sink = NULL;
2463 
2464 	link->type = dc_connection_none;
2465 	prev_sink = link->local_sink;
2466 
2467 	if (prev_sink)
2468 		dc_sink_release(prev_sink);
2469 
2470 	switch (link->connector_signal) {
2471 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2472 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2473 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2474 		break;
2475 	}
2476 
2477 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2478 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2479 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2480 		break;
2481 	}
2482 
2483 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2484 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2485 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2486 		break;
2487 	}
2488 
2489 	case SIGNAL_TYPE_LVDS: {
2490 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2491 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2492 		break;
2493 	}
2494 
2495 	case SIGNAL_TYPE_EDP: {
2496 		sink_caps.transaction_type =
2497 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2498 		sink_caps.signal = SIGNAL_TYPE_EDP;
2499 		break;
2500 	}
2501 
2502 	case SIGNAL_TYPE_DISPLAY_PORT: {
2503 		sink_caps.transaction_type =
2504 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2505 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2506 		break;
2507 	}
2508 
2509 	default:
2510 		DC_ERROR("Invalid connector type! signal:%d\n",
2511 			link->connector_signal);
2512 		return;
2513 	}
2514 
2515 	sink_init_data.link = link;
2516 	sink_init_data.sink_signal = sink_caps.signal;
2517 
2518 	sink = dc_sink_create(&sink_init_data);
2519 	if (!sink) {
2520 		DC_ERROR("Failed to create sink!\n");
2521 		return;
2522 	}
2523 
2524 	/* dc_sink_create returns a new reference */
2525 	link->local_sink = sink;
2526 
2527 	edid_status = dm_helpers_read_local_edid(
2528 			link->ctx,
2529 			link,
2530 			sink);
2531 
2532 	if (edid_status != EDID_OK)
2533 		DC_ERROR("Failed to read EDID");
2534 
2535 }
2536 
2537 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2538 				     struct amdgpu_display_manager *dm)
2539 {
2540 	struct {
2541 		struct dc_surface_update surface_updates[MAX_SURFACES];
2542 		struct dc_plane_info plane_infos[MAX_SURFACES];
2543 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2544 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2545 		struct dc_stream_update stream_update;
	} *bundle;
2547 	int k, m;
2548 
2549 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2550 
2551 	if (!bundle) {
2552 		dm_error("Failed to allocate update bundle\n");
2553 		goto cleanup;
2554 	}
2555 
2556 	for (k = 0; k < dc_state->stream_count; k++) {
2557 		bundle->stream_update.stream = dc_state->streams[k];
2558 
2559 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2560 			bundle->surface_updates[m].surface =
2561 				dc_state->stream_status->plane_states[m];
2562 			bundle->surface_updates[m].surface->force_full_update =
2563 				true;
2564 		}
2565 		dc_commit_updates_for_stream(
2566 			dm->dc, bundle->surface_updates,
2567 			dc_state->stream_status->plane_count,
2568 			dc_state->streams[k], &bundle->stream_update, dc_state);
2569 	}
2570 
2571 cleanup:
2572 	kfree(bundle);
2573 
2574 	return;
2575 }
2576 
2577 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2578 {
2579 	struct dc_stream_state *stream_state;
2580 	struct amdgpu_dm_connector *aconnector = link->priv;
2581 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2582 	struct dc_stream_update stream_update;
2583 	bool dpms_off = true;
2584 
2585 	memset(&stream_update, 0, sizeof(stream_update));
2586 	stream_update.dpms_off = &dpms_off;
2587 
2588 	mutex_lock(&adev->dm.dc_lock);
2589 	stream_state = dc_stream_find_from_link(link);
2590 
2591 	if (stream_state == NULL) {
2592 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2593 		mutex_unlock(&adev->dm.dc_lock);
2594 		return;
2595 	}
2596 
2597 	stream_update.stream = stream_state;
2598 	acrtc_state->force_dpms_off = true;
2599 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2600 				     stream_state, &stream_update,
2601 				     stream_state->ctx->dc->current_state);
2602 	mutex_unlock(&adev->dm.dc_lock);
2603 }
2604 
2605 static int dm_resume(void *handle)
2606 {
2607 	struct amdgpu_device *adev = handle;
2608 	struct drm_device *ddev = adev_to_drm(adev);
2609 	struct amdgpu_display_manager *dm = &adev->dm;
2610 	struct amdgpu_dm_connector *aconnector;
2611 	struct drm_connector *connector;
2612 	struct drm_connector_list_iter iter;
2613 	struct drm_crtc *crtc;
2614 	struct drm_crtc_state *new_crtc_state;
2615 	struct dm_crtc_state *dm_new_crtc_state;
2616 	struct drm_plane *plane;
2617 	struct drm_plane_state *new_plane_state;
2618 	struct dm_plane_state *dm_new_plane_state;
2619 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2620 	enum dc_connection_type new_connection_type = dc_connection_none;
2621 	struct dc_state *dc_state;
2622 	int i, r, j;
2623 
2624 	if (amdgpu_in_reset(adev)) {
2625 		dc_state = dm->cached_dc_state;
2626 
2627 		/*
2628 		 * The dc->current_state is backed up into dm->cached_dc_state
2629 		 * before we commit 0 streams.
2630 		 *
2631 		 * DC will clear link encoder assignments on the real state
2632 		 * but the changes won't propagate over to the copy we made
2633 		 * before the 0 streams commit.
2634 		 *
2635 		 * DC expects that link encoder assignments are *not* valid
2636 		 * when committing a state, so as a workaround it needs to be
2637 		 * cleared here.
2638 		 */
2639 		link_enc_cfg_init(dm->dc, dc_state);
2640 
2641 		if (dc_enable_dmub_notifications(adev->dm.dc))
2642 			amdgpu_dm_outbox_init(adev);
2643 
2644 		r = dm_dmub_hw_init(adev);
2645 		if (r)
2646 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2647 
2648 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2649 		dc_resume(dm->dc);
2650 
2651 		amdgpu_dm_irq_resume_early(adev);
2652 
2653 		for (i = 0; i < dc_state->stream_count; i++) {
2654 			dc_state->streams[i]->mode_changed = true;
2655 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2656 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2657 					= 0xffffffff;
2658 			}
2659 		}
2660 
2661 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2662 
2663 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2664 
2665 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2666 
2667 		dc_release_state(dm->cached_dc_state);
2668 		dm->cached_dc_state = NULL;
2669 
2670 		amdgpu_dm_irq_resume_late(adev);
2671 
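		/* Release dc_lock taken in dm_suspend() on the GPU-reset path. */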
2672 		mutex_unlock(&dm->dc_lock);
2673 
2674 		return 0;
2675 	}
2676 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2677 	dc_release_state(dm_state->context);
2678 	dm_state->context = dc_create_state(dm->dc);
2679 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2680 	dc_resource_state_construct(dm->dc, dm_state->context);
2681 
2682 	/* Re-enable outbox interrupts for DPIA. */
2683 	if (dc_enable_dmub_notifications(adev->dm.dc))
2684 		amdgpu_dm_outbox_init(adev);
2685 
2686 	/* Before powering on DC we need to re-initialize DMUB. */
2687 	dm_dmub_hw_resume(adev);
2688 
2689 	/* power on hardware */
2690 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2691 
2692 	/* program HPD filter */
2693 	dc_resume(dm->dc);
2694 
2695 	/*
2696 	 * early enable HPD Rx IRQ, should be done before set mode as short
2697 	 * pulse interrupts are used for MST
2698 	 */
2699 	amdgpu_dm_irq_resume_early(adev);
2700 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2702 	s3_handle_mst(ddev, false);
2703 
	/* Do detection */
2705 	drm_connector_list_iter_begin(ddev, &iter);
2706 	drm_for_each_connector_iter(connector, &iter) {
2707 		aconnector = to_amdgpu_dm_connector(connector);
2708 
2709 		/*
2710 		 * this is the case when traversing through already created
2711 		 * MST connectors, should be skipped
2712 		 */
2713 		if (aconnector->mst_port)
2714 			continue;
2715 
2716 		mutex_lock(&aconnector->hpd_lock);
2717 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2718 			DRM_ERROR("KMS: Failed to detect connector\n");
2719 
2720 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2721 			emulated_link_detect(aconnector->dc_link);
2722 		else
2723 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2724 
2725 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2726 			aconnector->fake_enable = false;
2727 
2728 		if (aconnector->dc_sink)
2729 			dc_sink_release(aconnector->dc_sink);
2730 		aconnector->dc_sink = NULL;
2731 		amdgpu_dm_update_connector_after_detect(aconnector);
2732 		mutex_unlock(&aconnector->hpd_lock);
2733 	}
2734 	drm_connector_list_iter_end(&iter);
2735 
2736 	/* Force mode set in atomic commit */
2737 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2738 		new_crtc_state->active_changed = true;
2739 
2740 	/*
2741 	 * atomic_check is expected to create the dc states. We need to release
2742 	 * them here, since they were duplicated as part of the suspend
2743 	 * procedure.
2744 	 */
2745 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2746 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2747 		if (dm_new_crtc_state->stream) {
2748 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2749 			dc_stream_release(dm_new_crtc_state->stream);
2750 			dm_new_crtc_state->stream = NULL;
2751 		}
2752 	}
2753 
2754 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2755 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2756 		if (dm_new_plane_state->dc_state) {
2757 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2758 			dc_plane_state_release(dm_new_plane_state->dc_state);
2759 			dm_new_plane_state->dc_state = NULL;
2760 		}
2761 	}
2762 
2763 	drm_atomic_helper_resume(ddev, dm->cached_state);
2764 
2765 	dm->cached_state = NULL;
2766 
2767 	amdgpu_dm_irq_resume_late(adev);
2768 
2769 	amdgpu_dm_smu_write_watermarks_table(adev);
2770 
2771 	return 0;
2772 }
2773 
2774 /**
2775  * DOC: DM Lifecycle
2776  *
2777  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2778  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2779  * the base driver's device list to be initialized and torn down accordingly.
2780  *
2781  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2782  */
2783 
2784 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2785 	.name = "dm",
2786 	.early_init = dm_early_init,
2787 	.late_init = dm_late_init,
2788 	.sw_init = dm_sw_init,
2789 	.sw_fini = dm_sw_fini,
2790 	.early_fini = amdgpu_dm_early_fini,
2791 	.hw_init = dm_hw_init,
2792 	.hw_fini = dm_hw_fini,
2793 	.suspend = dm_suspend,
2794 	.resume = dm_resume,
2795 	.is_idle = dm_is_idle,
2796 	.wait_for_idle = dm_wait_for_idle,
2797 	.check_soft_reset = dm_check_soft_reset,
2798 	.soft_reset = dm_soft_reset,
2799 	.set_clockgating_state = dm_set_clockgating_state,
2800 	.set_powergating_state = dm_set_powergating_state,
2801 };
2802 
2803 const struct amdgpu_ip_block_version dm_ip_block =
2804 {
2805 	.type = AMD_IP_BLOCK_TYPE_DCE,
2806 	.major = 1,
2807 	.minor = 0,
2808 	.rev = 0,
2809 	.funcs = &amdgpu_dm_funcs,
2810 };
2811 
2812 
2813 /**
2814  * DOC: atomic
2815  *
2816  * *WIP*
2817  */
2818 
2819 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2820 	.fb_create = amdgpu_display_user_framebuffer_create,
2821 	.get_format_info = amd_get_format_info,
2822 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2823 	.atomic_check = amdgpu_dm_atomic_check,
2824 	.atomic_commit = drm_atomic_helper_commit,
2825 };
2826 
2827 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2828 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2829 };
2830 
2831 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2832 {
2833 	u32 max_cll, min_cll, max, min, q, r;
2834 	struct amdgpu_dm_backlight_caps *caps;
2835 	struct amdgpu_display_manager *dm;
2836 	struct drm_connector *conn_base;
2837 	struct amdgpu_device *adev;
2838 	struct dc_link *link = NULL;
2839 	static const u8 pre_computed_values[] = {
2840 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2841 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2842 	int i;
2843 
2844 	if (!aconnector || !aconnector->dc_link)
2845 		return;
2846 
2847 	link = aconnector->dc_link;
2848 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2849 		return;
2850 
2851 	conn_base = &aconnector->base;
2852 	adev = drm_to_adev(conn_base->dev);
2853 	dm = &adev->dm;
2854 	for (i = 0; i < dm->num_of_edps; i++) {
2855 		if (link == dm->backlight_link[i])
2856 			break;
2857 	}
2858 	if (i >= dm->num_of_edps)
2859 		return;
2860 	caps = &dm->backlight_caps[i];
2861 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2862 	caps->aux_support = false;
2863 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2864 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2865 
2866 	if (caps->ext_caps->bits.oled == 1 /*||
2867 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2868 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2869 		caps->aux_support = true;
2870 
2871 	if (amdgpu_backlight == 0)
2872 		caps->aux_support = false;
2873 	else if (amdgpu_backlight == 1)
2874 		caps->aux_support = true;
2875 
	/* From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would need floating-point
	 * precision; to avoid that, we take advantage of the fact that CV is
	 * divided by a constant. From Euclid's division algorithm, CV can be
	 * written as CV = 32*q + r. Substituting into the luminance expression
	 * gives 50*(2**q)*(2**(r/32)), so we only need to pre-compute
	 * 50*2**(r/32) for r in 0..31. The values were pre-computed with the
	 * following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results can be verified against pre_computed_values.
	 */
2891 	q = max_cll >> 5;
2892 	r = max_cll % 32;
2893 	max = (1 << q) * pre_computed_values[r];
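	/* For example, max_cll = 90 gives q = 2, r = 26, so
	 * max = 4 * pre_computed_values[26] = 4 * 88 = 352, which matches
	 * 50*2**(90/32) ~= 351.
	 */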
2894 
	/* min luminance: maxLum * (CV/255)^2 / 100 */
2896 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2897 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2898 
2899 	caps->aux_max_input_signal = max;
2900 	caps->aux_min_input_signal = min;
2901 }
2902 
2903 void amdgpu_dm_update_connector_after_detect(
2904 		struct amdgpu_dm_connector *aconnector)
2905 {
2906 	struct drm_connector *connector = &aconnector->base;
2907 	struct drm_device *dev = connector->dev;
2908 	struct dc_sink *sink;
2909 
2910 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2912 		return;
2913 
2914 	sink = aconnector->dc_link->local_sink;
2915 	if (sink)
2916 		dc_sink_retain(sink);
2917 
2918 	/*
2919 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2920 	 * the connector sink is set to either fake or physical sink depends on link status.
2921 	 * Skip if already done during boot.
2922 	 */
2923 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2924 			&& aconnector->dc_em_sink) {
2925 
2926 		/*
2927 		 * For S3 resume with headless use eml_sink to fake stream
2928 		 * because on resume connector->sink is set to NULL
2929 		 */
2930 		mutex_lock(&dev->mode_config.mutex);
2931 
2932 		if (sink) {
2933 			if (aconnector->dc_sink) {
2934 				amdgpu_dm_update_freesync_caps(connector, NULL);
2935 				/*
2936 				 * retain and release below are used to
2937 				 * bump up refcount for sink because the link doesn't point
2938 				 * to it anymore after disconnect, so on next crtc to connector
2939 				 * reshuffle by UMD we will get into unwanted dc_sink release
2940 				 */
2941 				dc_sink_release(aconnector->dc_sink);
2942 			}
2943 			aconnector->dc_sink = sink;
2944 			dc_sink_retain(aconnector->dc_sink);
2945 			amdgpu_dm_update_freesync_caps(connector,
2946 					aconnector->edid);
2947 		} else {
2948 			amdgpu_dm_update_freesync_caps(connector, NULL);
2949 			if (!aconnector->dc_sink) {
2950 				aconnector->dc_sink = aconnector->dc_em_sink;
2951 				dc_sink_retain(aconnector->dc_sink);
2952 			}
2953 		}
2954 
2955 		mutex_unlock(&dev->mode_config.mutex);
2956 
2957 		if (sink)
2958 			dc_sink_release(sink);
2959 		return;
2960 	}
2961 
2962 	/*
2963 	 * TODO: temporary guard to look for proper fix
2964 	 * if this sink is MST sink, we should not do anything
2965 	 */
2966 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2967 		dc_sink_release(sink);
2968 		return;
2969 	}
2970 
2971 	if (aconnector->dc_sink == sink) {
2972 		/*
2973 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2974 		 * Do nothing!!
2975 		 */
2976 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2977 				aconnector->connector_id);
2978 		if (sink)
2979 			dc_sink_release(sink);
2980 		return;
2981 	}
2982 
2983 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2984 		aconnector->connector_id, aconnector->dc_sink, sink);
2985 
2986 	mutex_lock(&dev->mode_config.mutex);
2987 
2988 	/*
2989 	 * 1. Update status of the drm connector
2990 	 * 2. Send an event and let userspace tell us what to do
2991 	 */
2992 	if (sink) {
2993 		/*
2994 		 * TODO: check if we still need the S3 mode update workaround.
2995 		 * If yes, put it here.
2996 		 */
2997 		if (aconnector->dc_sink) {
2998 			amdgpu_dm_update_freesync_caps(connector, NULL);
2999 			dc_sink_release(aconnector->dc_sink);
3000 		}
3001 
3002 		aconnector->dc_sink = sink;
3003 		dc_sink_retain(aconnector->dc_sink);
3004 		if (sink->dc_edid.length == 0) {
3005 			aconnector->edid = NULL;
3006 			if (aconnector->dc_link->aux_mode) {
3007 				drm_dp_cec_unset_edid(
3008 					&aconnector->dm_dp_aux.aux);
3009 			}
3010 		} else {
3011 			aconnector->edid =
3012 				(struct edid *)sink->dc_edid.raw_edid;
3013 
3014 			if (aconnector->dc_link->aux_mode)
3015 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3016 						    aconnector->edid);
3017 		}
3018 
3019 		drm_connector_update_edid_property(connector, aconnector->edid);
3020 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3021 		update_connector_ext_caps(aconnector);
3022 	} else {
3023 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3024 		amdgpu_dm_update_freesync_caps(connector, NULL);
3025 		drm_connector_update_edid_property(connector, NULL);
3026 		aconnector->num_modes = 0;
3027 		dc_sink_release(aconnector->dc_sink);
3028 		aconnector->dc_sink = NULL;
3029 		aconnector->edid = NULL;
3030 #ifdef CONFIG_DRM_AMD_DC_HDCP
3031 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3032 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3033 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3034 #endif
3035 	}
3036 
3037 	mutex_unlock(&dev->mode_config.mutex);
3038 
3039 	update_subconnector_property(aconnector);
3040 
3041 	if (sink)
3042 		dc_sink_release(sink);
3043 }
3044 
3045 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3046 {
3047 	struct drm_connector *connector = &aconnector->base;
3048 	struct drm_device *dev = connector->dev;
3049 	enum dc_connection_type new_connection_type = dc_connection_none;
3050 	struct amdgpu_device *adev = drm_to_adev(dev);
3051 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3052 	struct dm_crtc_state *dm_crtc_state = NULL;
3053 
3054 	if (adev->dm.disable_hpd_irq)
3055 		return;
3056 
3057 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3058 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3059 					dm_con_state->base.state,
3060 					dm_con_state->base.crtc));
3061 	/*
3062 	 * In case of failure or MST no need to update connector status or notify the OS
3063 	 * since (for MST case) MST does this in its own context.
3064 	 */
3065 	mutex_lock(&aconnector->hpd_lock);
3066 
3067 #ifdef CONFIG_DRM_AMD_DC_HDCP
3068 	if (adev->dm.hdcp_workqueue) {
3069 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3070 		dm_con_state->update_hdcp = true;
3071 	}
3072 #endif
3073 	if (aconnector->fake_enable)
3074 		aconnector->fake_enable = false;
3075 
3076 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3077 		DRM_ERROR("KMS: Failed to detect connector\n");
3078 
3079 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3080 		emulated_link_detect(aconnector->dc_link);
3081 
3082 		drm_modeset_lock_all(dev);
3083 		dm_restore_drm_connector_state(dev, connector);
3084 		drm_modeset_unlock_all(dev);
3085 
3086 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3087 			drm_kms_helper_connector_hotplug_event(connector);
3088 
3089 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3090 		if (new_connection_type == dc_connection_none &&
3091 		    aconnector->dc_link->type == dc_connection_none &&
3092 		    dm_crtc_state)
3093 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3094 
3095 		amdgpu_dm_update_connector_after_detect(aconnector);
3096 
3097 		drm_modeset_lock_all(dev);
3098 		dm_restore_drm_connector_state(dev, connector);
3099 		drm_modeset_unlock_all(dev);
3100 
3101 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3102 			drm_kms_helper_connector_hotplug_event(connector);
3103 	}
3104 	mutex_unlock(&aconnector->hpd_lock);
3105 
3106 }
3107 
3108 static void handle_hpd_irq(void *param)
3109 {
3110 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3111 
3112 	handle_hpd_irq_helper(aconnector);
3113 
3114 }
3115 
3116 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3117 {
3118 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3119 	uint8_t dret;
3120 	bool new_irq_handled = false;
3121 	int dpcd_addr;
3122 	int dpcd_bytes_to_read;
3123 
3124 	const int max_process_count = 30;
3125 	int process_count = 0;
3126 
3127 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3128 
3129 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3130 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3131 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3132 		dpcd_addr = DP_SINK_COUNT;
3133 	} else {
3134 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3135 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3136 		dpcd_addr = DP_SINK_COUNT_ESI;
3137 	}
3138 
3139 	dret = drm_dp_dpcd_read(
3140 		&aconnector->dm_dp_aux.aux,
3141 		dpcd_addr,
3142 		esi,
3143 		dpcd_bytes_to_read);
3144 
3145 	while (dret == dpcd_bytes_to_read &&
3146 		process_count < max_process_count) {
3147 		uint8_t retry;
3148 		dret = 0;
3149 
3150 		process_count++;
3151 
3152 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3153 		/* handle HPD short pulse irq */
3154 		if (aconnector->mst_mgr.mst_state)
3155 			drm_dp_mst_hpd_irq(
3156 				&aconnector->mst_mgr,
3157 				esi,
3158 				&new_irq_handled);
3159 
3160 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
3162 			const int ack_dpcd_bytes_to_write =
3163 				dpcd_bytes_to_read - 1;
3164 
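			/* Write back esi[1..] to clear the serviced IRQ bits;
			 * byte 0 (the sink count) is skipped, hence the +1/-1.
			 */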
3165 			for (retry = 0; retry < 3; retry++) {
3166 				uint8_t wret;
3167 
3168 				wret = drm_dp_dpcd_write(
3169 					&aconnector->dm_dp_aux.aux,
3170 					dpcd_addr + 1,
3171 					&esi[1],
3172 					ack_dpcd_bytes_to_write);
3173 				if (wret == ack_dpcd_bytes_to_write)
3174 					break;
3175 			}
3176 
3177 			/* check if there is new irq to be handled */
3178 			dret = drm_dp_dpcd_read(
3179 				&aconnector->dm_dp_aux.aux,
3180 				dpcd_addr,
3181 				esi,
3182 				dpcd_bytes_to_read);
3183 
3184 			new_irq_handled = false;
3185 		} else {
3186 			break;
3187 		}
3188 	}
3189 
3190 	if (process_count == max_process_count)
3191 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3192 }
3193 
3194 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3195 							union hpd_irq_data hpd_irq_data)
3196 {
3197 	struct hpd_rx_irq_offload_work *offload_work =
3198 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3199 
3200 	if (!offload_work) {
3201 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3202 		return;
3203 	}
3204 
3205 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3206 	offload_work->data = hpd_irq_data;
3207 	offload_work->offload_wq = offload_wq;
3208 
3209 	queue_work(offload_wq->wq, &offload_work->work);
3210 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3211 }
3212 
3213 static void handle_hpd_rx_irq(void *param)
3214 {
3215 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3216 	struct drm_connector *connector = &aconnector->base;
3217 	struct drm_device *dev = connector->dev;
3218 	struct dc_link *dc_link = aconnector->dc_link;
3219 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3220 	bool result = false;
3221 	enum dc_connection_type new_connection_type = dc_connection_none;
3222 	struct amdgpu_device *adev = drm_to_adev(dev);
3223 	union hpd_irq_data hpd_irq_data;
3224 	bool link_loss = false;
3225 	bool has_left_work = false;
3226 	int idx = aconnector->base.index;
3227 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3228 
3229 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3230 
3231 	if (adev->dm.disable_hpd_irq)
3232 		return;
3233 
3234 	/*
3235 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3236 	 * conflict, after implement i2c helper, this mutex should be
3237 	 * retired.
3238 	 */
3239 	mutex_lock(&aconnector->hpd_lock);
3240 
3241 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3242 						&link_loss, true, &has_left_work);
3243 
3244 	if (!has_left_work)
3245 		goto out;
3246 
3247 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3248 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3249 		goto out;
3250 	}
3251 
3252 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3253 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3254 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3255 			dm_handle_mst_sideband_msg(aconnector);
3256 			goto out;
3257 		}
3258 
3259 		if (link_loss) {
3260 			bool skip = false;
3261 
3262 			spin_lock(&offload_wq->offload_lock);
3263 			skip = offload_wq->is_handling_link_loss;
3264 
3265 			if (!skip)
3266 				offload_wq->is_handling_link_loss = true;
3267 
3268 			spin_unlock(&offload_wq->offload_lock);
3269 
3270 			if (!skip)
3271 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3272 
3273 			goto out;
3274 		}
3275 	}
3276 
3277 out:
3278 	if (result && !is_mst_root_connector) {
3279 		/* Downstream Port status changed. */
3280 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3281 			DRM_ERROR("KMS: Failed to detect connector\n");
3282 
3283 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3284 			emulated_link_detect(dc_link);
3285 
3286 			if (aconnector->fake_enable)
3287 				aconnector->fake_enable = false;
3288 
3289 			amdgpu_dm_update_connector_after_detect(aconnector);
3290 
3291 
3292 			drm_modeset_lock_all(dev);
3293 			dm_restore_drm_connector_state(dev, connector);
3294 			drm_modeset_unlock_all(dev);
3295 
3296 			drm_kms_helper_connector_hotplug_event(connector);
3297 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3298 
3299 			if (aconnector->fake_enable)
3300 				aconnector->fake_enable = false;
3301 
3302 			amdgpu_dm_update_connector_after_detect(aconnector);
3303 
3304 
3305 			drm_modeset_lock_all(dev);
3306 			dm_restore_drm_connector_state(dev, connector);
3307 			drm_modeset_unlock_all(dev);
3308 
3309 			drm_kms_helper_connector_hotplug_event(connector);
3310 		}
3311 	}
3312 #ifdef CONFIG_DRM_AMD_DC_HDCP
3313 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3314 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3316 	}
3317 #endif
3318 
3319 	if (dc_link->type != dc_connection_mst_branch)
3320 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3321 
3322 	mutex_unlock(&aconnector->hpd_lock);
3323 }
3324 
3325 static void register_hpd_handlers(struct amdgpu_device *adev)
3326 {
3327 	struct drm_device *dev = adev_to_drm(adev);
3328 	struct drm_connector *connector;
3329 	struct amdgpu_dm_connector *aconnector;
3330 	const struct dc_link *dc_link;
3331 	struct dc_interrupt_params int_params = {0};
3332 
3333 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3334 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3335 
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
3338 
3339 		aconnector = to_amdgpu_dm_connector(connector);
3340 		dc_link = aconnector->dc_link;
3341 
3342 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3343 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3344 			int_params.irq_source = dc_link->irq_source_hpd;
3345 
3346 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3347 					handle_hpd_irq,
3348 					(void *) aconnector);
3349 		}
3350 
3351 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3352 
3353 			/* Also register for DP short pulse (hpd_rx). */
3354 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
3356 
3357 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3358 					handle_hpd_rx_irq,
3359 					(void *) aconnector);
3360 
3361 			if (adev->dm.hpd_rx_offload_wq)
3362 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3363 					aconnector;
3364 		}
3365 	}
3366 }
3367 
3368 #if defined(CONFIG_DRM_AMD_DC_SI)
3369 /* Register IRQ sources and initialize IRQ callbacks */
3370 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3371 {
3372 	struct dc *dc = adev->dm.dc;
3373 	struct common_irq_params *c_irq_params;
3374 	struct dc_interrupt_params int_params = {0};
3375 	int r;
3376 	int i;
3377 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3378 
3379 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3380 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3381 
3382 	/*
3383 	 * Actions of amdgpu_irq_add_id():
3384 	 * 1. Register a set() function with base driver.
3385 	 *    Base driver will call set() function to enable/disable an
3386 	 *    interrupt in DC hardware.
3387 	 * 2. Register amdgpu_dm_irq_handler().
3388 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3389 	 *    coming from DC hardware.
3390 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3392 
3393 	/* Use VBLANK interrupt */
3394 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3396 		if (r) {
3397 			DRM_ERROR("Failed to add crtc irq id!\n");
3398 			return r;
3399 		}
3400 
3401 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3402 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
3404 
3405 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3406 
3407 		c_irq_params->adev = adev;
3408 		c_irq_params->irq_src = int_params.irq_source;
3409 
3410 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3411 				dm_crtc_high_irq, c_irq_params);
3412 	}
3413 
3414 	/* Use GRPH_PFLIP interrupt */
3415 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3416 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3417 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3418 		if (r) {
3419 			DRM_ERROR("Failed to add page flip irq id!\n");
3420 			return r;
3421 		}
3422 
3423 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3424 		int_params.irq_source =
3425 			dc_interrupt_to_irq_source(dc, i, 0);
3426 
3427 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3428 
3429 		c_irq_params->adev = adev;
3430 		c_irq_params->irq_src = int_params.irq_source;
3431 
3432 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3433 				dm_pflip_high_irq, c_irq_params);
3435 	}
3436 
3437 	/* HPD */
3438 	r = amdgpu_irq_add_id(adev, client_id,
3439 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3440 	if (r) {
3441 		DRM_ERROR("Failed to add hpd irq id!\n");
3442 		return r;
3443 	}
3444 
3445 	register_hpd_handlers(adev);
3446 
3447 	return 0;
3448 }
3449 #endif
3450 
3451 /* Register IRQ sources and initialize IRQ callbacks */
3452 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3453 {
3454 	struct dc *dc = adev->dm.dc;
3455 	struct common_irq_params *c_irq_params;
3456 	struct dc_interrupt_params int_params = {0};
3457 	int r;
3458 	int i;
3459 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3460 
3461 	if (adev->family >= AMDGPU_FAMILY_AI)
3462 		client_id = SOC15_IH_CLIENTID_DCE;
3463 
3464 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3465 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3466 
3467 	/*
3468 	 * Actions of amdgpu_irq_add_id():
3469 	 * 1. Register a set() function with base driver.
3470 	 *    Base driver will call set() function to enable/disable an
3471 	 *    interrupt in DC hardware.
3472 	 * 2. Register amdgpu_dm_irq_handler().
3473 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3474 	 *    coming from DC hardware.
3475 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3477 
3478 	/* Use VBLANK interrupt */
3479 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3480 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3481 		if (r) {
3482 			DRM_ERROR("Failed to add crtc irq id!\n");
3483 			return r;
3484 		}
3485 
3486 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3487 		int_params.irq_source =
3488 			dc_interrupt_to_irq_source(dc, i, 0);
3489 
3490 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3491 
3492 		c_irq_params->adev = adev;
3493 		c_irq_params->irq_src = int_params.irq_source;
3494 
3495 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3496 				dm_crtc_high_irq, c_irq_params);
3497 	}
3498 
3499 	/* Use VUPDATE interrupt */
3500 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3501 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3502 		if (r) {
3503 			DRM_ERROR("Failed to add vupdate irq id!\n");
3504 			return r;
3505 		}
3506 
3507 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3508 		int_params.irq_source =
3509 			dc_interrupt_to_irq_source(dc, i, 0);
3510 
3511 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3512 
3513 		c_irq_params->adev = adev;
3514 		c_irq_params->irq_src = int_params.irq_source;
3515 
3516 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3517 				dm_vupdate_high_irq, c_irq_params);
3518 	}
3519 
3520 	/* Use GRPH_PFLIP interrupt */
3521 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3522 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3523 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3524 		if (r) {
3525 			DRM_ERROR("Failed to add page flip irq id!\n");
3526 			return r;
3527 		}
3528 
3529 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3530 		int_params.irq_source =
3531 			dc_interrupt_to_irq_source(dc, i, 0);
3532 
3533 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3534 
3535 		c_irq_params->adev = adev;
3536 		c_irq_params->irq_src = int_params.irq_source;
3537 
3538 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3539 				dm_pflip_high_irq, c_irq_params);
3541 	}
3542 
3543 	/* HPD */
3544 	r = amdgpu_irq_add_id(adev, client_id,
3545 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3546 	if (r) {
3547 		DRM_ERROR("Failed to add hpd irq id!\n");
3548 		return r;
3549 	}
3550 
3551 	register_hpd_handlers(adev);
3552 
3553 	return 0;
3554 }
3555 
3556 #if defined(CONFIG_DRM_AMD_DC_DCN)
3557 /* Register IRQ sources and initialize IRQ callbacks */
3558 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3559 {
3560 	struct dc *dc = adev->dm.dc;
3561 	struct common_irq_params *c_irq_params;
3562 	struct dc_interrupt_params int_params = {0};
3563 	int r;
3564 	int i;
3565 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3566 	static const unsigned int vrtl_int_srcid[] = {
3567 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3568 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3569 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3570 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3571 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3572 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3573 	};
3574 #endif
3575 
3576 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3577 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3578 
3579 	/*
3580 	 * Actions of amdgpu_irq_add_id():
3581 	 * 1. Register a set() function with base driver.
3582 	 *    Base driver will call set() function to enable/disable an
3583 	 *    interrupt in DC hardware.
3584 	 * 2. Register amdgpu_dm_irq_handler().
3585 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3586 	 *    coming from DC hardware.
3587 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3588 	 *    for acknowledging and handling.
3589 	 */
3590 
3591 	/* Use VSTARTUP interrupt */
3592 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3593 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3594 			i++) {
3595 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3596 
3597 		if (r) {
3598 			DRM_ERROR("Failed to add crtc irq id!\n");
3599 			return r;
3600 		}
3601 
3602 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3603 		int_params.irq_source =
3604 			dc_interrupt_to_irq_source(dc, i, 0);
3605 
3606 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3607 
3608 		c_irq_params->adev = adev;
3609 		c_irq_params->irq_src = int_params.irq_source;
3610 
3611 		amdgpu_dm_irq_register_interrupt(
3612 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3613 	}
3614 
3615 	/* Use otg vertical line interrupt */
3616 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3617 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3618 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3619 				vrtl_int_srcid[i], &adev->vline0_irq);
3620 
3621 		if (r) {
3622 			DRM_ERROR("Failed to add vline0 irq id!\n");
3623 			return r;
3624 		}
3625 
3626 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3627 		int_params.irq_source =
3628 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3629 
3630 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3631 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3632 			break;
3633 		}
3634 
3635 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3636 					- DC_IRQ_SOURCE_DC1_VLINE0];
3637 
3638 		c_irq_params->adev = adev;
3639 		c_irq_params->irq_src = int_params.irq_source;
3640 
3641 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3642 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3643 	}
3644 #endif
3645 
3646 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3647 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at the end of each vblank, regardless of the state of the lock,
3649 	 * matching DCE behaviour.
3650 	 */
3651 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3652 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3653 	     i++) {
3654 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3655 
3656 		if (r) {
3657 			DRM_ERROR("Failed to add vupdate irq id!\n");
3658 			return r;
3659 		}
3660 
3661 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3662 		int_params.irq_source =
3663 			dc_interrupt_to_irq_source(dc, i, 0);
3664 
3665 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3666 
3667 		c_irq_params->adev = adev;
3668 		c_irq_params->irq_src = int_params.irq_source;
3669 
3670 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3671 				dm_vupdate_high_irq, c_irq_params);
3672 	}
3673 
3674 	/* Use GRPH_PFLIP interrupt */
3675 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3676 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3677 			i++) {
3678 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3679 		if (r) {
3680 			DRM_ERROR("Failed to add page flip irq id!\n");
3681 			return r;
3682 		}
3683 
3684 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3685 		int_params.irq_source =
3686 			dc_interrupt_to_irq_source(dc, i, 0);
3687 
3688 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3689 
3690 		c_irq_params->adev = adev;
3691 		c_irq_params->irq_src = int_params.irq_source;
3692 
3693 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3694 				dm_pflip_high_irq, c_irq_params);
3696 	}
3697 
3698 	/* HPD */
3699 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3700 			&adev->hpd_irq);
3701 	if (r) {
3702 		DRM_ERROR("Failed to add hpd irq id!\n");
3703 		return r;
3704 	}
3705 
3706 	register_hpd_handlers(adev);
3707 
3708 	return 0;
3709 }
3710 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3711 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3712 {
3713 	struct dc *dc = adev->dm.dc;
3714 	struct common_irq_params *c_irq_params;
3715 	struct dc_interrupt_params int_params = {0};
3716 	int r, i;
3717 
3718 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3719 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3720 
3721 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3722 			&adev->dmub_outbox_irq);
3723 	if (r) {
3724 		DRM_ERROR("Failed to add outbox irq id!\n");
3725 		return r;
3726 	}
3727 
3728 	if (dc->ctx->dmub_srv) {
3729 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3730 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3731 		int_params.irq_source =
3732 		dc_interrupt_to_irq_source(dc, i, 0);
3733 
3734 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3735 
3736 		c_irq_params->adev = adev;
3737 		c_irq_params->irq_src = int_params.irq_source;
3738 
3739 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3740 				dm_dmub_outbox1_low_irq, c_irq_params);
3741 	}
3742 
3743 	return 0;
3744 }
3745 #endif
3746 
3747 /*
3748  * Acquires the lock for the atomic state object and returns
3749  * the new atomic state.
3750  *
3751  * This should only be called during atomic check.
3752  */
3753 int dm_atomic_get_state(struct drm_atomic_state *state,
3754 			struct dm_atomic_state **dm_state)
3755 {
3756 	struct drm_device *dev = state->dev;
3757 	struct amdgpu_device *adev = drm_to_adev(dev);
3758 	struct amdgpu_display_manager *dm = &adev->dm;
3759 	struct drm_private_state *priv_state;
3760 
3761 	if (*dm_state)
3762 		return 0;
3763 
3764 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3765 	if (IS_ERR(priv_state))
3766 		return PTR_ERR(priv_state);
3767 
3768 	*dm_state = to_dm_atomic_state(priv_state);
3769 
3770 	return 0;
3771 }
3772 
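/*
 * Returns the DM private object state already added to @state by
 * dm_atomic_get_state(), or NULL if it is not part of this commit.
 */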
3773 static struct dm_atomic_state *
3774 dm_atomic_get_new_state(struct drm_atomic_state *state)
3775 {
3776 	struct drm_device *dev = state->dev;
3777 	struct amdgpu_device *adev = drm_to_adev(dev);
3778 	struct amdgpu_display_manager *dm = &adev->dm;
3779 	struct drm_private_obj *obj;
3780 	struct drm_private_state *new_obj_state;
3781 	int i;
3782 
3783 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3784 		if (obj->funcs == dm->atomic_obj.funcs)
3785 			return to_dm_atomic_state(new_obj_state);
3786 	}
3787 
3788 	return NULL;
3789 }
3790 
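/*
 * Duplicate the DM private state, taking a copy of the DC state context so
 * the duplicated atomic state can be validated and committed independently.
 */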
3791 static struct drm_private_state *
3792 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3793 {
3794 	struct dm_atomic_state *old_state, *new_state;
3795 
3796 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3797 	if (!new_state)
3798 		return NULL;
3799 
3800 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3801 
3802 	old_state = to_dm_atomic_state(obj->state);
3803 
3804 	if (old_state && old_state->context)
3805 		new_state->context = dc_copy_state(old_state->context);
3806 
3807 	if (!new_state->context) {
3808 		kfree(new_state);
3809 		return NULL;
3810 	}
3811 
3812 	return &new_state->base;
3813 }
3814 
3815 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3816 				    struct drm_private_state *state)
3817 {
3818 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3819 
3820 	if (dm_state && dm_state->context)
3821 		dc_release_state(dm_state->context);
3822 
3823 	kfree(dm_state);
3824 }
3825 
3826 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3827 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3828 	.atomic_destroy_state = dm_atomic_destroy_state,
3829 };
3830 
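/*
 * Set up the DRM mode_config limits and create the DM private atomic object,
 * seeding its DC context from the current DC resource state.
 */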
3831 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3832 {
3833 	struct dm_atomic_state *state;
3834 	int r;
3835 
3836 	adev->mode_info.mode_config_initialized = true;
3837 
3838 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3839 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3840 
3841 	adev_to_drm(adev)->mode_config.max_width = 16384;
3842 	adev_to_drm(adev)->mode_config.max_height = 16384;
3843 
3844 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3845 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3846 	/* indicates support for immediate flip */
3847 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3848 
3849 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3850 
3851 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3852 	if (!state)
3853 		return -ENOMEM;
3854 
3855 	state->context = dc_create_state(adev->dm.dc);
3856 	if (!state->context) {
3857 		kfree(state);
3858 		return -ENOMEM;
3859 	}
3860 
3861 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3862 
3863 	drm_atomic_private_obj_init(adev_to_drm(adev),
3864 				    &adev->dm.atomic_obj,
3865 				    &state->base,
3866 				    &dm_atomic_state_funcs);
3867 
3868 	r = amdgpu_display_modeset_create_props(adev);
3869 	if (r) {
3870 		dc_release_state(state->context);
3871 		kfree(state);
3872 		return r;
3873 	}
3874 
3875 	r = amdgpu_dm_audio_init(adev);
3876 	if (r) {
3877 		dc_release_state(state->context);
3878 		kfree(state);
3879 		return r;
3880 	}
3881 
3882 	return 0;
3883 }
3884 
3885 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3886 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3887 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3888 
3889 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3890 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3891 
3892 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3893 					    int bl_idx)
3894 {
3895 #if defined(CONFIG_ACPI)
3896 	struct amdgpu_dm_backlight_caps caps;
3897 
3898 	memset(&caps, 0, sizeof(caps));
3899 
3900 	if (dm->backlight_caps[bl_idx].caps_valid)
3901 		return;
3902 
3903 	amdgpu_acpi_get_backlight_caps(&caps);
3904 	if (caps.caps_valid) {
3905 		dm->backlight_caps[bl_idx].caps_valid = true;
3906 		if (caps.aux_support)
3907 			return;
3908 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3909 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3910 	} else {
3911 		dm->backlight_caps[bl_idx].min_input_signal =
3912 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3913 		dm->backlight_caps[bl_idx].max_input_signal =
3914 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3915 	}
3916 #else
3917 	if (dm->backlight_caps[bl_idx].aux_support)
3918 		return;
3919 
3920 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3921 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3922 #endif
3923 }
3924 
3925 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3926 				unsigned *min, unsigned *max)
3927 {
3928 	if (!caps)
3929 		return 0;
3930 
3931 	if (caps->aux_support) {
3932 		// Firmware limits are in nits, DC API wants millinits.
3933 		*max = 1000 * caps->aux_max_input_signal;
3934 		*min = 1000 * caps->aux_min_input_signal;
3935 	} else {
3936 		// Firmware limits are 8-bit, PWM control is 16-bit.
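		// 0x101 * 0xFF == 0xFFFF, so the full 8-bit range maps onto the full 16-bit range.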
3937 		*max = 0x101 * caps->max_input_signal;
3938 		*min = 0x101 * caps->min_input_signal;
3939 	}
3940 	return 1;
3941 }
3942 
3943 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3944 					uint32_t brightness)
3945 {
3946 	unsigned min, max;
3947 
3948 	if (!get_brightness_range(caps, &min, &max))
3949 		return brightness;
3950 
3951 	// Rescale 0..255 to min..max
3952 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3953 				       AMDGPU_MAX_BL_LEVEL);
3954 }
3955 
3956 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3957 				      uint32_t brightness)
3958 {
3959 	unsigned min, max;
3960 
3961 	if (!get_brightness_range(caps, &min, &max))
3962 		return brightness;
3963 
3964 	if (brightness < min)
3965 		return 0;
3966 	// Rescale min..max to 0..255
3967 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3968 				 max - min);
3969 }
3970 
3971 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3972 					 int bl_idx,
3973 					 u32 user_brightness)
3974 {
3975 	struct amdgpu_dm_backlight_caps caps;
3976 	struct dc_link *link;
3977 	u32 brightness;
3978 	bool rc;
3979 
3980 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3981 	caps = dm->backlight_caps[bl_idx];
3982 
3983 	dm->brightness[bl_idx] = user_brightness;
3984 	/* update scratch register */
3985 	if (bl_idx == 0)
3986 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3987 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3988 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3989 
3990 	/* Change brightness based on AUX property */
3991 	if (caps.aux_support) {
3992 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3993 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3994 		if (!rc)
3995 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3996 	} else {
3997 		rc = dc_link_set_backlight_level(link, brightness, 0);
3998 		if (!rc)
3999 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4000 	}
4001 
4002 	return rc ? 0 : 1;
4003 }
4004 
4005 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4006 {
4007 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4008 	int i;
4009 
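	/* Map the backlight device back to its eDP panel index. */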
4010 	for (i = 0; i < dm->num_of_edps; i++) {
4011 		if (bd == dm->backlight_dev[i])
4012 			break;
4013 	}
4014 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4015 		i = 0;
4016 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4017 
4018 	return 0;
4019 }
4020 
4021 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4022 					 int bl_idx)
4023 {
4024 	struct amdgpu_dm_backlight_caps caps;
4025 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4026 
4027 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4028 	caps = dm->backlight_caps[bl_idx];
4029 
4030 	if (caps.aux_support) {
4031 		u32 avg, peak;
4032 		bool rc;
4033 
4034 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4035 		if (!rc)
4036 			return dm->brightness[bl_idx];
4037 		return convert_brightness_to_user(&caps, avg);
4038 	} else {
4039 		int ret = dc_link_get_backlight_level(link);
4040 
4041 		if (ret == DC_ERROR_UNEXPECTED)
4042 			return dm->brightness[bl_idx];
4043 		return convert_brightness_to_user(&caps, ret);
4044 	}
4045 }
4046 
4047 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4048 {
4049 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4050 	int i;
4051 
4052 	for (i = 0; i < dm->num_of_edps; i++) {
4053 		if (bd == dm->backlight_dev[i])
4054 			break;
4055 	}
4056 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4057 		i = 0;
4058 	return amdgpu_dm_backlight_get_level(dm, i);
4059 }
4060 
4061 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4062 	.options = BL_CORE_SUSPENDRESUME,
4063 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4064 	.update_status	= amdgpu_dm_backlight_update_status,
4065 };
4066 
4067 static void
4068 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4069 {
4070 	char bl_name[16];
4071 	struct backlight_properties props = { 0 };
4072 
4073 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4074 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4075 
4076 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4077 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4078 	props.type = BACKLIGHT_RAW;
4079 
4080 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4081 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4082 
4083 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4084 								       adev_to_drm(dm->adev)->dev,
4085 								       dm,
4086 								       &amdgpu_dm_backlight_ops,
4087 								       &props);
4088 
4089 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4090 		DRM_ERROR("DM: Backlight registration failed!\n");
4091 	else
4092 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4093 }
4094 #endif
4095 
4096 static int initialize_plane(struct amdgpu_display_manager *dm,
4097 			    struct amdgpu_mode_info *mode_info, int plane_id,
4098 			    enum drm_plane_type plane_type,
4099 			    const struct dc_plane_cap *plane_cap)
4100 {
4101 	struct drm_plane *plane;
4102 	unsigned long possible_crtcs;
4103 	int ret = 0;
4104 
4105 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4106 	if (!plane) {
4107 		DRM_ERROR("KMS: Failed to allocate plane\n");
4108 		return -ENOMEM;
4109 	}
4110 	plane->type = plane_type;
4111 
4112 	/*
4113 	 * HACK: IGT tests expect that the primary plane for a CRTC
4114 	 * can only have one possible CRTC. Only expose support for
4115 	 * any CRTC if they're not going to be used as a primary plane
4116 	 * for a CRTC - like overlay or underlay planes.
4117 	 */
4118 	possible_crtcs = 1 << plane_id;
4119 	if (plane_id >= dm->dc->caps.max_streams)
4120 		possible_crtcs = 0xff;
4121 
4122 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4123 
4124 	if (ret) {
4125 		DRM_ERROR("KMS: Failed to initialize plane\n");
4126 		kfree(plane);
4127 		return ret;
4128 	}
4129 
4130 	if (mode_info)
4131 		mode_info->planes[plane_id] = plane;
4132 
4133 	return ret;
4134 }
4135 
4136 
4137 static void register_backlight_device(struct amdgpu_display_manager *dm,
4138 				      struct dc_link *link)
4139 {
4140 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4141 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4142 
4143 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4144 	    link->type != dc_connection_none) {
4145 		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
4149 		 */
4150 		if (!dm->backlight_dev[dm->num_of_edps])
4151 			amdgpu_dm_register_backlight_device(dm);
4152 
4153 		if (dm->backlight_dev[dm->num_of_edps]) {
4154 			dm->backlight_link[dm->num_of_edps] = link;
4155 			dm->num_of_edps++;
4156 		}
4157 	}
4158 #endif
4159 }
4160 
4161 
4162 /*
4163  * In this architecture, the association
4164  * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
4167  *
4168  * Returns 0 on success
4169  */
4170 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4171 {
4172 	struct amdgpu_display_manager *dm = &adev->dm;
4173 	int32_t i;
4174 	struct amdgpu_dm_connector *aconnector = NULL;
4175 	struct amdgpu_encoder *aencoder = NULL;
4176 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4177 	uint32_t link_cnt;
4178 	int32_t primary_planes;
4179 	enum dc_connection_type new_connection_type = dc_connection_none;
4180 	const struct dc_plane_cap *plane;
4181 	bool psr_feature_enabled = false;
4182 
4183 	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual number of crtcs used */
4185 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4186 
4187 	link_cnt = dm->dc->caps.max_links;
4188 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4189 		DRM_ERROR("DM: Failed to initialize mode config\n");
4190 		return -EINVAL;
4191 	}
4192 
4193 	/* There is one primary plane per CRTC */
4194 	primary_planes = dm->dc->caps.max_streams;
4195 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4196 
4197 	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4199 	 * Order is reversed to match iteration order in atomic check.
4200 	 */
4201 	for (i = (primary_planes - 1); i >= 0; i--) {
4202 		plane = &dm->dc->caps.planes[i];
4203 
4204 		if (initialize_plane(dm, mode_info, i,
4205 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4206 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4207 			goto fail;
4208 		}
4209 	}
4210 
4211 	/*
4212 	 * Initialize overlay planes, index starting after primary planes.
4213 	 * These planes have a higher DRM index than the primary planes since
4214 	 * they should be considered as having a higher z-order.
4215 	 * Order is reversed to match iteration order in atomic check.
4216 	 *
4217 	 * Only support DCN for now, and only expose one so we don't encourage
4218 	 * userspace to use up all the pipes.
4219 	 */
4220 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4221 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4222 
4223 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4224 			continue;
4225 
4226 		if (!plane->blends_with_above || !plane->blends_with_below)
4227 			continue;
4228 
4229 		if (!plane->pixel_format_support.argb8888)
4230 			continue;
4231 
4232 		if (initialize_plane(dm, NULL, primary_planes + i,
4233 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4234 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4235 			goto fail;
4236 		}
4237 
4238 		/* Only create one overlay plane. */
4239 		break;
4240 	}
4241 
4242 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4243 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4244 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4245 			goto fail;
4246 		}
4247 
4248 #if defined(CONFIG_DRM_AMD_DC_DCN)
4249 	/* Use Outbox interrupt */
4250 	switch (adev->ip_versions[DCE_HWIP][0]) {
4251 	case IP_VERSION(3, 0, 0):
4252 	case IP_VERSION(3, 1, 2):
4253 	case IP_VERSION(3, 1, 3):
4254 	case IP_VERSION(3, 1, 5):
4255 	case IP_VERSION(3, 1, 6):
4256 	case IP_VERSION(2, 1, 0):
4257 		if (register_outbox_irq_handlers(dm->adev)) {
4258 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4259 			goto fail;
4260 		}
4261 		break;
4262 	default:
4263 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4264 			      adev->ip_versions[DCE_HWIP][0]);
4265 	}
4266 
4267 	/* Determine whether to enable PSR support by default. */
4268 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4269 		switch (adev->ip_versions[DCE_HWIP][0]) {
4270 		case IP_VERSION(3, 1, 2):
4271 		case IP_VERSION(3, 1, 3):
4272 		case IP_VERSION(3, 1, 5):
4273 		case IP_VERSION(3, 1, 6):
4274 			psr_feature_enabled = true;
4275 			break;
4276 		default:
4277 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4278 			break;
4279 		}
4280 	}
4281 #endif
4282 
4283 	/* Disable vblank IRQs aggressively for power-saving. */
4284 	adev_to_drm(adev)->vblank_disable_immediate = true;
4285 
4286 	/* loops over all connectors on the board */
4287 	for (i = 0; i < link_cnt; i++) {
4288 		struct dc_link *link = NULL;
4289 
4290 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4291 			DRM_ERROR(
4292 				"KMS: Cannot support more than %d display indexes\n",
4293 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4294 			continue;
4295 		}
4296 
4297 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4298 		if (!aconnector)
4299 			goto fail;
4300 
4301 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4302 		if (!aencoder)
4303 			goto fail;
4304 
4305 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4306 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4307 			goto fail;
4308 		}
4309 
4310 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4311 			DRM_ERROR("KMS: Failed to initialize connector\n");
4312 			goto fail;
4313 		}
4314 
4315 		link = dc_get_link_at_index(dm->dc, i);
4316 
4317 		if (!dc_link_detect_sink(link, &new_connection_type))
4318 			DRM_ERROR("KMS: Failed to detect connector\n");
4319 
4320 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4321 			emulated_link_detect(link);
4322 			amdgpu_dm_update_connector_after_detect(aconnector);
4323 
4324 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4325 			amdgpu_dm_update_connector_after_detect(aconnector);
4326 			register_backlight_device(dm, link);
4327 			if (dm->num_of_edps)
4328 				update_connector_ext_caps(aconnector);
4329 			if (psr_feature_enabled)
4330 				amdgpu_dm_set_psr_caps(link);
4331 
4332 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4333 			 * PSR is also supported.
4334 			 */
4335 			if (link->psr_settings.psr_feature_enabled)
4336 				adev_to_drm(adev)->vblank_disable_immediate = false;
4337 		}
4340 	}
4341 
4342 	/* Software is initialized. Now we can register interrupt handlers. */
4343 	switch (adev->asic_type) {
4344 #if defined(CONFIG_DRM_AMD_DC_SI)
4345 	case CHIP_TAHITI:
4346 	case CHIP_PITCAIRN:
4347 	case CHIP_VERDE:
4348 	case CHIP_OLAND:
4349 		if (dce60_register_irq_handlers(dm->adev)) {
4350 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4351 			goto fail;
4352 		}
4353 		break;
4354 #endif
4355 	case CHIP_BONAIRE:
4356 	case CHIP_HAWAII:
4357 	case CHIP_KAVERI:
4358 	case CHIP_KABINI:
4359 	case CHIP_MULLINS:
4360 	case CHIP_TONGA:
4361 	case CHIP_FIJI:
4362 	case CHIP_CARRIZO:
4363 	case CHIP_STONEY:
4364 	case CHIP_POLARIS11:
4365 	case CHIP_POLARIS10:
4366 	case CHIP_POLARIS12:
4367 	case CHIP_VEGAM:
4368 	case CHIP_VEGA10:
4369 	case CHIP_VEGA12:
4370 	case CHIP_VEGA20:
4371 		if (dce110_register_irq_handlers(dm->adev)) {
4372 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4373 			goto fail;
4374 		}
4375 		break;
4376 	default:
4377 #if defined(CONFIG_DRM_AMD_DC_DCN)
4378 		switch (adev->ip_versions[DCE_HWIP][0]) {
4379 		case IP_VERSION(1, 0, 0):
4380 		case IP_VERSION(1, 0, 1):
4381 		case IP_VERSION(2, 0, 2):
4382 		case IP_VERSION(2, 0, 3):
4383 		case IP_VERSION(2, 0, 0):
4384 		case IP_VERSION(2, 1, 0):
4385 		case IP_VERSION(3, 0, 0):
4386 		case IP_VERSION(3, 0, 2):
4387 		case IP_VERSION(3, 0, 3):
4388 		case IP_VERSION(3, 0, 1):
4389 		case IP_VERSION(3, 1, 2):
4390 		case IP_VERSION(3, 1, 3):
4391 		case IP_VERSION(3, 1, 5):
4392 		case IP_VERSION(3, 1, 6):
4393 			if (dcn10_register_irq_handlers(dm->adev)) {
4394 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4395 				goto fail;
4396 			}
4397 			break;
4398 		default:
4399 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4400 					adev->ip_versions[DCE_HWIP][0]);
4401 			goto fail;
4402 		}
4403 #endif
4404 		break;
4405 	}
4406 
4407 	return 0;
4408 fail:
4409 	kfree(aencoder);
4410 	kfree(aconnector);
4411 
4412 	return -EINVAL;
4413 }
4414 
4415 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4416 {
4417 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4419 }
4420 
4421 /******************************************************************************
4422  * amdgpu_display_funcs functions
4423  *****************************************************************************/
4424 
4425 /*
4426  * dm_bandwidth_update - program display watermarks
4427  *
4428  * @adev: amdgpu_device pointer
4429  *
4430  * Calculate and program the display watermarks and line buffer allocation.
4431  */
4432 static void dm_bandwidth_update(struct amdgpu_device *adev)
4433 {
4434 	/* TODO: implement later */
4435 }
4436 
4437 static const struct amdgpu_display_funcs dm_display_funcs = {
4438 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4439 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4440 	.backlight_set_level = NULL, /* never called for DC */
4441 	.backlight_get_level = NULL, /* never called for DC */
4442 	.hpd_sense = NULL,/* called unconditionally */
4443 	.hpd_set_polarity = NULL, /* called unconditionally */
4444 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4445 	.page_flip_get_scanoutpos =
4446 		dm_crtc_get_scanoutpos,/* called unconditionally */
4447 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4448 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4449 };
4450 
4451 #if defined(CONFIG_DEBUG_KERNEL_DC)
4452 
4453 static ssize_t s3_debug_store(struct device *device,
4454 			      struct device_attribute *attr,
4455 			      const char *buf,
4456 			      size_t count)
4457 {
4458 	int ret;
4459 	int s3_state;
4460 	struct drm_device *drm_dev = dev_get_drvdata(device);
4461 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4462 
4463 	ret = kstrtoint(buf, 0, &s3_state);
4464 
4465 	if (ret == 0) {
4466 		if (s3_state) {
4467 			dm_resume(adev);
4468 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else {
			dm_suspend(adev);
		}
4471 	}
4472 
4473 	return ret == 0 ? count : 0;
4474 }
4475 
4476 DEVICE_ATTR_WO(s3_debug);
4477 
4478 #endif
4479 
4480 static int dm_early_init(void *handle)
4481 {
4482 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4483 
4484 	switch (adev->asic_type) {
4485 #if defined(CONFIG_DRM_AMD_DC_SI)
4486 	case CHIP_TAHITI:
4487 	case CHIP_PITCAIRN:
4488 	case CHIP_VERDE:
4489 		adev->mode_info.num_crtc = 6;
4490 		adev->mode_info.num_hpd = 6;
4491 		adev->mode_info.num_dig = 6;
4492 		break;
4493 	case CHIP_OLAND:
4494 		adev->mode_info.num_crtc = 2;
4495 		adev->mode_info.num_hpd = 2;
4496 		adev->mode_info.num_dig = 2;
4497 		break;
4498 #endif
4499 	case CHIP_BONAIRE:
4500 	case CHIP_HAWAII:
4501 		adev->mode_info.num_crtc = 6;
4502 		adev->mode_info.num_hpd = 6;
4503 		adev->mode_info.num_dig = 6;
4504 		break;
4505 	case CHIP_KAVERI:
4506 		adev->mode_info.num_crtc = 4;
4507 		adev->mode_info.num_hpd = 6;
4508 		adev->mode_info.num_dig = 7;
4509 		break;
4510 	case CHIP_KABINI:
4511 	case CHIP_MULLINS:
4512 		adev->mode_info.num_crtc = 2;
4513 		adev->mode_info.num_hpd = 6;
4514 		adev->mode_info.num_dig = 6;
4515 		break;
4516 	case CHIP_FIJI:
4517 	case CHIP_TONGA:
4518 		adev->mode_info.num_crtc = 6;
4519 		adev->mode_info.num_hpd = 6;
4520 		adev->mode_info.num_dig = 7;
4521 		break;
4522 	case CHIP_CARRIZO:
4523 		adev->mode_info.num_crtc = 3;
4524 		adev->mode_info.num_hpd = 6;
4525 		adev->mode_info.num_dig = 9;
4526 		break;
4527 	case CHIP_STONEY:
4528 		adev->mode_info.num_crtc = 2;
4529 		adev->mode_info.num_hpd = 6;
4530 		adev->mode_info.num_dig = 9;
4531 		break;
4532 	case CHIP_POLARIS11:
4533 	case CHIP_POLARIS12:
4534 		adev->mode_info.num_crtc = 5;
4535 		adev->mode_info.num_hpd = 5;
4536 		adev->mode_info.num_dig = 5;
4537 		break;
4538 	case CHIP_POLARIS10:
4539 	case CHIP_VEGAM:
4540 		adev->mode_info.num_crtc = 6;
4541 		adev->mode_info.num_hpd = 6;
4542 		adev->mode_info.num_dig = 6;
4543 		break;
4544 	case CHIP_VEGA10:
4545 	case CHIP_VEGA12:
4546 	case CHIP_VEGA20:
4547 		adev->mode_info.num_crtc = 6;
4548 		adev->mode_info.num_hpd = 6;
4549 		adev->mode_info.num_dig = 6;
4550 		break;
4551 	default:
4552 #if defined(CONFIG_DRM_AMD_DC_DCN)
4553 		switch (adev->ip_versions[DCE_HWIP][0]) {
4554 		case IP_VERSION(2, 0, 2):
4555 		case IP_VERSION(3, 0, 0):
4556 			adev->mode_info.num_crtc = 6;
4557 			adev->mode_info.num_hpd = 6;
4558 			adev->mode_info.num_dig = 6;
4559 			break;
4560 		case IP_VERSION(2, 0, 0):
4561 		case IP_VERSION(3, 0, 2):
4562 			adev->mode_info.num_crtc = 5;
4563 			adev->mode_info.num_hpd = 5;
4564 			adev->mode_info.num_dig = 5;
4565 			break;
4566 		case IP_VERSION(2, 0, 3):
4567 		case IP_VERSION(3, 0, 3):
4568 			adev->mode_info.num_crtc = 2;
4569 			adev->mode_info.num_hpd = 2;
4570 			adev->mode_info.num_dig = 2;
4571 			break;
4572 		case IP_VERSION(1, 0, 0):
4573 		case IP_VERSION(1, 0, 1):
4574 		case IP_VERSION(3, 0, 1):
4575 		case IP_VERSION(2, 1, 0):
4576 		case IP_VERSION(3, 1, 2):
4577 		case IP_VERSION(3, 1, 3):
4578 		case IP_VERSION(3, 1, 5):
4579 		case IP_VERSION(3, 1, 6):
4580 			adev->mode_info.num_crtc = 4;
4581 			adev->mode_info.num_hpd = 4;
4582 			adev->mode_info.num_dig = 4;
4583 			break;
4584 		default:
4585 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4586 					adev->ip_versions[DCE_HWIP][0]);
4587 			return -EINVAL;
4588 		}
4589 #endif
4590 		break;
4591 	}
4592 
4593 	amdgpu_dm_set_irq_funcs(adev);
4594 
4595 	if (adev->mode_info.funcs == NULL)
4596 		adev->mode_info.funcs = &dm_display_funcs;
4597 
4598 	/*
4599 	 * Note: Do NOT change adev->audio_endpt_rreg and
4600 	 * adev->audio_endpt_wreg because they are initialised in
4601 	 * amdgpu_device_init()
4602 	 */
4603 #if defined(CONFIG_DEBUG_KERNEL_DC)
4604 	device_create_file(
4605 		adev_to_drm(adev)->dev,
4606 		&dev_attr_s3_debug);
4607 #endif
4608 
4609 	return 0;
4610 }
4611 
4612 static bool modeset_required(struct drm_crtc_state *crtc_state,
4613 			     struct dc_stream_state *new_stream,
4614 			     struct dc_stream_state *old_stream)
4615 {
4616 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4617 }
4618 
4619 static bool modereset_required(struct drm_crtc_state *crtc_state)
4620 {
4621 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4622 }
4623 
4624 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4625 {
4626 	drm_encoder_cleanup(encoder);
4627 	kfree(encoder);
4628 }
4629 
4630 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4631 	.destroy = amdgpu_dm_encoder_destroy,
4632 };
4633 
4634 
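/*
 * Look up the per-format scaling limits from the DC plane caps. Limits are
 * expressed in units of 1/1000, i.e. 1000 corresponds to a 1.0 scale factor.
 */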
4635 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4636 					 struct drm_framebuffer *fb,
4637 					 int *min_downscale, int *max_upscale)
4638 {
4639 	struct amdgpu_device *adev = drm_to_adev(dev);
4640 	struct dc *dc = adev->dm.dc;
4641 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4642 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4643 
4644 	switch (fb->format->format) {
4645 	case DRM_FORMAT_P010:
4646 	case DRM_FORMAT_NV12:
4647 	case DRM_FORMAT_NV21:
4648 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4649 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4650 		break;
4651 
4652 	case DRM_FORMAT_XRGB16161616F:
4653 	case DRM_FORMAT_ARGB16161616F:
4654 	case DRM_FORMAT_XBGR16161616F:
4655 	case DRM_FORMAT_ABGR16161616F:
4656 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4657 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4658 		break;
4659 
4660 	default:
4661 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4662 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4663 		break;
4664 	}
4665 
4666 	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4668 	 * scaling factor of 1.0 == 1000 units.
4669 	 */
4670 	if (*max_upscale == 1)
4671 		*max_upscale = 1000;
4672 
4673 	if (*min_downscale == 1)
4674 		*min_downscale = 1000;
4675 }
4676 
4677 
4678 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4679 				const struct drm_plane_state *state,
4680 				struct dc_scaling_info *scaling_info)
4681 {
4682 	int scale_w, scale_h, min_downscale, max_upscale;
4683 
4684 	memset(scaling_info, 0, sizeof(*scaling_info));
4685 
4686 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4687 	scaling_info->src_rect.x = state->src_x >> 16;
4688 	scaling_info->src_rect.y = state->src_y >> 16;
4689 
4690 	/*
4691 	 * For reasons we don't (yet) fully understand a non-zero
4692 	 * src_y coordinate into an NV12 buffer can cause a
4693 	 * system hang on DCN1x.
4694 	 * To avoid hangs (and maybe be overly cautious)
4695 	 * let's reject both non-zero src_x and src_y.
4696 	 *
4697 	 * We currently know of only one use-case to reproduce a
4698 	 * scenario with non-zero src_x and src_y for NV12, which
4699 	 * is to gesture the YouTube Android app into full screen
4700 	 * on ChromeOS.
4701 	 */
4702 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4703 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4704 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4705 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4706 		return -EINVAL;
4707 
4708 	scaling_info->src_rect.width = state->src_w >> 16;
4709 	if (scaling_info->src_rect.width == 0)
4710 		return -EINVAL;
4711 
4712 	scaling_info->src_rect.height = state->src_h >> 16;
4713 	if (scaling_info->src_rect.height == 0)
4714 		return -EINVAL;
4715 
4716 	scaling_info->dst_rect.x = state->crtc_x;
4717 	scaling_info->dst_rect.y = state->crtc_y;
4718 
4719 	if (state->crtc_w == 0)
4720 		return -EINVAL;
4721 
4722 	scaling_info->dst_rect.width = state->crtc_w;
4723 
4724 	if (state->crtc_h == 0)
4725 		return -EINVAL;
4726 
4727 	scaling_info->dst_rect.height = state->crtc_h;
4728 
4729 	/* DRM doesn't specify clipping on destination output. */
4730 	scaling_info->clip_rect = scaling_info->dst_rect;
4731 
4732 	/* Validate scaling per-format with DC plane caps */
4733 	if (state->plane && state->plane->dev && state->fb) {
4734 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4735 					     &min_downscale, &max_upscale);
4736 	} else {
4737 		min_downscale = 250;
4738 		max_upscale = 16000;
4739 	}
4740 
4741 	scale_w = scaling_info->dst_rect.width * 1000 /
4742 		  scaling_info->src_rect.width;
4743 
4744 	if (scale_w < min_downscale || scale_w > max_upscale)
4745 		return -EINVAL;
4746 
4747 	scale_h = scaling_info->dst_rect.height * 1000 /
4748 		  scaling_info->src_rect.height;
4749 
4750 	if (scale_h < min_downscale || scale_h > max_upscale)
4751 		return -EINVAL;
4752 
4753 	/*
4754 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4755 	 * assume reasonable defaults based on the format.
4756 	 */
4757 
4758 	return 0;
4759 }
4760 
4761 static void
4762 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4763 				 uint64_t tiling_flags)
4764 {
4765 	/* Fill GFX8 params */
4766 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4767 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4768 
4769 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4770 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4771 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4772 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4773 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4774 
4775 		/* XXX fix me for VI */
4776 		tiling_info->gfx8.num_banks = num_banks;
4777 		tiling_info->gfx8.array_mode =
4778 				DC_ARRAY_2D_TILED_THIN1;
4779 		tiling_info->gfx8.tile_split = tile_split;
4780 		tiling_info->gfx8.bank_width = bankw;
4781 		tiling_info->gfx8.bank_height = bankh;
4782 		tiling_info->gfx8.tile_aspect = mtaspect;
4783 		tiling_info->gfx8.tile_mode =
4784 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4785 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4786 			== DC_ARRAY_1D_TILED_THIN1) {
4787 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4788 	}
4789 
4790 	tiling_info->gfx8.pipe_config =
4791 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4792 }
4793 
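/* Populate the GFX9+ tiling parameters derived from the device's GB_ADDR_CONFIG. */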
4794 static void
4795 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4796 				  union dc_tiling_info *tiling_info)
4797 {
4798 	tiling_info->gfx9.num_pipes =
4799 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4800 	tiling_info->gfx9.num_banks =
4801 		adev->gfx.config.gb_addr_config_fields.num_banks;
4802 	tiling_info->gfx9.pipe_interleave =
4803 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4804 	tiling_info->gfx9.num_shader_engines =
4805 		adev->gfx.config.gb_addr_config_fields.num_se;
4806 	tiling_info->gfx9.max_compressed_frags =
4807 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4808 	tiling_info->gfx9.num_rb_per_se =
4809 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4810 	tiling_info->gfx9.shaderEnable = 1;
4811 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4812 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4813 }
4814 
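/*
 * Ask DC whether the requested DCC parameters are supported for this
 * format/swizzle/size combination; returns -EINVAL if they are not.
 */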
4815 static int
4816 validate_dcc(struct amdgpu_device *adev,
4817 	     const enum surface_pixel_format format,
4818 	     const enum dc_rotation_angle rotation,
4819 	     const union dc_tiling_info *tiling_info,
4820 	     const struct dc_plane_dcc_param *dcc,
4821 	     const struct dc_plane_address *address,
4822 	     const struct plane_size *plane_size)
4823 {
4824 	struct dc *dc = adev->dm.dc;
4825 	struct dc_dcc_surface_param input;
4826 	struct dc_surface_dcc_cap output;
4827 
4828 	memset(&input, 0, sizeof(input));
4829 	memset(&output, 0, sizeof(output));
4830 
4831 	if (!dcc->enable)
4832 		return 0;
4833 
4834 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4835 	    !dc->cap_funcs.get_dcc_compression_cap)
4836 		return -EINVAL;
4837 
4838 	input.format = format;
4839 	input.surface_size.width = plane_size->surface_size.width;
4840 	input.surface_size.height = plane_size->surface_size.height;
4841 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4842 
4843 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4844 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4845 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4846 		input.scan = SCAN_DIRECTION_VERTICAL;
4847 
4848 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4849 		return -EINVAL;
4850 
4851 	if (!output.capable)
4852 		return -EINVAL;
4853 
4854 	if (dcc->independent_64b_blks == 0 &&
4855 	    output.grph.rgb.independent_64b_blks != 0)
4856 		return -EINVAL;
4857 
4858 	return 0;
4859 }
4860 
4861 static bool
4862 modifier_has_dcc(uint64_t modifier)
4863 {
4864 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4865 }
4866 
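/* Extract the GFX9+ swizzle mode from the TILE field of an AMD modifier; linear is 0. */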
4867 static unsigned
4868 modifier_gfx9_swizzle_mode(uint64_t modifier)
4869 {
4870 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4871 		return 0;
4872 
4873 	return AMD_FMT_MOD_GET(TILE, modifier);
4874 }
4875 
4876 static const struct drm_format_info *
4877 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4878 {
4879 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4880 }
4881 
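/*
 * Refine the device-default GFX9 tiling info with the pipe/bank XOR bits and
 * packer count encoded in an AMD format modifier.
 */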
4882 static void
4883 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4884 				    union dc_tiling_info *tiling_info,
4885 				    uint64_t modifier)
4886 {
4887 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4888 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4889 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4890 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4891 
4892 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4893 
4894 	if (!IS_AMD_FMT_MOD(modifier))
4895 		return;
4896 
4897 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4898 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4899 
4900 	if (adev->family >= AMDGPU_FAMILY_NV) {
4901 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4902 	} else {
4903 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4904 
4905 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4906 	}
4907 }
4908 
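/* Micro-tile ordering, encoded in the low two bits of the GFX9+ swizzle mode. */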
4909 enum dm_micro_swizzle {
4910 	MICRO_SWIZZLE_Z = 0,
4911 	MICRO_SWIZZLE_S = 1,
4912 	MICRO_SWIZZLE_D = 2,
4913 	MICRO_SWIZZLE_R = 3
4914 };
4915 
4916 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4917 					  uint32_t format,
4918 					  uint64_t modifier)
4919 {
4920 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4921 	const struct drm_format_info *info = drm_format_info(format);
4922 	int i;
4923 
4924 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4925 
4926 	if (!info)
4927 		return false;
4928 
4929 	/*
4930 	 * We always have to allow these modifiers:
4931 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4932 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4933 	 */
4934 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4935 	    modifier == DRM_FORMAT_MOD_INVALID) {
4936 		return true;
4937 	}
4938 
4939 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4940 	for (i = 0; i < plane->modifier_count; i++) {
4941 		if (modifier == plane->modifiers[i])
4942 			break;
4943 	}
4944 	if (i == plane->modifier_count)
4945 		return false;
4946 
4947 	/*
4948 	 * For D swizzle the canonical modifier depends on the bpp, so check
4949 	 * it here.
4950 	 */
4951 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4952 	    adev->family >= AMDGPU_FAMILY_NV) {
4953 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4954 			return false;
4955 	}
4956 
4957 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4958 	    info->cpp[0] < 8)
4959 		return false;
4960 
4961 	if (modifier_has_dcc(modifier)) {
4962 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4963 		if (info->cpp[0] != 4)
4964 			return false;
		/*
		 * We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes.
		 */
4967 		if (info->num_planes > 1)
4968 			return false;
4969 	}
4970 
4971 	return true;
4972 }
4973 
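/*
 * Append a modifier to a dynamically grown array, doubling its capacity when
 * needed. On allocation failure the array is freed and *mods is set to NULL;
 * subsequent calls then become no-ops.
 */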
4974 static void
4975 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4976 {
4977 	if (!*mods)
4978 		return;
4979 
4980 	if (*cap - *size < 1) {
4981 		uint64_t new_cap = *cap * 2;
4982 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4983 
4984 		if (!new_mods) {
4985 			kfree(*mods);
4986 			*mods = NULL;
4987 			return;
4988 		}
4989 
4990 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4991 		kfree(*mods);
4992 		*mods = new_mods;
4993 		*cap = new_cap;
4994 	}
4995 
4996 	(*mods)[*size] = mod;
4997 	*size += 1;
4998 }
4999 
5000 static void
5001 add_gfx9_modifiers(const struct amdgpu_device *adev,
5002 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
5003 {
5004 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5005 	int pipe_xor_bits = min(8, pipes +
5006 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5007 	int bank_xor_bits = min(8 - pipe_xor_bits,
5008 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5009 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5010 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5011 
5012 
5013 	if (adev->family == AMDGPU_FAMILY_RV) {
5014 		/* Raven2 and later */
5015 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5016 
5017 		/*
5018 		 * No _D DCC swizzles yet because we only allow 32bpp, which
5019 		 * doesn't support _D on DCN
5020 		 */
5021 
5022 		if (has_constant_encode) {
5023 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5024 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5025 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5026 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5027 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5028 				    AMD_FMT_MOD_SET(DCC, 1) |
5029 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5030 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5031 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5032 		}
5033 
5034 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5035 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5036 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5037 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5038 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5039 			    AMD_FMT_MOD_SET(DCC, 1) |
5040 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5041 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5042 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5043 
5044 		if (has_constant_encode) {
5045 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5046 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5047 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5048 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5049 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5050 				    AMD_FMT_MOD_SET(DCC, 1) |
5051 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5052 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5053 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5055 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5056 				    AMD_FMT_MOD_SET(RB, rb) |
5057 				    AMD_FMT_MOD_SET(PIPE, pipes));
5058 		}
5059 
5060 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5061 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5062 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5063 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5064 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5065 			    AMD_FMT_MOD_SET(DCC, 1) |
5066 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5067 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5068 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5069 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5070 			    AMD_FMT_MOD_SET(RB, rb) |
5071 			    AMD_FMT_MOD_SET(PIPE, pipes));
5072 	}
5073 
5074 	/*
5075 	 * Only supported for 64bpp on Raven, will be filtered on format in
5076 	 * dm_plane_format_mod_supported.
5077 	 */
5078 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5079 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5080 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5081 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5082 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5083 
5084 	if (adev->family == AMDGPU_FAMILY_RV) {
5085 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5086 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5087 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5088 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5089 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5090 	}
5091 
5092 	/*
5093 	 * Only supported for 64bpp on Raven, will be filtered on format in
5094 	 * dm_plane_format_mod_supported.
5095 	 */
5096 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5097 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5098 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5099 
5100 	if (adev->family == AMDGPU_FAMILY_RV) {
5101 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5102 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5103 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5104 	}
5105 }
5106 
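/*
 * Modifiers advertised for GFX10.1 planes: 64K R_X and S_X swizzles with
 * and without DCC (constant-encode, independent 64B blocks), plus the
 * legacy GFX9 64K _D/_S swizzles kept as 64bpp-only fallbacks.
 */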
5107 static void
5108 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5109 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5110 {
5111 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5112 
5113 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5114 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5115 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5116 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5117 		    AMD_FMT_MOD_SET(DCC, 1) |
5118 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5119 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5120 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5121 
5122 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5123 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5124 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5125 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5126 		    AMD_FMT_MOD_SET(DCC, 1) |
5127 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5128 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5129 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5130 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5131 
5132 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5133 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5134 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5135 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5136 
5137 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5138 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5139 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5140 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5141 
5143 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5144 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5145 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5146 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5147 
5148 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5149 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5150 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5151 }
5152 
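/*
 * Modifiers advertised for GFX10.3 (RB+) planes. Compared to GFX10.1,
 * these also encode the number of packers and expose both 64B and 128B
 * independent-block DCC layouts, with and without DCC_RETILE.
 */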
5153 static void
5154 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5155 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5156 {
5157 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5158 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5159 
5160 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5161 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5162 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5163 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5164 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5165 		    AMD_FMT_MOD_SET(DCC, 1) |
5166 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5167 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5168 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5169 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5170 
5171 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5172 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5173 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5174 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5175 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5176 		    AMD_FMT_MOD_SET(DCC, 1) |
5177 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5178 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5179 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5180 
5181 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5182 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5183 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5184 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5185 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5186 		    AMD_FMT_MOD_SET(DCC, 1) |
5187 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5188 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5189 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5190 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5191 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5192 
5193 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5194 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5195 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5196 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5197 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5198 		    AMD_FMT_MOD_SET(DCC, 1) |
5199 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5200 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5201 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5202 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5203 
5204 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5205 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5206 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5207 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5208 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5209 
5210 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5211 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5212 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5213 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5214 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5215 
5216 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5217 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5218 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5219 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5220 
5221 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5222 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5223 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5224 }
5225 
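/*
 * Allocate and fill the format modifier list reported for a plane. Cursor
 * planes only get LINEAR; other planes get the family specific modifiers
 * plus LINEAR, always terminated by DRM_FORMAT_MOD_INVALID. Returns
 * -ENOMEM if the list could not be allocated.
 */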
5226 static int
5227 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5228 {
5229 	uint64_t size = 0, capacity = 128;
5230 	*mods = NULL;
5231 
5232 	/* We have not hooked up any pre-GFX9 modifiers. */
5233 	if (adev->family < AMDGPU_FAMILY_AI)
5234 		return 0;
5235 
5236 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5237 
5238 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5239 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5240 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5241 		return *mods ? 0 : -ENOMEM;
5242 	}
5243 
5244 	switch (adev->family) {
5245 	case AMDGPU_FAMILY_AI:
5246 	case AMDGPU_FAMILY_RV:
5247 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5248 		break;
5249 	case AMDGPU_FAMILY_NV:
5250 	case AMDGPU_FAMILY_VGH:
5251 	case AMDGPU_FAMILY_YC:
5252 	case AMDGPU_FAMILY_GC_10_3_6:
5253 	case AMDGPU_FAMILY_GC_10_3_7:
5254 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5255 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5256 		else
5257 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5258 		break;
5259 	}
5260 
5261 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5262 
5263 	/* INVALID marks the end of the list. */
5264 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5265 
5266 	if (!*mods)
5267 		return -ENOMEM;
5268 
5269 	return 0;
5270 }
5271 
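/*
 * Translate a framebuffer's format modifier into DC tiling and DCC state
 * for GFX9+: swizzle mode, DCC metadata address/pitch and the independent
 * block configuration expected by the HUBP. The result is checked with
 * validate_dcc() before being used.
 */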
5272 static int
5273 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5274 					  const struct amdgpu_framebuffer *afb,
5275 					  const enum surface_pixel_format format,
5276 					  const enum dc_rotation_angle rotation,
5277 					  const struct plane_size *plane_size,
5278 					  union dc_tiling_info *tiling_info,
5279 					  struct dc_plane_dcc_param *dcc,
5280 					  struct dc_plane_address *address,
5281 					  const bool force_disable_dcc)
5282 {
5283 	const uint64_t modifier = afb->base.modifier;
5284 	int ret = 0;
5285 
5286 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5287 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5288 
5289 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5290 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5291 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5292 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5293 
5294 		dcc->enable = 1;
5295 		dcc->meta_pitch = afb->base.pitches[1];
5296 		dcc->independent_64b_blks = independent_64b_blks;
5297 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5298 			if (independent_64b_blks && independent_128b_blks)
5299 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5300 			else if (independent_128b_blks)
5301 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5302 			else if (independent_64b_blks && !independent_128b_blks)
5303 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5304 			else
5305 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5306 		} else {
5307 			if (independent_64b_blks)
5308 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5309 			else
5310 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5311 		}
5312 
5313 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5314 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5315 	}
5316 
5317 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5318 	if (ret)
5319 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5320 
5321 	return ret;
5322 }
5323 
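/*
 * Fill the DC surface description (size, pitch, addresses, tiling, DCC)
 * for a framebuffer. RGB surfaces use a single graphics address; video
 * (semi-planar) surfaces also get chroma size/pitch and a separate chroma
 * address. GFX9+ derives tiling from the modifier, older families from
 * the legacy tiling flags.
 */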
5324 static int
5325 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5326 			     const struct amdgpu_framebuffer *afb,
5327 			     const enum surface_pixel_format format,
5328 			     const enum dc_rotation_angle rotation,
5329 			     const uint64_t tiling_flags,
5330 			     union dc_tiling_info *tiling_info,
5331 			     struct plane_size *plane_size,
5332 			     struct dc_plane_dcc_param *dcc,
5333 			     struct dc_plane_address *address,
5334 			     bool tmz_surface,
5335 			     bool force_disable_dcc)
5336 {
5337 	const struct drm_framebuffer *fb = &afb->base;
5338 	int ret;
5339 
5340 	memset(tiling_info, 0, sizeof(*tiling_info));
5341 	memset(plane_size, 0, sizeof(*plane_size));
5342 	memset(dcc, 0, sizeof(*dcc));
5343 	memset(address, 0, sizeof(*address));
5344 
5345 	address->tmz_surface = tmz_surface;
5346 
5347 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5348 		uint64_t addr = afb->address + fb->offsets[0];
5349 
5350 		plane_size->surface_size.x = 0;
5351 		plane_size->surface_size.y = 0;
5352 		plane_size->surface_size.width = fb->width;
5353 		plane_size->surface_size.height = fb->height;
5354 		plane_size->surface_pitch =
5355 			fb->pitches[0] / fb->format->cpp[0];
5356 
5357 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5358 		address->grph.addr.low_part = lower_32_bits(addr);
5359 		address->grph.addr.high_part = upper_32_bits(addr);
5360 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5361 		uint64_t luma_addr = afb->address + fb->offsets[0];
5362 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5363 
5364 		plane_size->surface_size.x = 0;
5365 		plane_size->surface_size.y = 0;
5366 		plane_size->surface_size.width = fb->width;
5367 		plane_size->surface_size.height = fb->height;
5368 		plane_size->surface_pitch =
5369 			fb->pitches[0] / fb->format->cpp[0];
5370 
5371 		plane_size->chroma_size.x = 0;
5372 		plane_size->chroma_size.y = 0;
5373 		/* TODO: set these based on surface format */
5374 		plane_size->chroma_size.width = fb->width / 2;
5375 		plane_size->chroma_size.height = fb->height / 2;
5376 
5377 		plane_size->chroma_pitch =
5378 			fb->pitches[1] / fb->format->cpp[1];
5379 
5380 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5381 		address->video_progressive.luma_addr.low_part =
5382 			lower_32_bits(luma_addr);
5383 		address->video_progressive.luma_addr.high_part =
5384 			upper_32_bits(luma_addr);
5385 		address->video_progressive.chroma_addr.low_part =
5386 			lower_32_bits(chroma_addr);
5387 		address->video_progressive.chroma_addr.high_part =
5388 			upper_32_bits(chroma_addr);
5389 	}
5390 
5391 	if (adev->family >= AMDGPU_FAMILY_AI) {
5392 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5393 								rotation, plane_size,
5394 								tiling_info, dcc,
5395 								address,
5396 								force_disable_dcc);
5397 		if (ret)
5398 			return ret;
5399 	} else {
5400 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5401 	}
5402 
5403 	return 0;
5404 }
5405 
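/*
 * Derive blending state from the DRM plane state. Per-pixel alpha is only
 * honoured for overlay planes using premultiplied alpha with an alpha
 * capable format, and a plane alpha below 0xffff is converted into an
 * 8-bit global alpha value.
 */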
5406 static void
5407 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5408 			       bool *per_pixel_alpha, bool *global_alpha,
5409 			       int *global_alpha_value)
5410 {
5411 	*per_pixel_alpha = false;
5412 	*global_alpha = false;
5413 	*global_alpha_value = 0xff;
5414 
5415 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5416 		return;
5417 
5418 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5419 		static const uint32_t alpha_formats[] = {
5420 			DRM_FORMAT_ARGB8888,
5421 			DRM_FORMAT_RGBA8888,
5422 			DRM_FORMAT_ABGR8888,
5423 		};
5424 		uint32_t format = plane_state->fb->format->format;
5425 		unsigned int i;
5426 
5427 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5428 			if (format == alpha_formats[i]) {
5429 				*per_pixel_alpha = true;
5430 				break;
5431 			}
5432 		}
5433 	}
5434 
5435 	if (plane_state->alpha < 0xffff) {
5436 		*global_alpha = true;
5437 		*global_alpha_value = plane_state->alpha >> 8;
5438 	}
5439 }
5440 
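/*
 * Pick the DC color space for a plane. RGB formats always use sRGB; YCbCr
 * formats map the DRM color encoding/range properties onto BT.601/BT.709
 * full or limited range, or full range BT.2020.
 */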
5441 static int
5442 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5443 			    const enum surface_pixel_format format,
5444 			    enum dc_color_space *color_space)
5445 {
5446 	bool full_range;
5447 
5448 	*color_space = COLOR_SPACE_SRGB;
5449 
5450 	/* DRM color properties only affect non-RGB formats. */
5451 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5452 		return 0;
5453 
5454 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5455 
5456 	switch (plane_state->color_encoding) {
5457 	case DRM_COLOR_YCBCR_BT601:
5458 		if (full_range)
5459 			*color_space = COLOR_SPACE_YCBCR601;
5460 		else
5461 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5462 		break;
5463 
5464 	case DRM_COLOR_YCBCR_BT709:
5465 		if (full_range)
5466 			*color_space = COLOR_SPACE_YCBCR709;
5467 		else
5468 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5469 		break;
5470 
5471 	case DRM_COLOR_YCBCR_BT2020:
5472 		if (full_range)
5473 			*color_space = COLOR_SPACE_2020_YCBCR;
5474 		else
5475 			return -EINVAL;
5476 		break;
5477 
5478 	default:
5479 		return -EINVAL;
5480 	}
5481 
5482 	return 0;
5483 }
5484 
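/*
 * Convert a DRM plane state into a dc_plane_info: pixel format, rotation,
 * color space, buffer/tiling/DCC attributes and blending, plus the plane
 * address. Unsupported fourcc formats are rejected with -EINVAL.
 */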
5485 static int
5486 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5487 			    const struct drm_plane_state *plane_state,
5488 			    const uint64_t tiling_flags,
5489 			    struct dc_plane_info *plane_info,
5490 			    struct dc_plane_address *address,
5491 			    bool tmz_surface,
5492 			    bool force_disable_dcc)
5493 {
5494 	const struct drm_framebuffer *fb = plane_state->fb;
5495 	const struct amdgpu_framebuffer *afb =
5496 		to_amdgpu_framebuffer(plane_state->fb);
5497 	int ret;
5498 
5499 	memset(plane_info, 0, sizeof(*plane_info));
5500 
5501 	switch (fb->format->format) {
5502 	case DRM_FORMAT_C8:
5503 		plane_info->format =
5504 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5505 		break;
5506 	case DRM_FORMAT_RGB565:
5507 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5508 		break;
5509 	case DRM_FORMAT_XRGB8888:
5510 	case DRM_FORMAT_ARGB8888:
5511 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5512 		break;
5513 	case DRM_FORMAT_XRGB2101010:
5514 	case DRM_FORMAT_ARGB2101010:
5515 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5516 		break;
5517 	case DRM_FORMAT_XBGR2101010:
5518 	case DRM_FORMAT_ABGR2101010:
5519 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5520 		break;
5521 	case DRM_FORMAT_XBGR8888:
5522 	case DRM_FORMAT_ABGR8888:
5523 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5524 		break;
5525 	case DRM_FORMAT_NV21:
5526 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5527 		break;
5528 	case DRM_FORMAT_NV12:
5529 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5530 		break;
5531 	case DRM_FORMAT_P010:
5532 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5533 		break;
5534 	case DRM_FORMAT_XRGB16161616F:
5535 	case DRM_FORMAT_ARGB16161616F:
5536 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5537 		break;
5538 	case DRM_FORMAT_XBGR16161616F:
5539 	case DRM_FORMAT_ABGR16161616F:
5540 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5541 		break;
5542 	case DRM_FORMAT_XRGB16161616:
5543 	case DRM_FORMAT_ARGB16161616:
5544 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5545 		break;
5546 	case DRM_FORMAT_XBGR16161616:
5547 	case DRM_FORMAT_ABGR16161616:
5548 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5549 		break;
5550 	default:
5551 		DRM_ERROR(
5552 			"Unsupported screen format %p4cc\n",
5553 			&fb->format->format);
5554 		return -EINVAL;
5555 	}
5556 
5557 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5558 	case DRM_MODE_ROTATE_0:
5559 		plane_info->rotation = ROTATION_ANGLE_0;
5560 		break;
5561 	case DRM_MODE_ROTATE_90:
5562 		plane_info->rotation = ROTATION_ANGLE_90;
5563 		break;
5564 	case DRM_MODE_ROTATE_180:
5565 		plane_info->rotation = ROTATION_ANGLE_180;
5566 		break;
5567 	case DRM_MODE_ROTATE_270:
5568 		plane_info->rotation = ROTATION_ANGLE_270;
5569 		break;
5570 	default:
5571 		plane_info->rotation = ROTATION_ANGLE_0;
5572 		break;
5573 	}
5574 
5575 	plane_info->visible = true;
5576 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5577 
5578 	plane_info->layer_index = 0;
5579 
5580 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5581 					  &plane_info->color_space);
5582 	if (ret)
5583 		return ret;
5584 
5585 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5586 					   plane_info->rotation, tiling_flags,
5587 					   &plane_info->tiling_info,
5588 					   &plane_info->plane_size,
5589 					   &plane_info->dcc, address, tmz_surface,
5590 					   force_disable_dcc);
5591 	if (ret)
5592 		return ret;
5593 
5594 	fill_blending_from_plane_state(
5595 		plane_state, &plane_info->per_pixel_alpha,
5596 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5597 
5598 	return 0;
5599 }
5600 
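/*
 * Populate a dc_plane_state from the DRM plane and CRTC state: scaling
 * rects, surface and DCC attributes, blending and the input transfer
 * function from the color management state. DCC is force-disabled on
 * Raven while the device is suspending.
 */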
5601 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5602 				    struct dc_plane_state *dc_plane_state,
5603 				    struct drm_plane_state *plane_state,
5604 				    struct drm_crtc_state *crtc_state)
5605 {
5606 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5607 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5608 	struct dc_scaling_info scaling_info;
5609 	struct dc_plane_info plane_info;
5610 	int ret;
5611 	bool force_disable_dcc = false;
5612 
5613 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5614 	if (ret)
5615 		return ret;
5616 
5617 	dc_plane_state->src_rect = scaling_info.src_rect;
5618 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5619 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5620 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5621 
5622 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5623 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5624 					  afb->tiling_flags,
5625 					  &plane_info,
5626 					  &dc_plane_state->address,
5627 					  afb->tmz_surface,
5628 					  force_disable_dcc);
5629 	if (ret)
5630 		return ret;
5631 
5632 	dc_plane_state->format = plane_info.format;
5633 	dc_plane_state->color_space = plane_info.color_space;
5635 	dc_plane_state->plane_size = plane_info.plane_size;
5636 	dc_plane_state->rotation = plane_info.rotation;
5637 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5638 	dc_plane_state->stereo_format = plane_info.stereo_format;
5639 	dc_plane_state->tiling_info = plane_info.tiling_info;
5640 	dc_plane_state->visible = plane_info.visible;
5641 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5642 	dc_plane_state->global_alpha = plane_info.global_alpha;
5643 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5644 	dc_plane_state->dcc = plane_info.dcc;
5645 	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
5646 	dc_plane_state->flip_int_enabled = true;
5647 
5648 	/*
5649 	 * Always set input transfer function, since plane state is refreshed
5650 	 * every time.
5651 	 */
5652 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5653 	if (ret)
5654 		return ret;
5655 
5656 	return 0;
5657 }
5658 
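/*
 * Compute the stream src (composition viewport) and dst (addressable
 * area) rectangles from the mode and the connector's scaling/underscan
 * state: full screen scaling by default, aspect preserving for
 * RMX_ASPECT/RMX_OFF and 1:1 centered for RMX_CENTER.
 */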
5659 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5660 					   const struct dm_connector_state *dm_state,
5661 					   struct dc_stream_state *stream)
5662 {
5663 	enum amdgpu_rmx_type rmx_type;
5664 
5665 	struct rect src = { 0 }; /* viewport in composition space */
5666 	struct rect dst = { 0 }; /* stream addressable area */
5667 
5668 	/* no mode. nothing to be done */
5669 	if (!mode)
5670 		return;
5671 
5672 	/* Full screen scaling by default */
5673 	src.width = mode->hdisplay;
5674 	src.height = mode->vdisplay;
5675 	dst.width = stream->timing.h_addressable;
5676 	dst.height = stream->timing.v_addressable;
5677 
5678 	if (dm_state) {
5679 		rmx_type = dm_state->scaling;
5680 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5681 			if (src.width * dst.height <
5682 					src.height * dst.width) {
5683 				/* height needs less upscaling/more downscaling */
5684 				dst.width = src.width *
5685 						dst.height / src.height;
5686 			} else {
5687 				/* width needs less upscaling/more downscaling */
5688 				dst.height = src.height *
5689 						dst.width / src.width;
5690 			}
5691 		} else if (rmx_type == RMX_CENTER) {
5692 			dst = src;
5693 		}
5694 
5695 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5696 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5697 
5698 		if (dm_state->underscan_enable) {
5699 			dst.x += dm_state->underscan_hborder / 2;
5700 			dst.y += dm_state->underscan_vborder / 2;
5701 			dst.width -= dm_state->underscan_hborder;
5702 			dst.height -= dm_state->underscan_vborder;
5703 		}
5704 	}
5705 
5706 	stream->src = src;
5707 	stream->dst = dst;
5708 
5709 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5710 		      dst.x, dst.y, dst.width, dst.height);
5711 
5712 }
5713 
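/*
 * Derive the DC color depth from the connector's display info, capped by
 * the HDMI 2.0 YCbCr 4:2:0 deep color caps where applicable and by the
 * bpc requested by userspace (rounded down to an even value).
 */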
5714 static enum dc_color_depth
5715 convert_color_depth_from_display_info(const struct drm_connector *connector,
5716 				      bool is_y420, int requested_bpc)
5717 {
5718 	uint8_t bpc;
5719 
5720 	if (is_y420) {
5721 		bpc = 8;
5722 
5723 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5724 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5725 			bpc = 16;
5726 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5727 			bpc = 12;
5728 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5729 			bpc = 10;
5730 	} else {
5731 		bpc = (uint8_t)connector->display_info.bpc;
5732 		/* Assume 8 bpc by default if no bpc is specified. */
5733 		bpc = bpc ? bpc : 8;
5734 	}
5735 
5736 	if (requested_bpc > 0) {
5737 		/*
5738 		 * Cap display bpc based on the user requested value.
5739 		 *
5740 		 * The value for state->max_bpc may not be correctly updated
5741 		 * depending on when the connector gets added to the state
5742 		 * or if this was called outside of atomic check, so it
5743 		 * can't be used directly.
5744 		 */
5745 		bpc = min_t(u8, bpc, requested_bpc);
5746 
5747 		/* Round down to the nearest even number. */
5748 		bpc = bpc - (bpc & 1);
5749 	}
5750 
5751 	switch (bpc) {
5752 	case 0:
5753 		/*
5754 		 * Temporary workaround: DRM doesn't parse color depth for
5755 		 * EDID revisions before 1.4.
5756 		 * TODO: Fix edid parsing
5757 		 */
5758 		return COLOR_DEPTH_888;
5759 	case 6:
5760 		return COLOR_DEPTH_666;
5761 	case 8:
5762 		return COLOR_DEPTH_888;
5763 	case 10:
5764 		return COLOR_DEPTH_101010;
5765 	case 12:
5766 		return COLOR_DEPTH_121212;
5767 	case 14:
5768 		return COLOR_DEPTH_141414;
5769 	case 16:
5770 		return COLOR_DEPTH_161616;
5771 	default:
5772 		return COLOR_DEPTH_UNDEFINED;
5773 	}
5774 }
5775 
5776 static enum dc_aspect_ratio
5777 get_aspect_ratio(const struct drm_display_mode *mode_in)
5778 {
5779 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5780 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5781 }
5782 
5783 static enum dc_color_space
5784 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5785 {
5786 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5787 
5788 	switch (dc_crtc_timing->pixel_encoding)	{
5789 	case PIXEL_ENCODING_YCBCR422:
5790 	case PIXEL_ENCODING_YCBCR444:
5791 	case PIXEL_ENCODING_YCBCR420:
5792 	{
5793 		/*
5794 		 * 27030 kHz is the separation point between HDTV and SDTV
5795 		 * according to the HDMI spec; use YCbCr709 and YCbCr601,
5796 		 * respectively.
5797 		 */
5798 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5799 			if (dc_crtc_timing->flags.Y_ONLY)
5800 				color_space =
5801 					COLOR_SPACE_YCBCR709_LIMITED;
5802 			else
5803 				color_space = COLOR_SPACE_YCBCR709;
5804 		} else {
5805 			if (dc_crtc_timing->flags.Y_ONLY)
5806 				color_space =
5807 					COLOR_SPACE_YCBCR601_LIMITED;
5808 			else
5809 				color_space = COLOR_SPACE_YCBCR601;
5810 		}
5811 
5812 	}
5813 	break;
5814 	case PIXEL_ENCODING_RGB:
5815 		color_space = COLOR_SPACE_SRGB;
5816 		break;
5817 
5818 	default:
5819 		WARN_ON(1);
5820 		break;
5821 	}
5822 
5823 	return color_space;
5824 }
5825 
5826 static bool adjust_colour_depth_from_display_info(
5827 	struct dc_crtc_timing *timing_out,
5828 	const struct drm_display_info *info)
5829 {
5830 	enum dc_color_depth depth = timing_out->display_color_depth;
5831 	int normalized_clk;
5832 	do {
5833 		normalized_clk = timing_out->pix_clk_100hz / 10;
5834 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5835 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5836 			normalized_clk /= 2;
5837 		/* Adjust the pixel clock per the HDMI spec based on colour depth */
5838 		switch (depth) {
5839 		case COLOR_DEPTH_888:
5840 			break;
5841 		case COLOR_DEPTH_101010:
5842 			normalized_clk = (normalized_clk * 30) / 24;
5843 			break;
5844 		case COLOR_DEPTH_121212:
5845 			normalized_clk = (normalized_clk * 36) / 24;
5846 			break;
5847 		case COLOR_DEPTH_161616:
5848 			normalized_clk = (normalized_clk * 48) / 24;
5849 			break;
5850 		default:
5851 			/* The above depths are the only ones valid for HDMI. */
5852 			return false;
5853 		}
5854 		if (normalized_clk <= info->max_tmds_clock) {
5855 			timing_out->display_color_depth = depth;
5856 			return true;
5857 		}
5858 	} while (--depth > COLOR_DEPTH_666);
5859 	return false;
5860 }
5861 
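/*
 * Translate a drm_display_mode into the dc_crtc_timing of a stream: pixel
 * encoding (forcing YCbCr 4:2:0 where required), color depth, VIC/HDMI
 * VIC, sync polarities and the h/v timing itself. FreeSync video modes
 * use the plain mode timings instead of the CRTC-adjusted ones.
 */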
5862 static void fill_stream_properties_from_drm_display_mode(
5863 	struct dc_stream_state *stream,
5864 	const struct drm_display_mode *mode_in,
5865 	const struct drm_connector *connector,
5866 	const struct drm_connector_state *connector_state,
5867 	const struct dc_stream_state *old_stream,
5868 	int requested_bpc)
5869 {
5870 	struct dc_crtc_timing *timing_out = &stream->timing;
5871 	const struct drm_display_info *info = &connector->display_info;
5872 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5873 	struct hdmi_vendor_infoframe hv_frame;
5874 	struct hdmi_avi_infoframe avi_frame;
5875 
5876 	memset(&hv_frame, 0, sizeof(hv_frame));
5877 	memset(&avi_frame, 0, sizeof(avi_frame));
5878 
5879 	timing_out->h_border_left = 0;
5880 	timing_out->h_border_right = 0;
5881 	timing_out->v_border_top = 0;
5882 	timing_out->v_border_bottom = 0;
5883 	/* TODO: un-hardcode */
5884 	if (drm_mode_is_420_only(info, mode_in)
5885 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5886 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5887 	else if (drm_mode_is_420_also(info, mode_in)
5888 			&& aconnector->force_yuv420_output)
5889 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5890 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5891 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5892 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5893 	else
5894 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5895 
5896 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5897 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5898 		connector,
5899 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5900 		requested_bpc);
5901 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5902 	timing_out->hdmi_vic = 0;
5903 
5904 	if (old_stream) {
5905 		timing_out->vic = old_stream->timing.vic;
5906 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5907 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5908 	} else {
5909 		timing_out->vic = drm_match_cea_mode(mode_in);
5910 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5911 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5912 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5913 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5914 	}
5915 
5916 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5917 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5918 		timing_out->vic = avi_frame.video_code;
5919 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5920 		timing_out->hdmi_vic = hv_frame.vic;
5921 	}
5922 
5923 	if (is_freesync_video_mode(mode_in, aconnector)) {
5924 		timing_out->h_addressable = mode_in->hdisplay;
5925 		timing_out->h_total = mode_in->htotal;
5926 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5927 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5928 		timing_out->v_total = mode_in->vtotal;
5929 		timing_out->v_addressable = mode_in->vdisplay;
5930 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5931 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5932 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5933 	} else {
5934 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5935 		timing_out->h_total = mode_in->crtc_htotal;
5936 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5937 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5938 		timing_out->v_total = mode_in->crtc_vtotal;
5939 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5940 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5941 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5942 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5943 	}
5944 
5945 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5946 
5947 	stream->output_color_space = get_output_color_space(timing_out);
5948 
5949 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5950 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5951 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5952 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5953 		    drm_mode_is_420_also(info, mode_in) &&
5954 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5955 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5956 			adjust_colour_depth_from_display_info(timing_out, info);
5957 		}
5958 	}
5959 }
5960 
5961 static void fill_audio_info(struct audio_info *audio_info,
5962 			    const struct drm_connector *drm_connector,
5963 			    const struct dc_sink *dc_sink)
5964 {
5965 	int i = 0;
5966 	int cea_revision = 0;
5967 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5968 
5969 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5970 	audio_info->product_id = edid_caps->product_id;
5971 
5972 	cea_revision = drm_connector->display_info.cea_rev;
5973 
5974 	strscpy(audio_info->display_name,
5975 		edid_caps->display_name,
5976 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5977 
5978 	if (cea_revision >= 3) {
5979 		audio_info->mode_count = edid_caps->audio_mode_count;
5980 
5981 		for (i = 0; i < audio_info->mode_count; ++i) {
5982 			audio_info->modes[i].format_code =
5983 					(enum audio_format_code)
5984 					(edid_caps->audio_modes[i].format_code);
5985 			audio_info->modes[i].channel_count =
5986 					edid_caps->audio_modes[i].channel_count;
5987 			audio_info->modes[i].sample_rates.all =
5988 					edid_caps->audio_modes[i].sample_rate;
5989 			audio_info->modes[i].sample_size =
5990 					edid_caps->audio_modes[i].sample_size;
5991 		}
5992 	}
5993 
5994 	audio_info->flags.all = edid_caps->speaker_flags;
5995 
5996 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5997 	if (drm_connector->latency_present[0]) {
5998 		audio_info->video_latency = drm_connector->video_latency[0];
5999 		audio_info->audio_latency = drm_connector->audio_latency[0];
6000 	}
6001 
6002 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6003 
6004 }
6005 
6006 static void
6007 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6008 				      struct drm_display_mode *dst_mode)
6009 {
6010 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6011 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6012 	dst_mode->crtc_clock = src_mode->crtc_clock;
6013 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6014 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6015 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6016 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6017 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6018 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6019 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6020 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6021 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6022 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6023 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6024 }
6025 
6026 static void
6027 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6028 					const struct drm_display_mode *native_mode,
6029 					bool scale_enabled)
6030 {
6031 	if (scale_enabled) {
6032 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6033 	} else if (native_mode->clock == drm_mode->clock &&
6034 			native_mode->htotal == drm_mode->htotal &&
6035 			native_mode->vtotal == drm_mode->vtotal) {
6036 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6037 	} else {
6038 		/* no scaling and no amdgpu-inserted mode, nothing to patch */
6039 	}
6040 }
6041 
6042 static struct dc_sink *
6043 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6044 {
6045 	struct dc_sink_init_data sink_init_data = { 0 };
6046 	struct dc_sink *sink = NULL;
6047 	sink_init_data.link = aconnector->dc_link;
6048 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6049 
6050 	sink = dc_sink_create(&sink_init_data);
6051 	if (!sink) {
6052 		DRM_ERROR("Failed to create sink!\n");
6053 		return NULL;
6054 	}
6055 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6056 
6057 	return sink;
6058 }
6059 
6060 static void set_multisync_trigger_params(
6061 		struct dc_stream_state *stream)
6062 {
6063 	struct dc_stream_state *master = NULL;
6064 
6065 	if (stream->triggered_crtc_reset.enabled) {
6066 		master = stream->triggered_crtc_reset.event_source;
6067 		stream->triggered_crtc_reset.event =
6068 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6069 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6070 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6071 	}
6072 }
6073 
6074 static void set_master_stream(struct dc_stream_state *stream_set[],
6075 			      int stream_count)
6076 {
6077 	int j, highest_rfr = 0, master_stream = 0;
6078 
6079 	for (j = 0;  j < stream_count; j++) {
6080 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6081 			int refresh_rate = 0;
6082 
6083 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6084 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6085 			if (refresh_rate > highest_rfr) {
6086 				highest_rfr = refresh_rate;
6087 				master_stream = j;
6088 			}
6089 		}
6090 	}
6091 	for (j = 0;  j < stream_count; j++) {
6092 		if (stream_set[j])
6093 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6094 	}
6095 }
6096 
6097 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6098 {
6099 	int i = 0;
6100 	struct dc_stream_state *stream;
6101 
6102 	if (context->stream_count < 2)
6103 		return;
6104 	for (i = 0; i < context->stream_count ; i++) {
6105 		if (!context->streams[i])
6106 			continue;
6107 		/*
6108 		 * TODO: add a function to read AMD VSDB bits and set
6109 		 * crtc_sync_master.multi_sync_enabled flag
6110 		 * For now it's set to false
6111 		 */
6112 	}
6113 
6114 	set_master_stream(context->streams, context->stream_count);
6115 
6116 	for (i = 0; i < context->stream_count ; i++) {
6117 		stream = context->streams[i];
6118 
6119 		if (!stream)
6120 			continue;
6121 
6122 		set_multisync_trigger_params(stream);
6123 	}
6124 }
6125 
6126 #if defined(CONFIG_DRM_AMD_DC_DCN)
6127 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6128 							struct dc_sink *sink, struct dc_stream_state *stream,
6129 							struct dsc_dec_dpcd_caps *dsc_caps)
6130 {
6131 	stream->timing.flags.DSC = 0;
6132 	dsc_caps->is_dsc_supported = false;
6133 
6134 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6135 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6136 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6137 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6138 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6139 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6140 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6141 				dsc_caps);
6142 	}
6143 }
6144 
6145 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6146 				    struct dc_sink *sink, struct dc_stream_state *stream,
6147 				    struct dsc_dec_dpcd_caps *dsc_caps,
6148 				    uint32_t max_dsc_target_bpp_limit_override)
6149 {
6150 	const struct dc_link_settings *verified_link_cap = NULL;
6151 	uint32_t link_bw_in_kbps;
6152 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6153 	struct dc *dc = sink->ctx->dc;
6154 	struct dc_dsc_bw_range bw_range = {0};
6155 	struct dc_dsc_config dsc_cfg = {0};
6156 
6157 	verified_link_cap = dc_link_get_link_cap(stream->link);
6158 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6159 	edp_min_bpp_x16 = 8 * 16;
6160 	edp_max_bpp_x16 = 8 * 16;
6161 
6162 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6163 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6164 
6165 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6166 		edp_min_bpp_x16 = edp_max_bpp_x16;
6167 
6168 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6169 				dc->debug.dsc_min_slice_height_override,
6170 				edp_min_bpp_x16, edp_max_bpp_x16,
6171 				dsc_caps,
6172 				&stream->timing,
6173 				&bw_range)) {
6174 
6175 		if (bw_range.max_kbps < link_bw_in_kbps) {
6176 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6177 					dsc_caps,
6178 					dc->debug.dsc_min_slice_height_override,
6179 					max_dsc_target_bpp_limit_override,
6180 					0,
6181 					&stream->timing,
6182 					&dsc_cfg)) {
6183 				stream->timing.dsc_cfg = dsc_cfg;
6184 				stream->timing.flags.DSC = 1;
6185 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6186 			}
6187 			return;
6188 		}
6189 	}
6190 
6191 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6192 				dsc_caps,
6193 				dc->debug.dsc_min_slice_height_override,
6194 				max_dsc_target_bpp_limit_override,
6195 				link_bw_in_kbps,
6196 				&stream->timing,
6197 				&dsc_cfg)) {
6198 		stream->timing.dsc_cfg = dsc_cfg;
6199 		stream->timing.flags.DSC = 1;
6200 	}
6201 }
6202 
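/*
 * Decide whether DSC is enabled for an SST stream and compute its config:
 * eDP uses the dedicated policy above, native DP (no dongle) computes a
 * config against the link bandwidth, and DP-to-HDMI PCONs only enable DSC
 * when the timing exceeds the available bandwidth. debugfs overrides for
 * force-enable, slice counts and bits per pixel are applied last.
 */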
6203 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6204 										struct dc_sink *sink, struct dc_stream_state *stream,
6205 										struct dsc_dec_dpcd_caps *dsc_caps)
6206 {
6207 	struct drm_connector *drm_connector = &aconnector->base;
6208 	uint32_t link_bandwidth_kbps;
6209 	uint32_t max_dsc_target_bpp_limit_override = 0;
6210 	struct dc *dc = sink->ctx->dc;
6211 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6212 	uint32_t dsc_max_supported_bw_in_kbps;
6213 
6214 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6215 							dc_link_get_link_cap(aconnector->dc_link));
6216 
6217 	if (stream->link && stream->link->local_sink)
6218 		max_dsc_target_bpp_limit_override =
6219 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6220 
6221 	/* Set DSC policy according to dsc_clock_en */
6222 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6223 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6224 
6225 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6226 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6227 
6228 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6229 
6230 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6231 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6232 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6233 						dsc_caps,
6234 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6235 						max_dsc_target_bpp_limit_override,
6236 						link_bandwidth_kbps,
6237 						&stream->timing,
6238 						&stream->timing.dsc_cfg)) {
6239 				stream->timing.flags.DSC = 1;
6240 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6241 								 __func__, drm_connector->name);
6242 			}
6243 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6244 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6245 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6246 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6247 
6248 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6249 					max_supported_bw_in_kbps > 0 &&
6250 					dsc_max_supported_bw_in_kbps > 0)
6251 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6252 						dsc_caps,
6253 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6254 						max_dsc_target_bpp_limit_override,
6255 						dsc_max_supported_bw_in_kbps,
6256 						&stream->timing,
6257 						&stream->timing.dsc_cfg)) {
6258 					stream->timing.flags.DSC = 1;
6259 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6260 									 __func__, drm_connector->name);
6261 				}
6262 		}
6263 	}
6264 
6265 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6266 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6267 		stream->timing.flags.DSC = 1;
6268 
6269 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6270 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6271 
6272 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6273 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6274 
6275 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6276 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6277 }
6278 #endif /* CONFIG_DRM_AMD_DC_DCN */
6279 
6280 /**
6281  * DOC: FreeSync Video
6282  *
6283  * When a userspace application wants to play a video, the content follows a
6284  * standard format definition that usually specifies the FPS for that format.
6285  * The list below illustrates some video formats and the expected FPS for
6286  * each:
6287  *
6288  * - TV/NTSC (23.976 FPS)
6289  * - Cinema (24 FPS)
6290  * - TV/PAL (25 FPS)
6291  * - TV/NTSC (29.97 FPS)
6292  * - TV/NTSC (30 FPS)
6293  * - Cinema HFR (48 FPS)
6294  * - TV/PAL (50 FPS)
6295  * - Commonly used (60 FPS)
6296  * - Multiples of 24 (48,72,96,120 FPS)
6297  *
6298  * The list of standard video formats is not huge and can be added to the
6299  * connector modeset list beforehand. With that, userspace can leverage
6300  * FreeSync to extend the front porch in order to attain the target refresh
6301  * rate. Such a switch will happen seamlessly, without screen blanking or
6302  * reprogramming of the output in any other way. If the userspace requests a
6303  * modesetting change compatible with FreeSync modes that only differ in the
6304  * refresh rate, DC will skip the full update and avoid blink during the
6305  * transition. For example, the video player can change the modesetting from
6306  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6307  * causing any display blink. This same concept can be applied to a mode
6308  * setting change.
6309  */
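/*
 * Return the mode used as the FreeSync video base: the highest refresh
 * mode that matches the preferred mode's resolution. The result is cached
 * in aconnector->freesync_vid_base after the first lookup.
 */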
6310 static struct drm_display_mode *
6311 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6312 			  bool use_probed_modes)
6313 {
6314 	struct drm_display_mode *m, *m_pref = NULL;
6315 	u16 current_refresh, highest_refresh;
6316 	struct list_head *list_head = use_probed_modes ?
6317 						    &aconnector->base.probed_modes :
6318 						    &aconnector->base.modes;
6319 
6320 	if (aconnector->freesync_vid_base.clock != 0)
6321 		return &aconnector->freesync_vid_base;
6322 
6323 	/* Find the preferred mode */
6324 	list_for_each_entry (m, list_head, head) {
6325 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6326 			m_pref = m;
6327 			break;
6328 		}
6329 	}
6330 
6331 	if (!m_pref) {
6332 		/* Probably an EDID with no preferred mode. Fall back to the first entry */
6333 		m_pref = list_first_entry_or_null(
6334 			&aconnector->base.modes, struct drm_display_mode, head);
6335 		if (!m_pref) {
6336 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6337 			return NULL;
6338 		}
6339 	}
6340 
6341 	highest_refresh = drm_mode_vrefresh(m_pref);
6342 
6343 	/*
6344 	 * Find the mode with highest refresh rate with same resolution.
6345 	 * For some monitors, preferred mode is not the mode with highest
6346 	 * supported refresh rate.
6347 	 */
6348 	list_for_each_entry (m, list_head, head) {
6349 		current_refresh  = drm_mode_vrefresh(m);
6350 
6351 		if (m->hdisplay == m_pref->hdisplay &&
6352 		    m->vdisplay == m_pref->vdisplay &&
6353 		    highest_refresh < current_refresh) {
6354 			highest_refresh = current_refresh;
6355 			m_pref = m;
6356 		}
6357 	}
6358 
6359 	aconnector->freesync_vid_base = *m_pref;
6360 	return m_pref;
6361 }
6362 
6363 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6364 				   struct amdgpu_dm_connector *aconnector)
6365 {
6366 	struct drm_display_mode *high_mode;
6367 	int timing_diff;
6368 
6369 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6370 	if (!high_mode || !mode)
6371 		return false;
6372 
6373 	timing_diff = high_mode->vtotal - mode->vtotal;
6374 
6375 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6376 	    high_mode->hdisplay != mode->hdisplay ||
6377 	    high_mode->vdisplay != mode->vdisplay ||
6378 	    high_mode->hsync_start != mode->hsync_start ||
6379 	    high_mode->hsync_end != mode->hsync_end ||
6380 	    high_mode->htotal != mode->htotal ||
6381 	    high_mode->hskew != mode->hskew ||
6382 	    high_mode->vscan != mode->vscan ||
6383 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6384 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6385 		return false;
6386 	else
6387 		return true;
6388 }
6389 
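/*
 * Build a dc_stream_state for a connector/mode pair: pick the sink (or a
 * fake one if none is attached), fill timing and audio info, apply the
 * DSC policy and scaling settings and, when PSR is supported, set up the
 * VSC SDP colorimetry and infopacket. Returns NULL on failure.
 */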
6390 static struct dc_stream_state *
6391 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6392 		       const struct drm_display_mode *drm_mode,
6393 		       const struct dm_connector_state *dm_state,
6394 		       const struct dc_stream_state *old_stream,
6395 		       int requested_bpc)
6396 {
6397 	struct drm_display_mode *preferred_mode = NULL;
6398 	struct drm_connector *drm_connector;
6399 	const struct drm_connector_state *con_state =
6400 		dm_state ? &dm_state->base : NULL;
6401 	struct dc_stream_state *stream = NULL;
6402 	struct drm_display_mode mode = *drm_mode;
6403 	struct drm_display_mode saved_mode;
6404 	struct drm_display_mode *freesync_mode = NULL;
6405 	bool native_mode_found = false;
6406 	bool recalculate_timing = false;
6407 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6408 	int mode_refresh;
6409 	int preferred_refresh = 0;
6410 #if defined(CONFIG_DRM_AMD_DC_DCN)
6411 	struct dsc_dec_dpcd_caps dsc_caps;
6412 #endif
6413 	struct dc_sink *sink = NULL;
6414 
6415 	memset(&saved_mode, 0, sizeof(saved_mode));
6416 
6417 	if (aconnector == NULL) {
6418 		DRM_ERROR("aconnector is NULL!\n");
6419 		return stream;
6420 	}
6421 
6422 	drm_connector = &aconnector->base;
6423 
6424 	if (!aconnector->dc_sink) {
6425 		sink = create_fake_sink(aconnector);
6426 		if (!sink)
6427 			return stream;
6428 	} else {
6429 		sink = aconnector->dc_sink;
6430 		dc_sink_retain(sink);
6431 	}
6432 
6433 	stream = dc_create_stream_for_sink(sink);
6434 
6435 	if (stream == NULL) {
6436 		DRM_ERROR("Failed to create stream for sink!\n");
6437 		goto finish;
6438 	}
6439 
6440 	stream->dm_stream_context = aconnector;
6441 
6442 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6443 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6444 
6445 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6446 		/* Search for preferred mode */
6447 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6448 			native_mode_found = true;
6449 			break;
6450 		}
6451 	}
6452 	if (!native_mode_found)
6453 		preferred_mode = list_first_entry_or_null(
6454 				&aconnector->base.modes,
6455 				struct drm_display_mode,
6456 				head);
6457 
6458 	mode_refresh = drm_mode_vrefresh(&mode);
6459 
6460 	if (preferred_mode == NULL) {
6461 		/*
6462 		 * This may not be an error, the use case is when we have no
6463 		 * usermode calls to reset and set mode upon hotplug. In this
6464 		 * case, we call set mode ourselves to restore the previous mode
6465 		 * and the modelist may not be filled in in time.
6466 		 */
6467 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6468 	} else {
6469 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6470 		if (recalculate_timing) {
6471 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6472 			saved_mode = mode;
6473 			mode = *freesync_mode;
6474 		} else {
6475 			decide_crtc_timing_for_drm_display_mode(
6476 				&mode, preferred_mode, scale);
6477 
6478 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6479 		}
6480 	}
6481 
6482 	if (recalculate_timing)
6483 		drm_mode_set_crtcinfo(&saved_mode, 0);
6484 	else if (!dm_state)
6485 		drm_mode_set_crtcinfo(&mode, 0);
6486 
6487 	/*
6488 	 * If scaling is enabled and the refresh rate didn't change,
6489 	 * copy the vic and polarities of the old timings.
6490 	 */
6491 	if (!scale || mode_refresh != preferred_refresh)
6492 		fill_stream_properties_from_drm_display_mode(
6493 			stream, &mode, &aconnector->base, con_state, NULL,
6494 			requested_bpc);
6495 	else
6496 		fill_stream_properties_from_drm_display_mode(
6497 			stream, &mode, &aconnector->base, con_state, old_stream,
6498 			requested_bpc);
6499 
6500 #if defined(CONFIG_DRM_AMD_DC_DCN)
6501 	/* SST DSC determination policy */
6502 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6503 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6504 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6505 #endif
6506 
6507 	update_stream_scaling_settings(&mode, dm_state, stream);
6508 
6509 	fill_audio_info(
6510 		&stream->audio_info,
6511 		drm_connector,
6512 		sink);
6513 
6514 	update_stream_signal(stream, sink);
6515 
6516 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6517 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6518 
6519 	if (stream->link->psr_settings.psr_feature_enabled) {
6520 		/*
6521 		 * Decide whether the stream supports VSC SDP colorimetry
6522 		 * before building the VSC infopacket.
6523 		 */
6524 		stream->use_vsc_sdp_for_colorimetry = false;
6525 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6526 			stream->use_vsc_sdp_for_colorimetry =
6527 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6528 		} else {
6529 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6530 				stream->use_vsc_sdp_for_colorimetry = true;
6531 		}
6532 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6533 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6534 
6535 	}
6536 finish:
6537 	dc_sink_release(sink);
6538 
6539 	return stream;
6540 }
6541 
6542 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6543 {
6544 	drm_crtc_cleanup(crtc);
6545 	kfree(crtc);
6546 }
6547 
6548 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6549 				  struct drm_crtc_state *state)
6550 {
6551 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6552 
6553 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6554 	if (cur->stream)
6555 		dc_stream_release(cur->stream);
6556 
6558 	__drm_atomic_helper_crtc_destroy_state(state);
6559 
6561 	kfree(state);
6562 }
6563 
6564 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6565 {
6566 	struct dm_crtc_state *state;
6567 
6568 	if (crtc->state)
6569 		dm_crtc_destroy_state(crtc, crtc->state);
6570 
6571 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6572 	if (WARN_ON(!state))
6573 		return;
6574 
6575 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6576 }
6577 
6578 static struct drm_crtc_state *
6579 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6580 {
6581 	struct dm_crtc_state *state, *cur;
6582 
6583 	if (WARN_ON(!crtc->state))
6584 		return NULL;
6585 
6586 	cur = to_dm_crtc_state(crtc->state);
6587 
6588 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6589 	if (!state)
6590 		return NULL;
6591 
6592 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6593 
6594 	if (cur->stream) {
6595 		state->stream = cur->stream;
6596 		dc_stream_retain(state->stream);
6597 	}
6598 
6599 	state->active_planes = cur->active_planes;
6600 	state->vrr_infopacket = cur->vrr_infopacket;
6601 	state->abm_level = cur->abm_level;
6602 	state->vrr_supported = cur->vrr_supported;
6603 	state->freesync_config = cur->freesync_config;
6604 	state->cm_has_degamma = cur->cm_has_degamma;
6605 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6606 	state->force_dpms_off = cur->force_dpms_off;
6607 	/* TODO: Duplicate dc_stream after the stream object is flattened */
6608 
6609 	return &state->base;
6610 }
6611 
6612 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6613 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6614 {
6615 	crtc_debugfs_init(crtc);
6616 
6617 	return 0;
6618 }
6619 #endif
6620 
6621 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6622 {
6623 	enum dc_irq_source irq_source;
6624 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6625 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6626 	int rc;
6627 
6628 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6629 
6630 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6631 
6632 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6633 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6634 	return rc;
6635 }
6636 
6637 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6638 {
6639 	enum dc_irq_source irq_source;
6640 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6641 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6642 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6643 #if defined(CONFIG_DRM_AMD_DC_DCN)
6644 	struct amdgpu_display_manager *dm = &adev->dm;
6645 	struct vblank_control_work *work;
6646 #endif
6647 	int rc = 0;
6648 
6649 	if (enable) {
6650 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6651 		if (amdgpu_dm_vrr_active(acrtc_state))
6652 			rc = dm_set_vupdate_irq(crtc, true);
6653 	} else {
6654 		/* vblank irq off -> vupdate irq off */
6655 		rc = dm_set_vupdate_irq(crtc, false);
6656 	}
6657 
6658 	if (rc)
6659 		return rc;
6660 
6661 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6662 
6663 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6664 		return -EBUSY;
6665 
6666 	if (amdgpu_in_reset(adev))
6667 		return 0;
6668 
6669 #if defined(CONFIG_DRM_AMD_DC_DCN)
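	/*
	 * Defer the remaining vblank on/off handling to a worker: this path
	 * can run in atomic context (note the GFP_ATOMIC allocation below),
	 * while the work it queues may need to sleep.
	 */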
6670 	if (dm->vblank_control_workqueue) {
6671 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6672 		if (!work)
6673 			return -ENOMEM;
6674 
6675 		INIT_WORK(&work->work, vblank_control_worker);
6676 		work->dm = dm;
6677 		work->acrtc = acrtc;
6678 		work->enable = enable;
6679 
6680 		if (acrtc_state->stream) {
6681 			dc_stream_retain(acrtc_state->stream);
6682 			work->stream = acrtc_state->stream;
6683 		}
6684 
6685 		queue_work(dm->vblank_control_workqueue, &work->work);
6686 	}
6687 #endif
6688 
6689 	return 0;
6690 }
6691 
6692 static int dm_enable_vblank(struct drm_crtc *crtc)
6693 {
6694 	return dm_set_vblank(crtc, true);
6695 }
6696 
6697 static void dm_disable_vblank(struct drm_crtc *crtc)
6698 {
6699 	dm_set_vblank(crtc, false);
6700 }
6701 
/* Implemented only the options currently available for the driver */
6703 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6704 	.reset = dm_crtc_reset_state,
6705 	.destroy = amdgpu_dm_crtc_destroy,
6706 	.set_config = drm_atomic_helper_set_config,
6707 	.page_flip = drm_atomic_helper_page_flip,
6708 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6709 	.atomic_destroy_state = dm_crtc_destroy_state,
6710 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6711 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6712 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6713 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6714 	.enable_vblank = dm_enable_vblank,
6715 	.disable_vblank = dm_disable_vblank,
6716 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6717 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6718 	.late_register = amdgpu_dm_crtc_late_register,
6719 #endif
6720 };
6721 
6722 static enum drm_connector_status
6723 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6724 {
6725 	bool connected;
6726 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6727 
6728 	/*
6729 	 * Notes:
6730 	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl, which
	 * makes it a bad place for *any* MST-related activity.
6733 	 */
6734 
6735 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6736 	    !aconnector->fake_enable)
6737 		connected = (aconnector->dc_sink != NULL);
6738 	else
6739 		connected = (aconnector->base.force == DRM_FORCE_ON);
6740 
6741 	update_subconnector_property(aconnector);
6742 
6743 	return (connected ? connector_status_connected :
6744 			connector_status_disconnected);
6745 }
6746 
6747 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6748 					    struct drm_connector_state *connector_state,
6749 					    struct drm_property *property,
6750 					    uint64_t val)
6751 {
6752 	struct drm_device *dev = connector->dev;
6753 	struct amdgpu_device *adev = drm_to_adev(dev);
6754 	struct dm_connector_state *dm_old_state =
6755 		to_dm_connector_state(connector->state);
6756 	struct dm_connector_state *dm_new_state =
6757 		to_dm_connector_state(connector_state);
6758 
6759 	int ret = -EINVAL;
6760 
6761 	if (property == dev->mode_config.scaling_mode_property) {
6762 		enum amdgpu_rmx_type rmx_type;
6763 
6764 		switch (val) {
6765 		case DRM_MODE_SCALE_CENTER:
6766 			rmx_type = RMX_CENTER;
6767 			break;
6768 		case DRM_MODE_SCALE_ASPECT:
6769 			rmx_type = RMX_ASPECT;
6770 			break;
6771 		case DRM_MODE_SCALE_FULLSCREEN:
6772 			rmx_type = RMX_FULL;
6773 			break;
6774 		case DRM_MODE_SCALE_NONE:
6775 		default:
6776 			rmx_type = RMX_OFF;
6777 			break;
6778 		}
6779 
6780 		if (dm_old_state->scaling == rmx_type)
6781 			return 0;
6782 
6783 		dm_new_state->scaling = rmx_type;
6784 		ret = 0;
6785 	} else if (property == adev->mode_info.underscan_hborder_property) {
6786 		dm_new_state->underscan_hborder = val;
6787 		ret = 0;
6788 	} else if (property == adev->mode_info.underscan_vborder_property) {
6789 		dm_new_state->underscan_vborder = val;
6790 		ret = 0;
6791 	} else if (property == adev->mode_info.underscan_property) {
6792 		dm_new_state->underscan_enable = val;
6793 		ret = 0;
6794 	} else if (property == adev->mode_info.abm_level_property) {
6795 		dm_new_state->abm_level = val;
6796 		ret = 0;
6797 	}
6798 
6799 	return ret;
6800 }
6801 
6802 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6803 					    const struct drm_connector_state *state,
6804 					    struct drm_property *property,
6805 					    uint64_t *val)
6806 {
6807 	struct drm_device *dev = connector->dev;
6808 	struct amdgpu_device *adev = drm_to_adev(dev);
6809 	struct dm_connector_state *dm_state =
6810 		to_dm_connector_state(state);
6811 	int ret = -EINVAL;
6812 
6813 	if (property == dev->mode_config.scaling_mode_property) {
6814 		switch (dm_state->scaling) {
6815 		case RMX_CENTER:
6816 			*val = DRM_MODE_SCALE_CENTER;
6817 			break;
6818 		case RMX_ASPECT:
6819 			*val = DRM_MODE_SCALE_ASPECT;
6820 			break;
6821 		case RMX_FULL:
6822 			*val = DRM_MODE_SCALE_FULLSCREEN;
6823 			break;
6824 		case RMX_OFF:
6825 		default:
6826 			*val = DRM_MODE_SCALE_NONE;
6827 			break;
6828 		}
6829 		ret = 0;
6830 	} else if (property == adev->mode_info.underscan_hborder_property) {
6831 		*val = dm_state->underscan_hborder;
6832 		ret = 0;
6833 	} else if (property == adev->mode_info.underscan_vborder_property) {
6834 		*val = dm_state->underscan_vborder;
6835 		ret = 0;
6836 	} else if (property == adev->mode_info.underscan_property) {
6837 		*val = dm_state->underscan_enable;
6838 		ret = 0;
6839 	} else if (property == adev->mode_info.abm_level_property) {
6840 		*val = dm_state->abm_level;
6841 		ret = 0;
6842 	}
6843 
6844 	return ret;
6845 }
6846 
6847 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6848 {
6849 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6850 
6851 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6852 }
6853 
6854 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6855 {
6856 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6857 	const struct dc_link *link = aconnector->dc_link;
6858 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6859 	struct amdgpu_display_manager *dm = &adev->dm;
6860 	int i;
6861 
6862 	/*
	 * Call only if mst_mgr was initialized before, since it's not done
6864 	 * for all connector types.
6865 	 */
6866 	if (aconnector->mst_mgr.dev)
6867 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6868 
6869 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6870 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6871 	for (i = 0; i < dm->num_of_edps; i++) {
6872 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6873 			backlight_device_unregister(dm->backlight_dev[i]);
6874 			dm->backlight_dev[i] = NULL;
6875 		}
6876 	}
6877 #endif
6878 
6879 	if (aconnector->dc_em_sink)
6880 		dc_sink_release(aconnector->dc_em_sink);
6881 	aconnector->dc_em_sink = NULL;
6882 	if (aconnector->dc_sink)
6883 		dc_sink_release(aconnector->dc_sink);
6884 	aconnector->dc_sink = NULL;
6885 
6886 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6887 	drm_connector_unregister(connector);
6888 	drm_connector_cleanup(connector);
6889 	if (aconnector->i2c) {
6890 		i2c_del_adapter(&aconnector->i2c->base);
6891 		kfree(aconnector->i2c);
6892 	}
6893 	kfree(aconnector->dm_dp_aux.aux.name);
6894 
6895 	kfree(connector);
6896 }
6897 
6898 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6899 {
6900 	struct dm_connector_state *state =
6901 		to_dm_connector_state(connector->state);
6902 
6903 	if (connector->state)
6904 		__drm_atomic_helper_connector_destroy_state(connector->state);
6905 
6906 	kfree(state);
6907 
6908 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6909 
6910 	if (state) {
6911 		state->scaling = RMX_OFF;
6912 		state->underscan_enable = false;
6913 		state->underscan_hborder = 0;
6914 		state->underscan_vborder = 0;
6915 		state->base.max_requested_bpc = 8;
6916 		state->vcpi_slots = 0;
6917 		state->pbn = 0;
6918 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6919 			state->abm_level = amdgpu_dm_abm_level;
6920 
6921 		__drm_atomic_helper_connector_reset(connector, &state->base);
6922 	}
6923 }
6924 
6925 struct drm_connector_state *
6926 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6927 {
6928 	struct dm_connector_state *state =
6929 		to_dm_connector_state(connector->state);
6930 
6931 	struct dm_connector_state *new_state =
6932 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6933 
6934 	if (!new_state)
6935 		return NULL;
6936 
6937 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6938 
6939 	new_state->freesync_capable = state->freesync_capable;
6940 	new_state->abm_level = state->abm_level;
6941 	new_state->scaling = state->scaling;
6942 	new_state->underscan_enable = state->underscan_enable;
6943 	new_state->underscan_hborder = state->underscan_hborder;
6944 	new_state->underscan_vborder = state->underscan_vborder;
6945 	new_state->vcpi_slots = state->vcpi_slots;
6946 	new_state->pbn = state->pbn;
6947 	return &new_state->base;
6948 }
6949 
6950 static int
6951 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6952 {
6953 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6954 		to_amdgpu_dm_connector(connector);
6955 	int r;
6956 
6957 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6958 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6959 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6960 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6961 		if (r)
6962 			return r;
6963 	}
6964 
6965 #if defined(CONFIG_DEBUG_FS)
6966 	connector_debugfs_init(amdgpu_dm_connector);
6967 #endif
6968 
6969 	return 0;
6970 }
6971 
6972 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6973 	.reset = amdgpu_dm_connector_funcs_reset,
6974 	.detect = amdgpu_dm_connector_detect,
6975 	.fill_modes = drm_helper_probe_single_connector_modes,
6976 	.destroy = amdgpu_dm_connector_destroy,
6977 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6978 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6979 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6980 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6981 	.late_register = amdgpu_dm_connector_late_register,
6982 	.early_unregister = amdgpu_dm_connector_unregister
6983 };
6984 
6985 static int get_modes(struct drm_connector *connector)
6986 {
6987 	return amdgpu_dm_connector_get_modes(connector);
6988 }
6989 
6990 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6991 {
6992 	struct dc_sink_init_data init_params = {
6993 			.link = aconnector->dc_link,
6994 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6995 	};
6996 	struct edid *edid;
6997 
6998 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7000 				aconnector->base.name);
7001 
7002 		aconnector->base.force = DRM_FORCE_OFF;
7003 		aconnector->base.override_edid = false;
7004 		return;
7005 	}
7006 
7007 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7008 
7009 	aconnector->edid = edid;
7010 
7011 	aconnector->dc_em_sink = dc_link_add_remote_sink(
7012 		aconnector->dc_link,
7013 		(uint8_t *)edid,
7014 		(edid->extensions + 1) * EDID_LENGTH,
7015 		&init_params);
7016 
7017 	if (aconnector->base.force == DRM_FORCE_ON) {
7018 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
7019 		aconnector->dc_link->local_sink :
7020 		aconnector->dc_em_sink;
7021 		dc_sink_retain(aconnector->dc_sink);
7022 	}
7023 }
7024 
7025 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7026 {
7027 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7028 
7029 	/*
	 * In case of a headless boot with force on for a DP managed connector,
	 * those settings have to be != 0 to get an initial modeset.
7032 	 */
7033 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7034 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7035 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7036 	}
7037 
7039 	aconnector->base.override_edid = true;
7040 	create_eml_sink(aconnector);
7041 }
7042 
7043 struct dc_stream_state *
7044 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7045 				const struct drm_display_mode *drm_mode,
7046 				const struct dm_connector_state *dm_state,
7047 				const struct dc_stream_state *old_stream)
7048 {
7049 	struct drm_connector *connector = &aconnector->base;
7050 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7051 	struct dc_stream_state *stream;
7052 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7053 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7054 	enum dc_status dc_result = DC_OK;
7055 
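	/*
	 * Build the stream at the requested bpc and, if DC rejects it, retry
	 * at progressively lower depths (in steps of 2) down to 6 bpc.
	 */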
7056 	do {
7057 		stream = create_stream_for_sink(aconnector, drm_mode,
7058 						dm_state, old_stream,
7059 						requested_bpc);
7060 		if (stream == NULL) {
7061 			DRM_ERROR("Failed to create stream for sink!\n");
7062 			break;
7063 		}
7064 
7065 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7066 
7067 		if (dc_result != DC_OK) {
7068 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7069 				      drm_mode->hdisplay,
7070 				      drm_mode->vdisplay,
7071 				      drm_mode->clock,
7072 				      dc_result,
7073 				      dc_status_to_str(dc_result));
7074 
7075 			dc_stream_release(stream);
7076 			stream = NULL;
7077 			requested_bpc -= 2; /* lower bpc to retry validation */
7078 		}
7079 
7080 	} while (stream == NULL && requested_bpc >= 6);
7081 
7082 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7083 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7084 
7085 		aconnector->force_yuv420_output = true;
7086 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7087 						dm_state, old_stream);
7088 		aconnector->force_yuv420_output = false;
7089 	}
7090 
7091 	return stream;
7092 }
7093 
7094 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7095 				   struct drm_display_mode *mode)
7096 {
7097 	int result = MODE_ERROR;
7098 	struct dc_sink *dc_sink;
7099 	/* TODO: Unhardcode stream count */
7100 	struct dc_stream_state *stream;
7101 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7102 
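	/* Interlaced and doublescan modes are not supported. */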
7103 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7104 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7105 		return result;
7106 
7107 	/*
	 * Only run this the first time mode_valid is called to initialize
7109 	 * EDID mgmt
7110 	 */
7111 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7112 		!aconnector->dc_em_sink)
7113 		handle_edid_mgmt(aconnector);
7114 
7115 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7116 
7117 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7118 				aconnector->base.force != DRM_FORCE_ON) {
7119 		DRM_ERROR("dc_sink is NULL!\n");
7120 		goto fail;
7121 	}
7122 
7123 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7124 	if (stream) {
7125 		dc_stream_release(stream);
7126 		result = MODE_OK;
7127 	}
7128 
7129 fail:
	/* TODO: error handling */
7131 	return result;
7132 }
7133 
7134 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7135 				struct dc_info_packet *out)
7136 {
7137 	struct hdmi_drm_infoframe frame;
7138 	unsigned char buf[30]; /* 26 + 4 */
7139 	ssize_t len;
7140 	int ret, i;
7141 
7142 	memset(out, 0, sizeof(*out));
7143 
7144 	if (!state->hdr_output_metadata)
7145 		return 0;
7146 
7147 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7148 	if (ret)
7149 		return ret;
7150 
7151 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7152 	if (len < 0)
7153 		return (int)len;
7154 
7155 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7156 	if (len != 30)
7157 		return -EINVAL;
7158 
7159 	/* Prepare the infopacket for DC. */
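	/*
	 * The same CTA-861 static HDR metadata payload is carried either in
	 * an HDMI Dynamic Range and Mastering InfoFrame or in a DP/eDP SDP;
	 * the header and leading payload bytes differ between the two.
	 */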
7160 	switch (state->connector->connector_type) {
7161 	case DRM_MODE_CONNECTOR_HDMIA:
7162 		out->hb0 = 0x87; /* type */
7163 		out->hb1 = 0x01; /* version */
7164 		out->hb2 = 0x1A; /* length */
7165 		out->sb[0] = buf[3]; /* checksum */
7166 		i = 1;
7167 		break;
7168 
7169 	case DRM_MODE_CONNECTOR_DisplayPort:
7170 	case DRM_MODE_CONNECTOR_eDP:
7171 		out->hb0 = 0x00; /* sdp id, zero */
7172 		out->hb1 = 0x87; /* type */
7173 		out->hb2 = 0x1D; /* payload len - 1 */
7174 		out->hb3 = (0x13 << 2); /* sdp version */
7175 		out->sb[0] = 0x01; /* version */
7176 		out->sb[1] = 0x1A; /* length */
7177 		i = 2;
7178 		break;
7179 
7180 	default:
7181 		return -EINVAL;
7182 	}
7183 
7184 	memcpy(&out->sb[i], &buf[4], 26);
7185 	out->valid = true;
7186 
7187 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7188 		       sizeof(out->sb), false);
7189 
7190 	return 0;
7191 }
7192 
7193 static int
7194 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7195 				 struct drm_atomic_state *state)
7196 {
7197 	struct drm_connector_state *new_con_state =
7198 		drm_atomic_get_new_connector_state(state, conn);
7199 	struct drm_connector_state *old_con_state =
7200 		drm_atomic_get_old_connector_state(state, conn);
7201 	struct drm_crtc *crtc = new_con_state->crtc;
7202 	struct drm_crtc_state *new_crtc_state;
7203 	int ret;
7204 
7205 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7206 
7207 	if (!crtc)
7208 		return 0;
7209 
7210 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7211 		struct dc_info_packet hdr_infopacket;
7212 
7213 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7214 		if (ret)
7215 			return ret;
7216 
7217 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7218 		if (IS_ERR(new_crtc_state))
7219 			return PTR_ERR(new_crtc_state);
7220 
7221 		/*
7222 		 * DC considers the stream backends changed if the
7223 		 * static metadata changes. Forcing the modeset also
7224 		 * gives a simple way for userspace to switch from
7225 		 * 8bpc to 10bpc when setting the metadata to enter
7226 		 * or exit HDR.
7227 		 *
7228 		 * Changing the static metadata after it's been
7229 		 * set is permissible, however. So only force a
7230 		 * modeset if we're entering or exiting HDR.
7231 		 */
7232 		new_crtc_state->mode_changed =
7233 			!old_con_state->hdr_output_metadata ||
7234 			!new_con_state->hdr_output_metadata;
7235 	}
7236 
7237 	return 0;
7238 }
7239 
7240 static const struct drm_connector_helper_funcs
7241 amdgpu_dm_connector_helper_funcs = {
	/*
	 * If a second, larger display is hotplugged in FB console mode, its
	 * higher-resolution modes are filtered out by drm_mode_validate_size()
	 * and are missing after the user starts lightdm. So we need to renew
	 * the modes list in the get_modes callback, not just return the modes
	 * count.
	 */
7248 	.get_modes = get_modes,
7249 	.mode_valid = amdgpu_dm_connector_mode_valid,
7250 	.atomic_check = amdgpu_dm_connector_atomic_check,
7251 };
7252 
7253 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7254 {
7255 }
7256 
7257 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7258 {
7259 	struct drm_atomic_state *state = new_crtc_state->state;
7260 	struct drm_plane *plane;
7261 	int num_active = 0;
7262 
7263 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7264 		struct drm_plane_state *new_plane_state;
7265 
7266 		/* Cursor planes are "fake". */
7267 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7268 			continue;
7269 
7270 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7271 
7272 		if (!new_plane_state) {
7273 			/*
			 * The plane is enabled on the CRTC and hasn't changed
7275 			 * state. This means that it previously passed
7276 			 * validation and is therefore enabled.
7277 			 */
7278 			num_active += 1;
7279 			continue;
7280 		}
7281 
7282 		/* We need a framebuffer to be considered enabled. */
7283 		num_active += (new_plane_state->fb != NULL);
7284 	}
7285 
7286 	return num_active;
7287 }
7288 
7289 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7290 					 struct drm_crtc_state *new_crtc_state)
7291 {
7292 	struct dm_crtc_state *dm_new_crtc_state =
7293 		to_dm_crtc_state(new_crtc_state);
7294 
7295 	dm_new_crtc_state->active_planes = 0;
7296 
7297 	if (!dm_new_crtc_state->stream)
7298 		return;
7299 
7300 	dm_new_crtc_state->active_planes =
7301 		count_crtc_active_planes(new_crtc_state);
7302 }
7303 
7304 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7305 				       struct drm_atomic_state *state)
7306 {
7307 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7308 									  crtc);
7309 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7310 	struct dc *dc = adev->dm.dc;
7311 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7312 	int ret = -EINVAL;
7313 
7314 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7315 
7316 	dm_update_crtc_active_planes(crtc, crtc_state);
7317 
7318 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7319 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7320 		return ret;
7321 	}
7322 
7323 	/*
7324 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7325 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7326 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7327 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7328 	 */
7329 	if (crtc_state->enable &&
7330 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7331 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7332 		return -EINVAL;
7333 	}
7334 
7335 	/* In some use cases, like reset, no stream is attached */
7336 	if (!dm_crtc_state->stream)
7337 		return 0;
7338 
7339 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7340 		return 0;
7341 
7342 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7343 	return ret;
7344 }
7345 
7346 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7347 				      const struct drm_display_mode *mode,
7348 				      struct drm_display_mode *adjusted_mode)
7349 {
7350 	return true;
7351 }
7352 
7353 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7354 	.disable = dm_crtc_helper_disable,
7355 	.atomic_check = dm_crtc_helper_atomic_check,
7356 	.mode_fixup = dm_crtc_helper_mode_fixup,
7357 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7358 };
7359 
7360 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7361 {
7362 
7363 }
7364 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
7385 
7386 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7387 					  struct drm_crtc_state *crtc_state,
7388 					  struct drm_connector_state *conn_state)
7389 {
7390 	struct drm_atomic_state *state = crtc_state->state;
7391 	struct drm_connector *connector = conn_state->connector;
7392 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7393 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7394 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7395 	struct drm_dp_mst_topology_mgr *mst_mgr;
7396 	struct drm_dp_mst_port *mst_port;
7397 	enum dc_color_depth color_depth;
7398 	int clock, bpp = 0;
7399 	bool is_y420 = false;
7400 
7401 	if (!aconnector->port || !aconnector->dc_sink)
7402 		return 0;
7403 
7404 	mst_port = aconnector->port;
7405 	mst_mgr = &aconnector->mst_port->mst_mgr;
7406 
7407 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7408 		return 0;
7409 
7410 	if (!state->duplicated) {
7411 		int max_bpc = conn_state->max_requested_bpc;
7412 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7413 				aconnector->force_yuv420_output;
7414 		color_depth = convert_color_depth_from_display_info(connector,
7415 								    is_y420,
7416 								    max_bpc);
7417 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7418 		clock = adjusted_mode->clock;
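		/*
		 * PBN (Payload Bandwidth Number) is the unit the MST manager
		 * uses for link bandwidth allocation; derive it from the mode
		 * clock and the effective bits per pixel.
		 */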
7419 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7420 	}
7421 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7422 									   mst_mgr,
7423 									   mst_port,
7424 									   dm_new_connector_state->pbn,
7425 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7426 	if (dm_new_connector_state->vcpi_slots < 0) {
7427 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7428 		return dm_new_connector_state->vcpi_slots;
7429 	}
7430 	return 0;
7431 }
7432 
7433 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7434 	.disable = dm_encoder_helper_disable,
7435 	.atomic_check = dm_encoder_helper_atomic_check
7436 };
7437 
7438 #if defined(CONFIG_DRM_AMD_DC_DCN)
7439 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7440 					    struct dc_state *dc_state,
7441 					    struct dsc_mst_fairness_vars *vars)
7442 {
7443 	struct dc_stream_state *stream = NULL;
7444 	struct drm_connector *connector;
7445 	struct drm_connector_state *new_con_state;
7446 	struct amdgpu_dm_connector *aconnector;
7447 	struct dm_connector_state *dm_conn_state;
7448 	int i, j;
7449 	int vcpi, pbn_div, pbn, slot_num = 0;
7450 
7451 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7452 
7453 		aconnector = to_amdgpu_dm_connector(connector);
7454 
7455 		if (!aconnector->port)
7456 			continue;
7457 
7458 		if (!new_con_state || !new_con_state->crtc)
7459 			continue;
7460 
7461 		dm_conn_state = to_dm_connector_state(new_con_state);
7462 
7463 		for (j = 0; j < dc_state->stream_count; j++) {
7464 			stream = dc_state->streams[j];
7465 			if (!stream)
7466 				continue;
7467 
7468 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7469 				break;
7470 
7471 			stream = NULL;
7472 		}
7473 
7474 		if (!stream)
7475 			continue;
7476 
7477 		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
7479 		for (j = 0; j < dc_state->stream_count; j++) {
7480 			if (vars[j].aconnector == aconnector) {
7481 				pbn = vars[j].pbn;
7482 				break;
7483 			}
7484 		}
7485 
7486 		if (j == dc_state->stream_count)
7487 			continue;
7488 
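		/* Each MST time slot carries pbn_div worth of PBN. */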
7489 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7490 
7491 		if (stream->timing.flags.DSC != 1) {
7492 			dm_conn_state->pbn = pbn;
7493 			dm_conn_state->vcpi_slots = slot_num;
7494 
7495 			drm_dp_mst_atomic_enable_dsc(state,
7496 						     aconnector->port,
7497 						     dm_conn_state->pbn,
7498 						     0,
7499 						     false);
7500 			continue;
7501 		}
7502 
7503 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7504 						    aconnector->port,
7505 						    pbn, pbn_div,
7506 						    true);
7507 		if (vcpi < 0)
7508 			return vcpi;
7509 
7510 		dm_conn_state->pbn = pbn;
7511 		dm_conn_state->vcpi_slots = vcpi;
7512 	}
7513 	return 0;
7514 }
7515 #endif
7516 
7517 static void dm_drm_plane_reset(struct drm_plane *plane)
7518 {
7519 	struct dm_plane_state *amdgpu_state = NULL;
7520 
7521 	if (plane->state)
7522 		plane->funcs->atomic_destroy_state(plane, plane->state);
7523 
7524 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7525 	WARN_ON(amdgpu_state == NULL);
7526 
7527 	if (amdgpu_state)
7528 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7529 }
7530 
7531 static struct drm_plane_state *
7532 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7533 {
7534 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7535 
7536 	old_dm_plane_state = to_dm_plane_state(plane->state);
7537 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7538 	if (!dm_plane_state)
7539 		return NULL;
7540 
7541 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7542 
7543 	if (old_dm_plane_state->dc_state) {
7544 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7545 		dc_plane_state_retain(dm_plane_state->dc_state);
7546 	}
7547 
7548 	return &dm_plane_state->base;
7549 }
7550 
7551 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7552 				struct drm_plane_state *state)
7553 {
7554 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7555 
7556 	if (dm_plane_state->dc_state)
7557 		dc_plane_state_release(dm_plane_state->dc_state);
7558 
7559 	drm_atomic_helper_plane_destroy_state(plane, state);
7560 }
7561 
7562 static const struct drm_plane_funcs dm_plane_funcs = {
7563 	.update_plane	= drm_atomic_helper_update_plane,
7564 	.disable_plane	= drm_atomic_helper_disable_plane,
7565 	.destroy	= drm_primary_helper_destroy,
7566 	.reset = dm_drm_plane_reset,
7567 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7568 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7569 	.format_mod_supported = dm_plane_format_mod_supported,
7570 };
7571 
7572 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7573 				      struct drm_plane_state *new_state)
7574 {
7575 	struct amdgpu_framebuffer *afb;
7576 	struct drm_gem_object *obj;
7577 	struct amdgpu_device *adev;
7578 	struct amdgpu_bo *rbo;
7579 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7580 	struct list_head list;
7581 	struct ttm_validate_buffer tv;
7582 	struct ww_acquire_ctx ticket;
7583 	uint32_t domain;
7584 	int r;
7585 
7586 	if (!new_state->fb) {
7587 		DRM_DEBUG_KMS("No FB bound\n");
7588 		return 0;
7589 	}
7590 
7591 	afb = to_amdgpu_framebuffer(new_state->fb);
7592 	obj = new_state->fb->obj[0];
7593 	rbo = gem_to_amdgpu_bo(obj);
7594 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7595 	INIT_LIST_HEAD(&list);
7596 
7597 	tv.bo = &rbo->tbo;
7598 	tv.num_shared = 1;
7599 	list_add(&tv.head, &list);
7600 
7601 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7602 	if (r) {
7603 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7604 		return r;
7605 	}
7606 
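	/*
	 * Cursor buffers must be pinned in VRAM; other plane types may also
	 * scan out from the domains reported as supported for display.
	 */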
7607 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7608 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7609 	else
7610 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7611 
7612 	r = amdgpu_bo_pin(rbo, domain);
7613 	if (unlikely(r != 0)) {
7614 		if (r != -ERESTARTSYS)
7615 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7616 		ttm_eu_backoff_reservation(&ticket, &list);
7617 		return r;
7618 	}
7619 
7620 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7621 	if (unlikely(r != 0)) {
7622 		amdgpu_bo_unpin(rbo);
7623 		ttm_eu_backoff_reservation(&ticket, &list);
7624 		DRM_ERROR("%p bind failed\n", rbo);
7625 		return r;
7626 	}
7627 
7628 	ttm_eu_backoff_reservation(&ticket, &list);
7629 
7630 	afb->address = amdgpu_bo_gpu_offset(rbo);
7631 
7632 	amdgpu_bo_ref(rbo);
7633 
	/*
7635 	 * We don't do surface updates on planes that have been newly created,
7636 	 * but we also don't have the afb->address during atomic check.
7637 	 *
7638 	 * Fill in buffer attributes depending on the address here, but only on
7639 	 * newly created planes since they're not being used by DC yet and this
7640 	 * won't modify global state.
7641 	 */
7642 	dm_plane_state_old = to_dm_plane_state(plane->state);
7643 	dm_plane_state_new = to_dm_plane_state(new_state);
7644 
7645 	if (dm_plane_state_new->dc_state &&
7646 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7647 		struct dc_plane_state *plane_state =
7648 			dm_plane_state_new->dc_state;
7649 		bool force_disable_dcc = !plane_state->dcc.enable;
7650 
7651 		fill_plane_buffer_attributes(
7652 			adev, afb, plane_state->format, plane_state->rotation,
7653 			afb->tiling_flags,
7654 			&plane_state->tiling_info, &plane_state->plane_size,
7655 			&plane_state->dcc, &plane_state->address,
7656 			afb->tmz_surface, force_disable_dcc);
7657 	}
7658 
7659 	return 0;
7660 }
7661 
7662 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7663 				       struct drm_plane_state *old_state)
7664 {
7665 	struct amdgpu_bo *rbo;
7666 	int r;
7667 
7668 	if (!old_state->fb)
7669 		return;
7670 
7671 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7672 	r = amdgpu_bo_reserve(rbo, false);
7673 	if (unlikely(r)) {
7674 		DRM_ERROR("failed to reserve rbo before unpin\n");
7675 		return;
7676 	}
7677 
7678 	amdgpu_bo_unpin(rbo);
7679 	amdgpu_bo_unreserve(rbo);
7680 	amdgpu_bo_unref(&rbo);
7681 }
7682 
7683 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7684 				       struct drm_crtc_state *new_crtc_state)
7685 {
7686 	struct drm_framebuffer *fb = state->fb;
7687 	int min_downscale, max_upscale;
7688 	int min_scale = 0;
7689 	int max_scale = INT_MAX;
7690 
7691 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7692 	if (fb && state->crtc) {
7693 		/* Validate viewport to cover the case when only the position changes */
7694 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7695 			int viewport_width = state->crtc_w;
7696 			int viewport_height = state->crtc_h;
7697 
7698 			if (state->crtc_x < 0)
7699 				viewport_width += state->crtc_x;
7700 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7701 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7702 
7703 			if (state->crtc_y < 0)
7704 				viewport_height += state->crtc_y;
7705 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7706 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7707 
7708 			if (viewport_width < 0 || viewport_height < 0) {
7709 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7710 				return -EINVAL;
7711 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7712 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7713 				return -EINVAL;
7714 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7715 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7716 				return -EINVAL;
7717 			}
7718 
7719 		}
7720 
7721 		/* Get min/max allowed scaling factors from plane caps. */
7722 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7723 					     &min_downscale, &max_upscale);
7724 		/*
7725 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7726 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7727 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7728 		 */
7729 		min_scale = (1000 << 16) / max_upscale;
7730 		max_scale = (1000 << 16) / min_downscale;
7731 	}
7732 
7733 	return drm_atomic_helper_check_plane_state(
7734 		state, new_crtc_state, min_scale, max_scale, true, true);
7735 }
7736 
7737 static int dm_plane_atomic_check(struct drm_plane *plane,
7738 				 struct drm_atomic_state *state)
7739 {
7740 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7741 										 plane);
7742 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7743 	struct dc *dc = adev->dm.dc;
7744 	struct dm_plane_state *dm_plane_state;
7745 	struct dc_scaling_info scaling_info;
7746 	struct drm_crtc_state *new_crtc_state;
7747 	int ret;
7748 
7749 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7750 
7751 	dm_plane_state = to_dm_plane_state(new_plane_state);
7752 
7753 	if (!dm_plane_state->dc_state)
7754 		return 0;
7755 
7756 	new_crtc_state =
7757 		drm_atomic_get_new_crtc_state(state,
7758 					      new_plane_state->crtc);
7759 	if (!new_crtc_state)
7760 		return -EINVAL;
7761 
7762 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7763 	if (ret)
7764 		return ret;
7765 
7766 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7767 	if (ret)
7768 		return ret;
7769 
7770 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7771 		return 0;
7772 
7773 	return -EINVAL;
7774 }
7775 
7776 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7777 				       struct drm_atomic_state *state)
7778 {
7779 	/* Only support async updates on cursor planes. */
7780 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7781 		return -EINVAL;
7782 
7783 	return 0;
7784 }
7785 
7786 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7787 					 struct drm_atomic_state *state)
7788 {
7789 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7790 									   plane);
7791 	struct drm_plane_state *old_state =
7792 		drm_atomic_get_old_plane_state(state, plane);
7793 
7794 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7795 
7796 	swap(plane->state->fb, new_state->fb);
7797 
7798 	plane->state->src_x = new_state->src_x;
7799 	plane->state->src_y = new_state->src_y;
7800 	plane->state->src_w = new_state->src_w;
7801 	plane->state->src_h = new_state->src_h;
7802 	plane->state->crtc_x = new_state->crtc_x;
7803 	plane->state->crtc_y = new_state->crtc_y;
7804 	plane->state->crtc_w = new_state->crtc_w;
7805 	plane->state->crtc_h = new_state->crtc_h;
7806 
7807 	handle_cursor_update(plane, old_state);
7808 }
7809 
7810 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7811 	.prepare_fb = dm_plane_helper_prepare_fb,
7812 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7813 	.atomic_check = dm_plane_atomic_check,
7814 	.atomic_async_check = dm_plane_atomic_async_check,
7815 	.atomic_async_update = dm_plane_atomic_async_update
7816 };
7817 
7818 /*
7819  * TODO: these are currently initialized to rgb formats only.
7820  * For future use cases we should either initialize them dynamically based on
7821  * plane capabilities, or initialize this array to all formats, so internal drm
7822  * check will succeed, and let DC implement proper check
7823  */
7824 static const uint32_t rgb_formats[] = {
7825 	DRM_FORMAT_XRGB8888,
7826 	DRM_FORMAT_ARGB8888,
7827 	DRM_FORMAT_RGBA8888,
7828 	DRM_FORMAT_XRGB2101010,
7829 	DRM_FORMAT_XBGR2101010,
7830 	DRM_FORMAT_ARGB2101010,
7831 	DRM_FORMAT_ABGR2101010,
7832 	DRM_FORMAT_XRGB16161616,
7833 	DRM_FORMAT_XBGR16161616,
7834 	DRM_FORMAT_ARGB16161616,
7835 	DRM_FORMAT_ABGR16161616,
7836 	DRM_FORMAT_XBGR8888,
7837 	DRM_FORMAT_ABGR8888,
7838 	DRM_FORMAT_RGB565,
7839 };
7840 
7841 static const uint32_t overlay_formats[] = {
7842 	DRM_FORMAT_XRGB8888,
7843 	DRM_FORMAT_ARGB8888,
7844 	DRM_FORMAT_RGBA8888,
7845 	DRM_FORMAT_XBGR8888,
7846 	DRM_FORMAT_ABGR8888,
7847 	DRM_FORMAT_RGB565
7848 };
7849 
7850 static const u32 cursor_formats[] = {
7851 	DRM_FORMAT_ARGB8888
7852 };
7853 
7854 static int get_plane_formats(const struct drm_plane *plane,
7855 			     const struct dc_plane_cap *plane_cap,
7856 			     uint32_t *formats, int max_formats)
7857 {
7858 	int i, num_formats = 0;
7859 
7860 	/*
7861 	 * TODO: Query support for each group of formats directly from
7862 	 * DC plane caps. This will require adding more formats to the
7863 	 * caps list.
7864 	 */
7865 
7866 	switch (plane->type) {
7867 	case DRM_PLANE_TYPE_PRIMARY:
7868 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7869 			if (num_formats >= max_formats)
7870 				break;
7871 
7872 			formats[num_formats++] = rgb_formats[i];
7873 		}
7874 
7875 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7876 			formats[num_formats++] = DRM_FORMAT_NV12;
7877 		if (plane_cap && plane_cap->pixel_format_support.p010)
7878 			formats[num_formats++] = DRM_FORMAT_P010;
7879 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7880 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7881 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7882 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7883 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7884 		}
7885 		break;
7886 
7887 	case DRM_PLANE_TYPE_OVERLAY:
7888 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7889 			if (num_formats >= max_formats)
7890 				break;
7891 
7892 			formats[num_formats++] = overlay_formats[i];
7893 		}
7894 		break;
7895 
7896 	case DRM_PLANE_TYPE_CURSOR:
7897 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7898 			if (num_formats >= max_formats)
7899 				break;
7900 
7901 			formats[num_formats++] = cursor_formats[i];
7902 		}
7903 		break;
7904 	}
7905 
7906 	return num_formats;
7907 }
7908 
7909 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7910 				struct drm_plane *plane,
7911 				unsigned long possible_crtcs,
7912 				const struct dc_plane_cap *plane_cap)
7913 {
7914 	uint32_t formats[32];
7915 	int num_formats;
7916 	int res = -EPERM;
7917 	unsigned int supported_rotations;
7918 	uint64_t *modifiers = NULL;
7919 
7920 	num_formats = get_plane_formats(plane, plane_cap, formats,
7921 					ARRAY_SIZE(formats));
7922 
7923 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7924 	if (res)
7925 		return res;
7926 
7927 	if (modifiers == NULL)
7928 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7929 
7930 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7931 				       &dm_plane_funcs, formats, num_formats,
7932 				       modifiers, plane->type, NULL);
7933 	kfree(modifiers);
7934 	if (res)
7935 		return res;
7936 
7937 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7938 	    plane_cap && plane_cap->per_pixel_alpha) {
7939 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7940 					  BIT(DRM_MODE_BLEND_PREMULTI);
7941 
7942 		drm_plane_create_alpha_property(plane);
7943 		drm_plane_create_blend_mode_property(plane, blend_caps);
7944 	}
7945 
7946 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7947 	    plane_cap &&
7948 	    (plane_cap->pixel_format_support.nv12 ||
7949 	     plane_cap->pixel_format_support.p010)) {
7950 		/* This only affects YUV formats. */
7951 		drm_plane_create_color_properties(
7952 			plane,
7953 			BIT(DRM_COLOR_YCBCR_BT601) |
7954 			BIT(DRM_COLOR_YCBCR_BT709) |
7955 			BIT(DRM_COLOR_YCBCR_BT2020),
7956 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7957 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7958 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7959 	}
7960 
7961 	supported_rotations =
7962 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7963 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7964 
7965 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7966 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7967 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7968 						   supported_rotations);
7969 
7970 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7971 
7972 	/* Create (reset) the plane state */
7973 	if (plane->funcs->reset)
7974 		plane->funcs->reset(plane);
7975 
7976 	return 0;
7977 }
7978 
7979 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7980 			       struct drm_plane *plane,
7981 			       uint32_t crtc_index)
7982 {
7983 	struct amdgpu_crtc *acrtc = NULL;
7984 	struct drm_plane *cursor_plane;
7985 
7986 	int res = -ENOMEM;
7987 
7988 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7989 	if (!cursor_plane)
7990 		goto fail;
7991 
7992 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7993 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7994 
7995 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7996 	if (!acrtc)
7997 		goto fail;
7998 
7999 	res = drm_crtc_init_with_planes(
8000 			dm->ddev,
8001 			&acrtc->base,
8002 			plane,
8003 			cursor_plane,
8004 			&amdgpu_dm_crtc_funcs, NULL);
8005 
8006 	if (res)
8007 		goto fail;
8008 
8009 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8010 
8011 	/* Create (reset) the plane state */
8012 	if (acrtc->base.funcs->reset)
8013 		acrtc->base.funcs->reset(&acrtc->base);
8014 
8015 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8016 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8017 
8018 	acrtc->crtc_id = crtc_index;
8019 	acrtc->base.enabled = false;
8020 	acrtc->otg_inst = -1;
8021 
8022 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8023 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8024 				   true, MAX_COLOR_LUT_ENTRIES);
8025 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8026 
8027 	return 0;
8028 
8029 fail:
8030 	kfree(acrtc);
8031 	kfree(cursor_plane);
8032 	return res;
8033 }
8034 
8035 
8036 static int to_drm_connector_type(enum signal_type st)
8037 {
8038 	switch (st) {
8039 	case SIGNAL_TYPE_HDMI_TYPE_A:
8040 		return DRM_MODE_CONNECTOR_HDMIA;
8041 	case SIGNAL_TYPE_EDP:
8042 		return DRM_MODE_CONNECTOR_eDP;
8043 	case SIGNAL_TYPE_LVDS:
8044 		return DRM_MODE_CONNECTOR_LVDS;
8045 	case SIGNAL_TYPE_RGB:
8046 		return DRM_MODE_CONNECTOR_VGA;
8047 	case SIGNAL_TYPE_DISPLAY_PORT:
8048 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8049 		return DRM_MODE_CONNECTOR_DisplayPort;
8050 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8051 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8052 		return DRM_MODE_CONNECTOR_DVID;
8053 	case SIGNAL_TYPE_VIRTUAL:
8054 		return DRM_MODE_CONNECTOR_VIRTUAL;
8055 
8056 	default:
8057 		return DRM_MODE_CONNECTOR_Unknown;
8058 	}
8059 }
8060 
8061 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8062 {
8063 	struct drm_encoder *encoder;
8064 
8065 	/* There is only one encoder per connector */
8066 	drm_connector_for_each_possible_encoder(connector, encoder)
8067 		return encoder;
8068 
8069 	return NULL;
8070 }
8071 
8072 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8073 {
8074 	struct drm_encoder *encoder;
8075 	struct amdgpu_encoder *amdgpu_encoder;
8076 
8077 	encoder = amdgpu_dm_connector_to_encoder(connector);
8078 
8079 	if (encoder == NULL)
8080 		return;
8081 
8082 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8083 
8084 	amdgpu_encoder->native_mode.clock = 0;
8085 
8086 	if (!list_empty(&connector->probed_modes)) {
8087 		struct drm_display_mode *preferred_mode = NULL;
8088 
8089 		list_for_each_entry(preferred_mode,
8090 				    &connector->probed_modes,
8091 				    head) {
8092 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8093 				amdgpu_encoder->native_mode = *preferred_mode;
8094 
8095 			break;
8096 		}
8097 
8098 	}
8099 }
8100 
8101 static struct drm_display_mode *
8102 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8103 			     char *name,
8104 			     int hdisplay, int vdisplay)
8105 {
8106 	struct drm_device *dev = encoder->dev;
8107 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8108 	struct drm_display_mode *mode = NULL;
8109 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8110 
8111 	mode = drm_mode_duplicate(dev, native_mode);
8112 
8113 	if (mode == NULL)
8114 		return NULL;
8115 
8116 	mode->hdisplay = hdisplay;
8117 	mode->vdisplay = vdisplay;
8118 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8119 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8120 
8121 	return mode;
8122 
8123 }
8124 
8125 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8126 						 struct drm_connector *connector)
8127 {
8128 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8129 	struct drm_display_mode *mode = NULL;
8130 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8131 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8132 				to_amdgpu_dm_connector(connector);
8133 	int i;
8134 	int n;
8135 	struct mode_size {
8136 		char name[DRM_DISPLAY_MODE_LEN];
8137 		int w;
8138 		int h;
8139 	} common_modes[] = {
8140 		{  "640x480",  640,  480},
8141 		{  "800x600",  800,  600},
8142 		{ "1024x768", 1024,  768},
8143 		{ "1280x720", 1280,  720},
8144 		{ "1280x800", 1280,  800},
8145 		{"1280x1024", 1280, 1024},
8146 		{ "1440x900", 1440,  900},
8147 		{"1680x1050", 1680, 1050},
8148 		{"1600x1200", 1600, 1200},
8149 		{"1920x1080", 1920, 1080},
8150 		{"1920x1200", 1920, 1200}
8151 	};
8152 
8153 	n = ARRAY_SIZE(common_modes);
8154 
8155 	for (i = 0; i < n; i++) {
8156 		struct drm_display_mode *curmode = NULL;
8157 		bool mode_existed = false;
8158 
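		/*
		 * Skip modes larger than the native mode, and skip the native
		 * resolution itself since it is already in the probed list.
		 */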
8159 		if (common_modes[i].w > native_mode->hdisplay ||
8160 		    common_modes[i].h > native_mode->vdisplay ||
8161 		   (common_modes[i].w == native_mode->hdisplay &&
8162 		    common_modes[i].h == native_mode->vdisplay))
8163 			continue;
8164 
8165 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8166 			if (common_modes[i].w == curmode->hdisplay &&
8167 			    common_modes[i].h == curmode->vdisplay) {
8168 				mode_existed = true;
8169 				break;
8170 			}
8171 		}
8172 
8173 		if (mode_existed)
8174 			continue;
8175 
8176 		mode = amdgpu_dm_create_common_mode(encoder,
8177 				common_modes[i].name, common_modes[i].w,
8178 				common_modes[i].h);
8179 		if (!mode)
8180 			continue;
8181 
8182 		drm_mode_probed_add(connector, mode);
8183 		amdgpu_dm_connector->num_modes++;
8184 	}
8185 }
8186 
8187 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8188 {
8189 	struct drm_encoder *encoder;
8190 	struct amdgpu_encoder *amdgpu_encoder;
8191 	const struct drm_display_mode *native_mode;
8192 
8193 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8194 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8195 		return;
8196 
8197 	encoder = amdgpu_dm_connector_to_encoder(connector);
8198 	if (!encoder)
8199 		return;
8200 
8201 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8202 
8203 	native_mode = &amdgpu_encoder->native_mode;
8204 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8205 		return;
8206 
8207 	drm_connector_set_panel_orientation_with_quirk(connector,
8208 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8209 						       native_mode->hdisplay,
8210 						       native_mode->vdisplay);
8211 }
8212 
8213 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8214 					      struct edid *edid)
8215 {
8216 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8217 			to_amdgpu_dm_connector(connector);
8218 
8219 	if (edid) {
8220 		/* empty probed_modes */
8221 		INIT_LIST_HEAD(&connector->probed_modes);
8222 		amdgpu_dm_connector->num_modes =
8223 				drm_add_edid_modes(connector, edid);
8224 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode, and a mode later in the probed
		 * list may have a higher, preferred resolution. For example,
		 * 3840x2160 in the base EDID preferred timing and 4096x2160
		 * as the preferred resolution in a DID extension block later.
		 */
8233 		drm_mode_sort(&connector->probed_modes);
8234 		amdgpu_dm_get_native_mode(connector);
8235 
8236 		/* Freesync capabilities are reset by calling
8237 		 * drm_add_edid_modes() and need to be
8238 		 * restored here.
8239 		 */
8240 		amdgpu_dm_update_freesync_caps(connector, edid);
8241 
8242 		amdgpu_set_panel_orientation(connector);
8243 	} else {
8244 		amdgpu_dm_connector->num_modes = 0;
8245 	}
8246 }
8247 
8248 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8249 			      struct drm_display_mode *mode)
8250 {
8251 	struct drm_display_mode *m;
8252 
8253 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8254 		if (drm_mode_equal(m, mode))
8255 			return true;
8256 	}
8257 
8258 	return false;
8259 }
8260 
8261 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8262 {
8263 	const struct drm_display_mode *m;
8264 	struct drm_display_mode *new_mode;
8265 	uint i;
8266 	uint32_t new_modes_count = 0;
8267 
8268 	/* Standard FPS values
8269 	 *
8270 	 * 23.976       - TV/NTSC
8271 	 * 24 	        - Cinema
8272 	 * 25 	        - TV/PAL
8273 	 * 29.97        - TV/NTSC
8274 	 * 30 	        - TV/NTSC
8275 	 * 48 	        - Cinema HFR
8276 	 * 50 	        - TV/PAL
8277 	 * 60 	        - Commonly used
8278 	 * 48,72,96,120 - Multiples of 24
8279 	 */
8280 	static const uint32_t common_rates[] = {
8281 		23976, 24000, 25000, 29970, 30000,
8282 		48000, 50000, 60000, 72000, 96000, 120000
8283 	};
8284 
8285 	/*
8286 	 * Find mode with highest refresh rate with the same resolution
8287 	 * as the preferred mode. Some monitors report a preferred mode
8288 	 * with lower resolution than the highest refresh rate supported.
8289 	 */
8290 
8291 	m = get_highest_refresh_rate_mode(aconnector, true);
8292 	if (!m)
8293 		return 0;
8294 
8295 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8296 		uint64_t target_vtotal, target_vtotal_diff;
8297 		uint64_t num, den;
8298 
8299 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8300 			continue;
8301 
8302 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8303 		    common_rates[i] > aconnector->max_vfreq * 1000)
8304 			continue;
8305 
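		/*
		 * vrefresh (in mHz) = clock (kHz) * 1000 * 1000 / (htotal * vtotal),
		 * so the vtotal needed to hit the target rate at this mode's
		 * pixel clock is clock * 1000 * 1000 / (htotal * target rate).
		 */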
8306 		num = (unsigned long long)m->clock * 1000 * 1000;
8307 		den = common_rates[i] * (unsigned long long)m->htotal;
8308 		target_vtotal = div_u64(num, den);
8309 		target_vtotal_diff = target_vtotal - m->vtotal;
8310 
8311 		/* Check for illegal modes */
8312 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8313 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8314 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8315 			continue;
8316 
8317 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8318 		if (!new_mode)
8319 			goto out;
8320 
8321 		new_mode->vtotal += (u16)target_vtotal_diff;
8322 		new_mode->vsync_start += (u16)target_vtotal_diff;
8323 		new_mode->vsync_end += (u16)target_vtotal_diff;
8324 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8325 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8326 
8327 		if (!is_duplicate_mode(aconnector, new_mode)) {
8328 			drm_mode_probed_add(&aconnector->base, new_mode);
8329 			new_modes_count += 1;
8330 		} else
8331 			drm_mode_destroy(aconnector->base.dev, new_mode);
8332 	}
8333  out:
8334 	return new_modes_count;
8335 }
8336 
8337 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8338 						   struct edid *edid)
8339 {
8340 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8341 		to_amdgpu_dm_connector(connector);
8342 
8343 	if (!edid)
8344 		return;
8345 
8346 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8347 		amdgpu_dm_connector->num_modes +=
8348 			add_fs_modes(amdgpu_dm_connector);
8349 }
8350 
8351 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8352 {
8353 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8354 			to_amdgpu_dm_connector(connector);
8355 	struct drm_encoder *encoder;
8356 	struct edid *edid = amdgpu_dm_connector->edid;
8357 
8358 	encoder = amdgpu_dm_connector_to_encoder(connector);
8359 
8360 	if (!drm_edid_is_valid(edid)) {
8361 		amdgpu_dm_connector->num_modes =
8362 				drm_add_modes_noedid(connector, 640, 480);
8363 	} else {
8364 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8365 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8366 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8367 	}
8368 	amdgpu_dm_fbc_init(connector);
8369 
8370 	return amdgpu_dm_connector->num_modes;
8371 }
8372 
8373 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8374 				     struct amdgpu_dm_connector *aconnector,
8375 				     int connector_type,
8376 				     struct dc_link *link,
8377 				     int link_index)
8378 {
8379 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8380 
8381 	/*
8382 	 * Some of the properties below require access to state, like bpc.
8383 	 * Allocate some default initial connector state with our reset helper.
8384 	 */
8385 	if (aconnector->base.funcs->reset)
8386 		aconnector->base.funcs->reset(&aconnector->base);
8387 
8388 	aconnector->connector_id = link_index;
8389 	aconnector->dc_link = link;
8390 	aconnector->base.interlace_allowed = false;
8391 	aconnector->base.doublescan_allowed = false;
8392 	aconnector->base.stereo_allowed = false;
8393 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8394 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8395 	aconnector->audio_inst = -1;
8396 	mutex_init(&aconnector->hpd_lock);
8397 
8398 	/*
8399 	 * configure support HPD hot plug connector_>polled default value is 0
8400 	 * which means HPD hot plug not supported
8401 	 */
8402 	switch (connector_type) {
8403 	case DRM_MODE_CONNECTOR_HDMIA:
8404 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
8407 		break;
8408 	case DRM_MODE_CONNECTOR_DisplayPort:
8409 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8410 		link->link_enc = link_enc_cfg_get_link_enc(link);
8411 		ASSERT(link->link_enc);
		if (link->link_enc)
			aconnector->base.ycbcr_420_allowed =
				link->link_enc->features.dp_ycbcr420_supported;
8415 		break;
8416 	case DRM_MODE_CONNECTOR_DVID:
8417 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8418 		break;
8419 	default:
8420 		break;
8421 	}
8422 
8423 	drm_object_attach_property(&aconnector->base.base,
8424 				dm->ddev->mode_config.scaling_mode_property,
8425 				DRM_MODE_SCALE_NONE);
8426 
8427 	drm_object_attach_property(&aconnector->base.base,
8428 				adev->mode_info.underscan_property,
8429 				UNDERSCAN_OFF);
8430 	drm_object_attach_property(&aconnector->base.base,
8431 				adev->mode_info.underscan_hborder_property,
8432 				0);
8433 	drm_object_attach_property(&aconnector->base.base,
8434 				adev->mode_info.underscan_vborder_property,
8435 				0);
8436 
8437 	if (!aconnector->mst_port)
8438 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8439 
8440 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8441 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8442 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8443 
8444 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8445 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8446 		drm_object_attach_property(&aconnector->base.base,
8447 				adev->mode_info.abm_level_property, 0);
8448 	}
8449 
8450 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8451 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8452 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8453 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8454 
8455 		if (!aconnector->mst_port)
8456 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8457 
8458 #ifdef CONFIG_DRM_AMD_DC_HDCP
8459 		if (adev->dm.hdcp_workqueue)
8460 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8461 #endif
8462 	}
8463 }
8464 
8465 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8466 			      struct i2c_msg *msgs, int num)
8467 {
8468 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8469 	struct ddc_service *ddc_service = i2c->ddc_service;
8470 	struct i2c_command cmd;
8471 	int i;
8472 	int result = -EIO;
8473 
8474 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8475 
8476 	if (!cmd.payloads)
8477 		return result;
8478 
8479 	cmd.number_of_payloads = num;
8480 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8481 	cmd.speed = 100;
8482 
8483 	for (i = 0; i < num; i++) {
8484 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8485 		cmd.payloads[i].address = msgs[i].addr;
8486 		cmd.payloads[i].length = msgs[i].len;
8487 		cmd.payloads[i].data = msgs[i].buf;
8488 	}
8489 
8490 	if (dc_submit_i2c(
8491 			ddc_service->ctx->dc,
8492 			ddc_service->ddc_pin->hw_info.ddc_channel,
8493 			&cmd))
8494 		result = num;
8495 
8496 	kfree(cmd.payloads);
8497 	return result;
8498 }
8499 
8500 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8501 {
8502 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8503 }
8504 
8505 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8506 	.master_xfer = amdgpu_dm_i2c_xfer,
8507 	.functionality = amdgpu_dm_i2c_func,
8508 };
8509 
8510 static struct amdgpu_i2c_adapter *
8511 create_i2c(struct ddc_service *ddc_service,
8512 	   int link_index,
8513 	   int *res)
8514 {
8515 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8516 	struct amdgpu_i2c_adapter *i2c;
8517 
8518 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8519 	if (!i2c)
8520 		return NULL;
8521 	i2c->base.owner = THIS_MODULE;
8522 	i2c->base.class = I2C_CLASS_DDC;
8523 	i2c->base.dev.parent = &adev->pdev->dev;
8524 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8525 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8526 	i2c_set_adapdata(&i2c->base, i2c);
8527 	i2c->ddc_service = ddc_service;
8528 	if (i2c->ddc_service->ddc_pin)
8529 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8530 
8531 	return i2c;
8532 }
8533 
8534 
8535 /*
8536  * Note: this function assumes that dc_link_detect() was called for the
8537  * dc_link which will be represented by this aconnector.
8538  */
8539 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8540 				    struct amdgpu_dm_connector *aconnector,
8541 				    uint32_t link_index,
8542 				    struct amdgpu_encoder *aencoder)
8543 {
8544 	int res = 0;
8545 	int connector_type;
8546 	struct dc *dc = dm->dc;
8547 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8548 	struct amdgpu_i2c_adapter *i2c;
8549 
8550 	link->priv = aconnector;
8551 
8552 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8553 
8554 	i2c = create_i2c(link->ddc, link->link_index, &res);
8555 	if (!i2c) {
8556 		DRM_ERROR("Failed to create i2c adapter data\n");
8557 		return -ENOMEM;
8558 	}
8559 
8560 	aconnector->i2c = i2c;
8561 	res = i2c_add_adapter(&i2c->base);
8562 
8563 	if (res) {
8564 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8565 		goto out_free;
8566 	}
8567 
8568 	connector_type = to_drm_connector_type(link->connector_signal);
8569 
8570 	res = drm_connector_init_with_ddc(
8571 			dm->ddev,
8572 			&aconnector->base,
8573 			&amdgpu_dm_connector_funcs,
8574 			connector_type,
8575 			&i2c->base);
8576 
8577 	if (res) {
8578 		DRM_ERROR("connector_init failed\n");
8579 		aconnector->connector_id = -1;
8580 		goto out_free;
8581 	}
8582 
8583 	drm_connector_helper_add(
8584 			&aconnector->base,
8585 			&amdgpu_dm_connector_helper_funcs);
8586 
8587 	amdgpu_dm_connector_init_helper(
8588 		dm,
8589 		aconnector,
8590 		connector_type,
8591 		link,
8592 		link_index);
8593 
8594 	drm_connector_attach_encoder(
8595 		&aconnector->base, &aencoder->base);
8596 
8597 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8598 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8599 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8600 
8601 out_free:
8602 	if (res) {
8603 		kfree(i2c);
8604 		aconnector->i2c = NULL;
8605 	}
8606 	return res;
8607 }
8608 
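/*
 * Returns the bitmask of CRTCs that an encoder can possibly be routed to,
 * based on the number of CRTCs the ASIC exposes: e.g. num_crtc == 4 yields
 * 0xf (CRTCs 0-3); values not covered by the switch fall back to 0x3f.
 */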
8609 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8610 {
8611 	switch (adev->mode_info.num_crtc) {
8612 	case 1:
8613 		return 0x1;
8614 	case 2:
8615 		return 0x3;
8616 	case 3:
8617 		return 0x7;
8618 	case 4:
8619 		return 0xf;
8620 	case 5:
8621 		return 0x1f;
8622 	case 6:
8623 	default:
8624 		return 0x3f;
8625 	}
8626 }
8627 
8628 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8629 				  struct amdgpu_encoder *aencoder,
8630 				  uint32_t link_index)
8631 {
8632 	struct amdgpu_device *adev = drm_to_adev(dev);
8633 
8634 	int res = drm_encoder_init(dev,
8635 				   &aencoder->base,
8636 				   &amdgpu_dm_encoder_funcs,
8637 				   DRM_MODE_ENCODER_TMDS,
8638 				   NULL);
8639 
8640 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8641 
8642 	if (!res)
8643 		aencoder->encoder_id = link_index;
8644 	else
8645 		aencoder->encoder_id = -1;
8646 
8647 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8648 
8649 	return res;
8650 }
8651 
8652 static void manage_dm_interrupts(struct amdgpu_device *adev,
8653 				 struct amdgpu_crtc *acrtc,
8654 				 bool enable)
8655 {
8656 	/*
8657 	 * We have no guarantee that the frontend index maps to the same
8658 	 * backend index - some even map to more than one.
8659 	 *
8660 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8661 	 */
8662 	int irq_type =
8663 		amdgpu_display_crtc_idx_to_irq_type(
8664 			adev,
8665 			acrtc->crtc_id);
8666 
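	/*
	 * Note the enable and disable paths are mirrored: vblank is turned on
	 * before taking the pageflip/vline0 IRQ references, and those
	 * references are dropped again before vblank is turned off.
	 */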
8667 	if (enable) {
8668 		drm_crtc_vblank_on(&acrtc->base);
8669 		amdgpu_irq_get(
8670 			adev,
8671 			&adev->pageflip_irq,
8672 			irq_type);
8673 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8674 		amdgpu_irq_get(
8675 			adev,
8676 			&adev->vline0_irq,
8677 			irq_type);
8678 #endif
8679 	} else {
8680 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8681 		amdgpu_irq_put(
8682 			adev,
8683 			&adev->vline0_irq,
8684 			irq_type);
8685 #endif
8686 		amdgpu_irq_put(
8687 			adev,
8688 			&adev->pageflip_irq,
8689 			irq_type);
8690 		drm_crtc_vblank_off(&acrtc->base);
8691 	}
8692 }
8693 
8694 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8695 				      struct amdgpu_crtc *acrtc)
8696 {
8697 	int irq_type =
8698 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8699 
8700 	/**
8701 	 * This reads the current state for the IRQ and force reapplies
8702 	 * the setting to hardware.
8703 	 */
8704 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8705 }
8706 
8707 static bool
8708 is_scaling_state_different(const struct dm_connector_state *dm_state,
8709 			   const struct dm_connector_state *old_dm_state)
8710 {
8711 	if (dm_state->scaling != old_dm_state->scaling)
8712 		return true;
8713 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8714 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8715 			return true;
8716 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8717 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8718 			return true;
8719 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8720 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8721 		return true;
8722 	return false;
8723 }
8724 
8725 #ifdef CONFIG_DRM_AMD_DC_HDCP
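/*
 * Decide whether HDCP needs to be (re)enabled for this connector by comparing
 * the old and new content_protection state. Returns true when
 * hdcp_update_display() should be called for the transition; some special
 * cases (S3 resume, stream re-enable, hotplug) also patch the new state in
 * place before returning.
 */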
8726 static bool is_content_protection_different(struct drm_connector_state *state,
8727 					    const struct drm_connector_state *old_state,
8728 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8729 {
8730 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8731 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8732 
8733 	/* Handle: Type0/1 change */
8734 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8735 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8736 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8737 		return true;
8738 	}
8739 
	/* CP is being re-enabled, ignore this.
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
8744 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8745 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8746 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8747 		return false;
8748 	}
8749 
	/* S3 resume case: old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8751 	 *
8752 	 * Handles:	UNDESIRED -> ENABLED
8753 	 */
8754 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8755 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8756 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8757 
8758 	/* Stream removed and re-enabled
8759 	 *
8760 	 * Can sometimes overlap with the HPD case,
8761 	 * thus set update_hdcp to false to avoid
8762 	 * setting HDCP multiple times.
8763 	 *
8764 	 * Handles:	DESIRED -> DESIRED (Special case)
8765 	 */
8766 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8767 		state->crtc && state->crtc->enabled &&
8768 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8769 		dm_con_state->update_hdcp = false;
8770 		return true;
8771 	}
8772 
8773 	/* Hot-plug, headless s3, dpms
8774 	 *
8775 	 * Only start HDCP if the display is connected/enabled.
8776 	 * update_hdcp flag will be set to false until the next
8777 	 * HPD comes in.
8778 	 *
8779 	 * Handles:	DESIRED -> DESIRED (Special case)
8780 	 */
8781 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8782 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8783 		dm_con_state->update_hdcp = false;
8784 		return true;
8785 	}
8786 
8787 	/*
8788 	 * Handles:	UNDESIRED -> UNDESIRED
8789 	 *		DESIRED -> DESIRED
8790 	 *		ENABLED -> ENABLED
8791 	 */
8792 	if (old_state->content_protection == state->content_protection)
8793 		return false;
8794 
8795 	/*
8796 	 * Handles:	UNDESIRED -> DESIRED
8797 	 *		DESIRED -> UNDESIRED
8798 	 *		ENABLED -> UNDESIRED
8799 	 */
8800 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8801 		return true;
8802 
8803 	/*
8804 	 * Handles:	DESIRED -> ENABLED
8805 	 */
8806 	return false;
8807 }
8808 
8809 #endif
8810 static void remove_stream(struct amdgpu_device *adev,
8811 			  struct amdgpu_crtc *acrtc,
8812 			  struct dc_stream_state *stream)
8813 {
8814 	/* this is the update mode case */
8815 
8816 	acrtc->otg_inst = -1;
8817 	acrtc->enabled = false;
8818 }
8819 
8820 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8821 			       struct dc_cursor_position *position)
8822 {
8823 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8824 	int x, y;
8825 	int xorigin = 0, yorigin = 0;
8826 
8827 	if (!crtc || !plane->state->fb)
8828 		return 0;
8829 
8830 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8831 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8832 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8833 			  __func__,
8834 			  plane->state->crtc_w,
8835 			  plane->state->crtc_h);
8836 		return -EINVAL;
8837 	}
8838 
8839 	x = plane->state->crtc_x;
8840 	y = plane->state->crtc_y;
8841 
8842 	if (x <= -amdgpu_crtc->max_cursor_width ||
8843 	    y <= -amdgpu_crtc->max_cursor_height)
8844 		return 0;
8845 
8846 	if (x < 0) {
8847 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8848 		x = 0;
8849 	}
8850 	if (y < 0) {
8851 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8852 		y = 0;
8853 	}
8854 	position->enable = true;
8855 	position->translate_by_source = true;
8856 	position->x = x;
8857 	position->y = y;
8858 	position->x_hotspot = xorigin;
8859 	position->y_hotspot = yorigin;
8860 
8861 	return 0;
8862 }
8863 
8864 static void handle_cursor_update(struct drm_plane *plane,
8865 				 struct drm_plane_state *old_plane_state)
8866 {
8867 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8868 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8869 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8870 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8871 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8872 	uint64_t address = afb ? afb->address : 0;
8873 	struct dc_cursor_position position = {0};
8874 	struct dc_cursor_attributes attributes;
8875 	int ret;
8876 
8877 	if (!plane->state->fb && !old_plane_state->fb)
8878 		return;
8879 
8880 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8881 		      __func__,
8882 		      amdgpu_crtc->crtc_id,
8883 		      plane->state->crtc_w,
8884 		      plane->state->crtc_h);
8885 
8886 	ret = get_cursor_position(plane, crtc, &position);
8887 	if (ret)
8888 		return;
8889 
8890 	if (!position.enable) {
8891 		/* turn off cursor */
8892 		if (crtc_state && crtc_state->stream) {
8893 			mutex_lock(&adev->dm.dc_lock);
8894 			dc_stream_set_cursor_position(crtc_state->stream,
8895 						      &position);
8896 			mutex_unlock(&adev->dm.dc_lock);
8897 		}
8898 		return;
8899 	}
8900 
8901 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8902 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8903 
8904 	memset(&attributes, 0, sizeof(attributes));
8905 	attributes.address.high_part = upper_32_bits(address);
8906 	attributes.address.low_part  = lower_32_bits(address);
8907 	attributes.width             = plane->state->crtc_w;
8908 	attributes.height            = plane->state->crtc_h;
8909 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8910 	attributes.rotation_angle    = 0;
8911 	attributes.attribute_flags.value = 0;
8912 
8913 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8914 
8915 	if (crtc_state->stream) {
8916 		mutex_lock(&adev->dm.dc_lock);
8917 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8918 							 &attributes))
8919 			DRM_ERROR("DC failed to set cursor attributes\n");
8920 
8921 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8922 						   &position))
8923 			DRM_ERROR("DC failed to set cursor position\n");
8924 		mutex_unlock(&adev->dm.dc_lock);
8925 	}
8926 }
8927 
8928 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8929 {
8930 
8931 	assert_spin_locked(&acrtc->base.dev->event_lock);
8932 	WARN_ON(acrtc->event);
8933 
8934 	acrtc->event = acrtc->base.state->event;
8935 
8936 	/* Set the flip status */
8937 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8938 
8939 	/* Mark this event as consumed */
8940 	acrtc->base.state->event = NULL;
8941 
8942 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8943 		     acrtc->crtc_id);
8944 }
8945 
8946 static void update_freesync_state_on_stream(
8947 	struct amdgpu_display_manager *dm,
8948 	struct dm_crtc_state *new_crtc_state,
8949 	struct dc_stream_state *new_stream,
8950 	struct dc_plane_state *surface,
8951 	u32 flip_timestamp_in_us)
8952 {
8953 	struct mod_vrr_params vrr_params;
8954 	struct dc_info_packet vrr_infopacket = {0};
8955 	struct amdgpu_device *adev = dm->adev;
8956 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8957 	unsigned long flags;
8958 	bool pack_sdp_v1_3 = false;
8959 
8960 	if (!new_stream)
8961 		return;
8962 
8963 	/*
8964 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8965 	 * For now it's sufficient to just guard against these conditions.
8966 	 */
8967 
8968 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8969 		return;
8970 
8971 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
8973 
8974 	if (surface) {
8975 		mod_freesync_handle_preflip(
8976 			dm->freesync_module,
8977 			surface,
8978 			new_stream,
8979 			flip_timestamp_in_us,
8980 			&vrr_params);
8981 
8982 		if (adev->family < AMDGPU_FAMILY_AI &&
8983 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8984 			mod_freesync_handle_v_update(dm->freesync_module,
8985 						     new_stream, &vrr_params);
8986 
8987 			/* Need to call this before the frame ends. */
8988 			dc_stream_adjust_vmin_vmax(dm->dc,
8989 						   new_crtc_state->stream,
8990 						   &vrr_params.adjust);
8991 		}
8992 	}
8993 
8994 	mod_freesync_build_vrr_infopacket(
8995 		dm->freesync_module,
8996 		new_stream,
8997 		&vrr_params,
8998 		PACKET_TYPE_VRR,
8999 		TRANSFER_FUNC_UNKNOWN,
9000 		&vrr_infopacket,
9001 		pack_sdp_v1_3);
9002 
9003 	new_crtc_state->freesync_timing_changed |=
9004 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9005 			&vrr_params.adjust,
9006 			sizeof(vrr_params.adjust)) != 0);
9007 
9008 	new_crtc_state->freesync_vrr_info_changed |=
9009 		(memcmp(&new_crtc_state->vrr_infopacket,
9010 			&vrr_infopacket,
9011 			sizeof(vrr_infopacket)) != 0);
9012 
9013 	acrtc->dm_irq_params.vrr_params = vrr_params;
9014 	new_crtc_state->vrr_infopacket = vrr_infopacket;
9015 
9016 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9017 	new_stream->vrr_infopacket = vrr_infopacket;
9018 
9019 	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
9021 			      new_crtc_state->base.crtc->base.id,
9022 			      (int)new_crtc_state->base.vrr_enabled,
9023 			      (int)vrr_params.state);
9024 
9025 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9026 }
9027 
9028 static void update_stream_irq_parameters(
9029 	struct amdgpu_display_manager *dm,
9030 	struct dm_crtc_state *new_crtc_state)
9031 {
9032 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9033 	struct mod_vrr_params vrr_params;
9034 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9035 	struct amdgpu_device *adev = dm->adev;
9036 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9037 	unsigned long flags;
9038 
9039 	if (!new_stream)
9040 		return;
9041 
9042 	/*
9043 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9044 	 * For now it's sufficient to just guard against these conditions.
9045 	 */
9046 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9047 		return;
9048 
9049 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9050 	vrr_params = acrtc->dm_irq_params.vrr_params;
9051 
9052 	if (new_crtc_state->vrr_supported &&
9053 	    config.min_refresh_in_uhz &&
9054 	    config.max_refresh_in_uhz) {
9055 		/*
9056 		 * if freesync compatible mode was set, config.state will be set
9057 		 * in atomic check
9058 		 */
9059 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9060 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9061 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9062 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9063 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9064 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9065 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9066 		} else {
9067 			config.state = new_crtc_state->base.vrr_enabled ?
9068 						     VRR_STATE_ACTIVE_VARIABLE :
9069 						     VRR_STATE_INACTIVE;
9070 		}
9071 	} else {
9072 		config.state = VRR_STATE_UNSUPPORTED;
9073 	}
9074 
9075 	mod_freesync_build_vrr_params(dm->freesync_module,
9076 				      new_stream,
9077 				      &config, &vrr_params);
9078 
9079 	new_crtc_state->freesync_timing_changed |=
9080 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9081 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9082 
9083 	new_crtc_state->freesync_config = config;
9084 	/* Copy state for access from DM IRQ handler */
9085 	acrtc->dm_irq_params.freesync_config = config;
9086 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9087 	acrtc->dm_irq_params.vrr_params = vrr_params;
9088 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9089 }
9090 
9091 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9092 					    struct dm_crtc_state *new_state)
9093 {
9094 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9095 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9096 
9097 	if (!old_vrr_active && new_vrr_active) {
9098 		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * re-enable after disable would compute bogus vblank/pflip
		 * timestamps if it happened inside the display front-porch.
9102 		 *
9103 		 * We also need vupdate irq for the actual core vblank handling
9104 		 * at end of vblank.
9105 		 */
9106 		dm_set_vupdate_irq(new_state->base.crtc, true);
9107 		drm_crtc_vblank_get(new_state->base.crtc);
9108 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9109 				 __func__, new_state->base.crtc->base.id);
9110 	} else if (old_vrr_active && !new_vrr_active) {
9111 		/* Transition VRR active -> inactive:
9112 		 * Allow vblank irq disable again for fixed refresh rate.
9113 		 */
9114 		dm_set_vupdate_irq(new_state->base.crtc, false);
9115 		drm_crtc_vblank_put(new_state->base.crtc);
9116 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9117 				 __func__, new_state->base.crtc->base.id);
9118 	}
9119 }
9120 
9121 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9122 {
9123 	struct drm_plane *plane;
9124 	struct drm_plane_state *old_plane_state;
9125 	int i;
9126 
9127 	/*
9128 	 * TODO: Make this per-stream so we don't issue redundant updates for
9129 	 * commits with multiple streams.
9130 	 */
9131 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9132 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9133 			handle_cursor_update(plane, old_plane_state);
9134 }
9135 
9136 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9137 				    struct dc_state *dc_state,
9138 				    struct drm_device *dev,
9139 				    struct amdgpu_display_manager *dm,
9140 				    struct drm_crtc *pcrtc,
9141 				    bool wait_for_vblank)
9142 {
9143 	uint32_t i;
9144 	uint64_t timestamp_ns;
9145 	struct drm_plane *plane;
9146 	struct drm_plane_state *old_plane_state, *new_plane_state;
9147 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9148 	struct drm_crtc_state *new_pcrtc_state =
9149 			drm_atomic_get_new_crtc_state(state, pcrtc);
9150 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9151 	struct dm_crtc_state *dm_old_crtc_state =
9152 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9153 	int planes_count = 0, vpos, hpos;
9154 	long r;
9155 	unsigned long flags;
9156 	struct amdgpu_bo *abo;
9157 	uint32_t target_vblank, last_flip_vblank;
9158 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9159 	bool pflip_present = false;
9160 	struct {
9161 		struct dc_surface_update surface_updates[MAX_SURFACES];
9162 		struct dc_plane_info plane_infos[MAX_SURFACES];
9163 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9164 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9165 		struct dc_stream_update stream_update;
9166 	} *bundle;
9167 
9168 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9169 
9170 	if (!bundle) {
9171 		dm_error("Failed to allocate update bundle\n");
9172 		goto cleanup;
9173 	}
9174 
9175 	/*
9176 	 * Disable the cursor first if we're disabling all the planes.
9177 	 * It'll remain on the screen after the planes are re-enabled
9178 	 * if we don't.
9179 	 */
9180 	if (acrtc_state->active_planes == 0)
9181 		amdgpu_dm_commit_cursors(state);
9182 
9183 	/* update planes when needed */
9184 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9185 		struct drm_crtc *crtc = new_plane_state->crtc;
9186 		struct drm_crtc_state *new_crtc_state;
9187 		struct drm_framebuffer *fb = new_plane_state->fb;
9188 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9189 		bool plane_needs_flip;
9190 		struct dc_plane_state *dc_plane;
9191 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9192 
9193 		/* Cursor plane is handled after stream updates */
9194 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9195 			continue;
9196 
9197 		if (!fb || !crtc || pcrtc != crtc)
9198 			continue;
9199 
9200 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9201 		if (!new_crtc_state->active)
9202 			continue;
9203 
9204 		dc_plane = dm_new_plane_state->dc_state;
9205 
9206 		bundle->surface_updates[planes_count].surface = dc_plane;
9207 		if (new_pcrtc_state->color_mgmt_changed) {
9208 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9209 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9210 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9211 		}
9212 
9213 		fill_dc_scaling_info(dm->adev, new_plane_state,
9214 				     &bundle->scaling_infos[planes_count]);
9215 
9216 		bundle->surface_updates[planes_count].scaling_info =
9217 			&bundle->scaling_infos[planes_count];
9218 
9219 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9220 
9221 		pflip_present = pflip_present || plane_needs_flip;
9222 
9223 		if (!plane_needs_flip) {
9224 			planes_count += 1;
9225 			continue;
9226 		}
9227 
9228 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9229 
9230 		/*
9231 		 * Wait for all fences on this FB. Do limited wait to avoid
9232 		 * deadlock during GPU reset when this fence will not signal
9233 		 * but we hold reservation lock for the BO.
9234 		 */
9235 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9236 					  msecs_to_jiffies(5000));
9237 		if (unlikely(r <= 0))
9238 			DRM_ERROR("Waiting for fences timed out!");
9239 
9240 		fill_dc_plane_info_and_addr(
9241 			dm->adev, new_plane_state,
9242 			afb->tiling_flags,
9243 			&bundle->plane_infos[planes_count],
9244 			&bundle->flip_addrs[planes_count].address,
9245 			afb->tmz_surface, false);
9246 
9247 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9248 				 new_plane_state->plane->index,
9249 				 bundle->plane_infos[planes_count].dcc.enable);
9250 
9251 		bundle->surface_updates[planes_count].plane_info =
9252 			&bundle->plane_infos[planes_count];
9253 
9254 		/*
9255 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
9257 		 */
9258 		bundle->flip_addrs[planes_count].flip_immediate =
9259 			crtc->state->async_flip &&
9260 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9261 
9262 		timestamp_ns = ktime_get_ns();
9263 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9264 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9265 		bundle->surface_updates[planes_count].surface = dc_plane;
9266 
9267 		if (!bundle->surface_updates[planes_count].surface) {
9268 			DRM_ERROR("No surface for CRTC: id=%d\n",
9269 					acrtc_attach->crtc_id);
9270 			continue;
9271 		}
9272 
9273 		if (plane == pcrtc->primary)
9274 			update_freesync_state_on_stream(
9275 				dm,
9276 				acrtc_state,
9277 				acrtc_state->stream,
9278 				dc_plane,
9279 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9280 
9281 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9282 				 __func__,
9283 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9284 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9285 
9286 		planes_count += 1;
9287 
9288 	}
9289 
9290 	if (pflip_present) {
9291 		if (!vrr_active) {
9292 			/* Use old throttling in non-vrr fixed refresh rate mode
9293 			 * to keep flip scheduling based on target vblank counts
9294 			 * working in a backwards compatible way, e.g., for
9295 			 * clients using the GLX_OML_sync_control extension or
9296 			 * DRI3/Present extension with defined target_msc.
9297 			 */
9298 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9299 		}
9300 		else {
9301 			/* For variable refresh rate mode only:
9302 			 * Get vblank of last completed flip to avoid > 1 vrr
9303 			 * flips per video frame by use of throttling, but allow
9304 			 * flip programming anywhere in the possibly large
9305 			 * variable vrr vblank interval for fine-grained flip
9306 			 * timing control and more opportunity to avoid stutter
9307 			 * on late submission of flips.
9308 			 */
9309 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9310 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9311 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9312 		}
9313 
9314 		target_vblank = last_flip_vblank + wait_for_vblank;
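		/*
		 * wait_for_vblank is a bool, so target_vblank is either the
		 * vblank count of the last completed flip (no wait) or one
		 * vblank after it.
		 */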
9315 
9316 		/*
9317 		 * Wait until we're out of the vertical blank period before the one
9318 		 * targeted by the flip
9319 		 */
9320 		while ((acrtc_attach->enabled &&
9321 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9322 							    0, &vpos, &hpos, NULL,
9323 							    NULL, &pcrtc->hwmode)
9324 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9325 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9326 			(int)(target_vblank -
9327 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9328 			usleep_range(1000, 1100);
9329 		}
9330 
9331 		/**
9332 		 * Prepare the flip event for the pageflip interrupt to handle.
9333 		 *
9334 		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
9336 		 * from 0 -> n planes we have to skip a hardware generated event
9337 		 * and rely on sending it from software.
9338 		 */
9339 		if (acrtc_attach->base.state->event &&
9340 		    acrtc_state->active_planes > 0 &&
9341 		    !acrtc_state->force_dpms_off) {
9342 			drm_crtc_vblank_get(pcrtc);
9343 
9344 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9345 
9346 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9347 			prepare_flip_isr(acrtc_attach);
9348 
9349 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9350 		}
9351 
9352 		if (acrtc_state->stream) {
9353 			if (acrtc_state->freesync_vrr_info_changed)
9354 				bundle->stream_update.vrr_infopacket =
9355 					&acrtc_state->stream->vrr_infopacket;
9356 		}
9357 	}
9358 
9359 	/* Update the planes if changed or disable if we don't have any. */
9360 	if ((planes_count || acrtc_state->active_planes == 0) &&
9361 		acrtc_state->stream) {
9362 #if defined(CONFIG_DRM_AMD_DC_DCN)
9363 		/*
9364 		 * If PSR or idle optimizations are enabled then flush out
9365 		 * any pending work before hardware programming.
9366 		 */
9367 		if (dm->vblank_control_workqueue)
9368 			flush_workqueue(dm->vblank_control_workqueue);
9369 #endif
9370 
9371 		bundle->stream_update.stream = acrtc_state->stream;
9372 		if (new_pcrtc_state->mode_changed) {
9373 			bundle->stream_update.src = acrtc_state->stream->src;
9374 			bundle->stream_update.dst = acrtc_state->stream->dst;
9375 		}
9376 
9377 		if (new_pcrtc_state->color_mgmt_changed) {
9378 			/*
9379 			 * TODO: This isn't fully correct since we've actually
9380 			 * already modified the stream in place.
9381 			 */
9382 			bundle->stream_update.gamut_remap =
9383 				&acrtc_state->stream->gamut_remap_matrix;
9384 			bundle->stream_update.output_csc_transform =
9385 				&acrtc_state->stream->csc_color_matrix;
9386 			bundle->stream_update.out_transfer_func =
9387 				acrtc_state->stream->out_transfer_func;
9388 		}
9389 
9390 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9391 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9392 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9393 
9394 		/*
9395 		 * If FreeSync state on the stream has changed then we need to
9396 		 * re-adjust the min/max bounds now that DC doesn't handle this
9397 		 * as part of commit.
9398 		 */
9399 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9400 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9401 			dc_stream_adjust_vmin_vmax(
9402 				dm->dc, acrtc_state->stream,
9403 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9404 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9405 		}
9406 		mutex_lock(&dm->dc_lock);
9407 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9408 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9409 			amdgpu_dm_psr_disable(acrtc_state->stream);
9410 
9411 		dc_commit_updates_for_stream(dm->dc,
9412 						     bundle->surface_updates,
9413 						     planes_count,
9414 						     acrtc_state->stream,
9415 						     &bundle->stream_update,
9416 						     dc_state);
9417 
9418 		/**
9419 		 * Enable or disable the interrupts on the backend.
9420 		 *
9421 		 * Most pipes are put into power gating when unused.
9422 		 *
9423 		 * When power gating is enabled on a pipe we lose the
9424 		 * interrupt enablement state when power gating is disabled.
9425 		 *
9426 		 * So we need to update the IRQ control state in hardware
9427 		 * whenever the pipe turns on (since it could be previously
9428 		 * power gated) or off (since some pipes can't be power gated
9429 		 * on some ASICs).
9430 		 */
9431 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9432 			dm_update_pflip_irq_state(drm_to_adev(dev),
9433 						  acrtc_attach);
9434 
9435 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9436 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9437 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9438 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9439 
9440 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9441 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9442 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9443 			struct amdgpu_dm_connector *aconn =
9444 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9445 
9446 			if (aconn->psr_skip_count > 0)
9447 				aconn->psr_skip_count--;
9448 
9449 			/* Allow PSR when skip count is 0. */
9450 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9451 		} else {
9452 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9453 		}
9454 
9455 		mutex_unlock(&dm->dc_lock);
9456 	}
9457 
9458 	/*
9459 	 * Update cursor state *after* programming all the planes.
9460 	 * This avoids redundant programming in the case where we're going
9461 	 * to be disabling a single plane - those pipes are being disabled.
9462 	 */
9463 	if (acrtc_state->active_planes)
9464 		amdgpu_dm_commit_cursors(state);
9465 
9466 cleanup:
9467 	kfree(bundle);
9468 }
9469 
9470 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9471 				   struct drm_atomic_state *state)
9472 {
9473 	struct amdgpu_device *adev = drm_to_adev(dev);
9474 	struct amdgpu_dm_connector *aconnector;
9475 	struct drm_connector *connector;
9476 	struct drm_connector_state *old_con_state, *new_con_state;
9477 	struct drm_crtc_state *new_crtc_state;
9478 	struct dm_crtc_state *new_dm_crtc_state;
9479 	const struct dc_stream_status *status;
9480 	int i, inst;
9481 
9482 	/* Notify device removals. */
9483 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9484 		if (old_con_state->crtc != new_con_state->crtc) {
9485 			/* CRTC changes require notification. */
9486 			goto notify;
9487 		}
9488 
9489 		if (!new_con_state->crtc)
9490 			continue;
9491 
9492 		new_crtc_state = drm_atomic_get_new_crtc_state(
9493 			state, new_con_state->crtc);
9494 
9495 		if (!new_crtc_state)
9496 			continue;
9497 
9498 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9499 			continue;
9500 
9501 	notify:
9502 		aconnector = to_amdgpu_dm_connector(connector);
9503 
9504 		mutex_lock(&adev->dm.audio_lock);
9505 		inst = aconnector->audio_inst;
9506 		aconnector->audio_inst = -1;
9507 		mutex_unlock(&adev->dm.audio_lock);
9508 
9509 		amdgpu_dm_audio_eld_notify(adev, inst);
9510 	}
9511 
9512 	/* Notify audio device additions. */
9513 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9514 		if (!new_con_state->crtc)
9515 			continue;
9516 
9517 		new_crtc_state = drm_atomic_get_new_crtc_state(
9518 			state, new_con_state->crtc);
9519 
9520 		if (!new_crtc_state)
9521 			continue;
9522 
9523 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9524 			continue;
9525 
9526 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9527 		if (!new_dm_crtc_state->stream)
9528 			continue;
9529 
9530 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9531 		if (!status)
9532 			continue;
9533 
9534 		aconnector = to_amdgpu_dm_connector(connector);
9535 
9536 		mutex_lock(&adev->dm.audio_lock);
9537 		inst = status->audio_inst;
9538 		aconnector->audio_inst = inst;
9539 		mutex_unlock(&adev->dm.audio_lock);
9540 
9541 		amdgpu_dm_audio_eld_notify(adev, inst);
9542 	}
9543 }
9544 
9545 /*
9546  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9547  * @crtc_state: the DRM CRTC state
9548  * @stream_state: the DC stream state.
9549  *
9550  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9551  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9552  */
9553 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9554 						struct dc_stream_state *stream_state)
9555 {
9556 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9557 }
9558 
9559 /**
9560  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9561  * @state: The atomic state to commit
9562  *
9563  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
9565  * atomic check should have filtered anything non-kosher.
9566  */
9567 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9568 {
9569 	struct drm_device *dev = state->dev;
9570 	struct amdgpu_device *adev = drm_to_adev(dev);
9571 	struct amdgpu_display_manager *dm = &adev->dm;
9572 	struct dm_atomic_state *dm_state;
9573 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9574 	uint32_t i, j;
9575 	struct drm_crtc *crtc;
9576 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9577 	unsigned long flags;
9578 	bool wait_for_vblank = true;
9579 	struct drm_connector *connector;
9580 	struct drm_connector_state *old_con_state, *new_con_state;
9581 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9582 	int crtc_disable_count = 0;
9583 	bool mode_set_reset_required = false;
9584 
9585 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9586 
9587 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9588 
9589 	dm_state = dm_atomic_get_new_state(state);
9590 	if (dm_state && dm_state->context) {
9591 		dc_state = dm_state->context;
9592 	} else {
9593 		/* No state changes, retain current state. */
9594 		dc_state_temp = dc_create_state(dm->dc);
9595 		ASSERT(dc_state_temp);
9596 		dc_state = dc_state_temp;
9597 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9598 	}
9599 
9600 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9601 				       new_crtc_state, i) {
9602 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9603 
9604 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9605 
9606 		if (old_crtc_state->active &&
9607 		    (!new_crtc_state->active ||
9608 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9609 			manage_dm_interrupts(adev, acrtc, false);
9610 			dc_stream_release(dm_old_crtc_state->stream);
9611 		}
9612 	}
9613 
9614 	drm_atomic_helper_calc_timestamping_constants(state);
9615 
9616 	/* update changed items */
9617 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9618 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9619 
9620 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9621 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9622 
9623 		DRM_DEBUG_ATOMIC(
9624 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9625 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9626 			"connectors_changed:%d\n",
9627 			acrtc->crtc_id,
9628 			new_crtc_state->enable,
9629 			new_crtc_state->active,
9630 			new_crtc_state->planes_changed,
9631 			new_crtc_state->mode_changed,
9632 			new_crtc_state->active_changed,
9633 			new_crtc_state->connectors_changed);
9634 
9635 		/* Disable cursor if disabling crtc */
9636 		if (old_crtc_state->active && !new_crtc_state->active) {
9637 			struct dc_cursor_position position;
9638 
9639 			memset(&position, 0, sizeof(position));
9640 			mutex_lock(&dm->dc_lock);
9641 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9642 			mutex_unlock(&dm->dc_lock);
9643 		}
9644 
9645 		/* Copy all transient state flags into dc state */
9646 		if (dm_new_crtc_state->stream) {
9647 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9648 							    dm_new_crtc_state->stream);
9649 		}
9650 
		/* Handles the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
9654 
9655 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9656 
9657 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9658 
9659 			if (!dm_new_crtc_state->stream) {
9660 				/*
9661 				 * this could happen because of issues with
9662 				 * userspace notifications delivery.
9663 				 * In this case userspace tries to set mode on
9664 				 * display which is disconnected in fact.
9665 				 * dc_sink is NULL in this case on aconnector.
9666 				 * We expect reset mode will come soon.
9667 				 *
9668 				 * This can also happen when unplug is done
9669 				 * during resume sequence ended
9670 				 *
9671 				 * In this case, we want to pretend we still
9672 				 * have a sink to keep the pipe running so that
9673 				 * hw state is consistent with the sw state
9674 				 */
9675 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9676 						__func__, acrtc->base.base.id);
9677 				continue;
9678 			}
9679 
9680 			if (dm_old_crtc_state->stream)
9681 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9682 
9683 			pm_runtime_get_noresume(dev->dev);
9684 
9685 			acrtc->enabled = true;
9686 			acrtc->hw_mode = new_crtc_state->mode;
9687 			crtc->hwmode = new_crtc_state->mode;
9688 			mode_set_reset_required = true;
9689 		} else if (modereset_required(new_crtc_state)) {
9690 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9691 			/* i.e. reset mode */
9692 			if (dm_old_crtc_state->stream)
9693 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9694 
9695 			mode_set_reset_required = true;
9696 		}
9697 	} /* for_each_crtc_in_state() */
9698 
9699 	if (dc_state) {
		/* if there was a mode set or reset, disable eDP PSR */
9701 		if (mode_set_reset_required) {
9702 #if defined(CONFIG_DRM_AMD_DC_DCN)
9703 			if (dm->vblank_control_workqueue)
9704 				flush_workqueue(dm->vblank_control_workqueue);
9705 #endif
9706 			amdgpu_dm_psr_disable_all(dm);
9707 		}
9708 
9709 		dm_enable_per_frame_crtc_master_sync(dc_state);
9710 		mutex_lock(&dm->dc_lock);
9711 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9712 #if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
9716 #endif
9717 		mutex_unlock(&dm->dc_lock);
9718 	}
9719 
9720 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9721 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9722 
9723 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9724 
9725 		if (dm_new_crtc_state->stream != NULL) {
9726 			const struct dc_stream_status *status =
9727 					dc_stream_get_status(dm_new_crtc_state->stream);
9728 
9729 			if (!status)
9730 				status = dc_stream_get_status_from_state(dc_state,
9731 									 dm_new_crtc_state->stream);
9732 			if (!status)
9733 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9734 			else
9735 				acrtc->otg_inst = status->primary_otg_inst;
9736 		}
9737 	}
9738 #ifdef CONFIG_DRM_AMD_DC_HDCP
9739 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9740 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9741 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9742 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9743 
9744 		new_crtc_state = NULL;
9745 
9746 		if (acrtc)
9747 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9748 
9749 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9750 
9751 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9752 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9753 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9754 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9755 			dm_new_con_state->update_hdcp = true;
9756 			continue;
9757 		}
9758 
9759 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9760 			hdcp_update_display(
9761 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9762 				new_con_state->hdcp_content_type,
9763 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9764 	}
9765 #endif
9766 
9767 	/* Handle connector state changes */
9768 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9769 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9770 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9771 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9772 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9773 		struct dc_stream_update stream_update;
9774 		struct dc_info_packet hdr_packet;
9775 		struct dc_stream_status *status = NULL;
9776 		bool abm_changed, hdr_changed, scaling_changed;
9777 
9778 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9779 		memset(&stream_update, 0, sizeof(stream_update));
9780 
9781 		if (acrtc) {
9782 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9783 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9784 		}
9785 
9786 		/* Skip any modesets/resets */
9787 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9788 			continue;
9789 
9790 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9791 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9792 
9793 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9794 							     dm_old_con_state);
9795 
9796 		abm_changed = dm_new_crtc_state->abm_level !=
9797 			      dm_old_crtc_state->abm_level;
9798 
9799 		hdr_changed =
9800 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9801 
9802 		if (!scaling_changed && !abm_changed && !hdr_changed)
9803 			continue;
9804 
9805 		stream_update.stream = dm_new_crtc_state->stream;
9806 		if (scaling_changed) {
9807 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9808 					dm_new_con_state, dm_new_crtc_state->stream);
9809 
9810 			stream_update.src = dm_new_crtc_state->stream->src;
9811 			stream_update.dst = dm_new_crtc_state->stream->dst;
9812 		}
9813 
9814 		if (abm_changed) {
9815 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9816 
9817 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9818 		}
9819 
9820 		if (hdr_changed) {
9821 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9822 			stream_update.hdr_static_metadata = &hdr_packet;
9823 		}
9824 
9825 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9826 
9827 		if (WARN_ON(!status))
9828 			continue;
9829 
9830 		WARN_ON(!status->plane_count);
9831 
9832 		/*
9833 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9834 		 * Here we create an empty update on each plane.
9835 		 * To fix this, DC should permit updating only stream properties.
9836 		 */
9837 		for (j = 0; j < status->plane_count; j++)
9838 			dummy_updates[j].surface = status->plane_states[0];
9839 
9840 
9841 		mutex_lock(&dm->dc_lock);
9842 		dc_commit_updates_for_stream(dm->dc,
9843 						     dummy_updates,
9844 						     status->plane_count,
9845 						     dm_new_crtc_state->stream,
9846 						     &stream_update,
9847 						     dc_state);
9848 		mutex_unlock(&dm->dc_lock);
9849 	}
9850 
9851 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9852 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9853 				      new_crtc_state, i) {
9854 		if (old_crtc_state->active && !new_crtc_state->active)
9855 			crtc_disable_count++;
9856 
9857 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9858 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9859 
9860 		/* For freesync config update on crtc state and params for irq */
9861 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9862 
9863 		/* Handle vrr on->off / off->on transitions */
9864 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9865 						dm_new_crtc_state);
9866 	}
9867 
9868 	/**
9869 	 * Enable interrupts for CRTCs that are newly enabled or went through
9870 	 * a modeset. It was intentionally deferred until after the front end
9871 	 * state was modified to wait until the OTG was on and so the IRQ
9872 	 * handlers didn't access stale or invalid state.
9873 	 */
9874 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9875 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9876 #ifdef CONFIG_DEBUG_FS
9877 		bool configure_crc = false;
9878 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9879 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9880 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9881 #endif
9882 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9883 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9884 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9885 #endif
9886 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9887 
9888 		if (new_crtc_state->active &&
9889 		    (!old_crtc_state->active ||
9890 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9891 			dc_stream_retain(dm_new_crtc_state->stream);
9892 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9893 			manage_dm_interrupts(adev, acrtc, true);
9894 
9895 #ifdef CONFIG_DEBUG_FS
9896 			/**
9897 			 * Frontend may have changed so reapply the CRC capture
9898 			 * settings for the stream.
9899 			 */
9900 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9901 
9902 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9903 				configure_crc = true;
9904 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9905 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9906 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9907 					acrtc->dm_irq_params.crc_window.update_win = true;
9908 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9909 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9910 					crc_rd_wrk->crtc = crtc;
9911 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9912 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9913 				}
9914 #endif
9915 			}
9916 
9917 			if (configure_crc)
9918 				if (amdgpu_dm_crtc_configure_crc_source(
9919 					crtc, dm_new_crtc_state, cur_crc_src))
9920 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9921 #endif
9922 		}
9923 	}
9924 
9925 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9926 		if (new_crtc_state->async_flip)
9927 			wait_for_vblank = false;
9928 
9929 	/* update planes when needed per crtc*/
9930 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9931 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9932 
9933 		if (dm_new_crtc_state->stream)
9934 			amdgpu_dm_commit_planes(state, dc_state, dev,
9935 						dm, crtc, wait_for_vblank);
9936 	}
9937 
9938 	/* Update audio instances for each connector. */
9939 	amdgpu_dm_commit_audio(dev, state);
9940 
9941 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9942 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9943 	/* restore the backlight level */
9944 	for (i = 0; i < dm->num_of_edps; i++) {
9945 		if (dm->backlight_dev[i] &&
9946 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9947 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9948 	}
9949 #endif
9950 	/*
9951 	 * send vblank event on all events not handled in flip and
9952 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9953 	 */
9954 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9955 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9956 
9957 		if (new_crtc_state->event)
9958 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9959 
9960 		new_crtc_state->event = NULL;
9961 	}
9962 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9963 
9964 	/* Signal HW programming completion */
9965 	drm_atomic_helper_commit_hw_done(state);
9966 
9967 	if (wait_for_vblank)
9968 		drm_atomic_helper_wait_for_flip_done(dev, state);
9969 
9970 	drm_atomic_helper_cleanup_planes(dev, state);
9971 
9972 	/* return the stolen vga memory back to VRAM */
9973 	if (!adev->mman.keep_stolen_vga_memory)
9974 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9975 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9976 
9977 	/*
9978 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9979 	 * so we can put the GPU into runtime suspend if we're not driving any
9980 	 * displays anymore
9981 	 */
9982 	for (i = 0; i < crtc_disable_count; i++)
9983 		pm_runtime_put_autosuspend(dev->dev);
9984 	pm_runtime_mark_last_busy(dev->dev);
9985 
9986 	if (dc_state_temp)
9987 		dc_release_state(dc_state_temp);
9988 }
9989 
9990 
9991 static int dm_force_atomic_commit(struct drm_connector *connector)
9992 {
9993 	int ret = 0;
9994 	struct drm_device *ddev = connector->dev;
9995 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9996 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9997 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9998 	struct drm_connector_state *conn_state;
9999 	struct drm_crtc_state *crtc_state;
10000 	struct drm_plane_state *plane_state;
10001 
10002 	if (!state)
10003 		return -ENOMEM;
10004 
10005 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
10006 
	/* Construct an atomic state to restore the previous display settings */
10008 
10009 	/*
10010 	 * Attach connectors to drm_atomic_state
10011 	 */
10012 	conn_state = drm_atomic_get_connector_state(state, connector);
10013 
10014 	ret = PTR_ERR_OR_ZERO(conn_state);
10015 	if (ret)
10016 		goto out;
10017 
	/* Attach crtc to drm_atomic_state */
10019 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10020 
10021 	ret = PTR_ERR_OR_ZERO(crtc_state);
10022 	if (ret)
10023 		goto out;
10024 
10025 	/* force a restore */
10026 	crtc_state->mode_changed = true;
10027 
10028 	/* Attach plane to drm_atomic_state */
10029 	plane_state = drm_atomic_get_plane_state(state, plane);
10030 
10031 	ret = PTR_ERR_OR_ZERO(plane_state);
10032 	if (ret)
10033 		goto out;
10034 
10035 	/* Call commit internally with the state we just constructed */
10036 	ret = drm_atomic_commit(state);
10037 
10038 out:
10039 	drm_atomic_state_put(state);
10040 	if (ret)
10041 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10042 
10043 	return ret;
10044 }
10045 
10046 /*
10047  * This function handles all cases when set mode does not come upon hotplug.
10048  * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
10050  */
10051 void dm_restore_drm_connector_state(struct drm_device *dev,
10052 				    struct drm_connector *connector)
10053 {
10054 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10055 	struct amdgpu_crtc *disconnected_acrtc;
10056 	struct dm_crtc_state *acrtc_state;
10057 
10058 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10059 		return;
10060 
10061 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10062 	if (!disconnected_acrtc)
10063 		return;
10064 
10065 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10066 	if (!acrtc_state->stream)
10067 		return;
10068 
10069 	/*
10070 	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we cannot rely on a usermode call
	 * to turn on the display, so we do it here.
10073 	 */
10074 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10075 		dm_force_atomic_commit(&aconnector->base);
10076 }
10077 
10078 /*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
10081  */
10082 static int do_aquire_global_lock(struct drm_device *dev,
10083 				 struct drm_atomic_state *state)
10084 {
10085 	struct drm_crtc *crtc;
10086 	struct drm_crtc_commit *commit;
10087 	long ret;
10088 
10089 	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it, the
	 * extra locks we are locking here will get released too.
10093 	 */
10094 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10095 	if (ret)
10096 		return ret;
10097 
10098 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10099 		spin_lock(&crtc->commit_lock);
10100 		commit = list_first_entry_or_null(&crtc->commit_list,
10101 				struct drm_crtc_commit, commit_entry);
10102 		if (commit)
10103 			drm_crtc_commit_get(commit);
10104 		spin_unlock(&crtc->commit_lock);
10105 
10106 		if (!commit)
10107 			continue;
10108 
10109 		/*
		 * Make sure all pending HW programming has completed and
		 * all page flips are done.
10112 		 */
10113 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10114 
10115 		if (ret > 0)
10116 			ret = wait_for_completion_interruptible_timeout(
10117 					&commit->flip_done, 10*HZ);
10118 
10119 		if (ret == 0)
10120 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10121 				  "timed out\n", crtc->base.id, crtc->name);
10122 
10123 		drm_crtc_commit_put(commit);
10124 	}
10125 
10126 	return ret < 0 ? ret : 0;
10127 }
10128 
10129 static void get_freesync_config_for_crtc(
10130 	struct dm_crtc_state *new_crtc_state,
10131 	struct dm_connector_state *new_con_state)
10132 {
10133 	struct mod_freesync_config config = {0};
10134 	struct amdgpu_dm_connector *aconnector =
10135 			to_amdgpu_dm_connector(new_con_state->base.connector);
10136 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10137 	int vrefresh = drm_mode_vrefresh(mode);
10138 	bool fs_vid_mode = false;
10139 
10140 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10141 					vrefresh >= aconnector->min_vfreq &&
10142 					vrefresh <= aconnector->max_vfreq;
10143 
10144 	if (new_crtc_state->vrr_supported) {
10145 		new_crtc_state->stream->ignore_msa_timing_param = true;
10146 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10147 
10148 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10149 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10150 		config.vsif_supported = true;
10151 		config.btr = true;
10152 
10153 		if (fs_vid_mode) {
10154 			config.state = VRR_STATE_ACTIVE_FIXED;
10155 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10156 			goto out;
10157 		} else if (new_crtc_state->base.vrr_enabled) {
10158 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10159 		} else {
10160 			config.state = VRR_STATE_INACTIVE;
10161 		}
10162 	}
10163 out:
10164 	new_crtc_state->freesync_config = config;
10165 }
10166 
10167 static void reset_freesync_config_for_crtc(
10168 	struct dm_crtc_state *new_crtc_state)
10169 {
10170 	new_crtc_state->vrr_supported = false;
10171 
10172 	memset(&new_crtc_state->vrr_infopacket, 0,
10173 	       sizeof(new_crtc_state->vrr_infopacket));
10174 }
10175 
10176 static bool
10177 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10178 				 struct drm_crtc_state *new_crtc_state)
10179 {
10180 	struct drm_display_mode old_mode, new_mode;
10181 
10182 	if (!old_crtc_state || !new_crtc_state)
10183 		return false;
10184 
10185 	old_mode = old_crtc_state->mode;
10186 	new_mode = new_crtc_state->mode;
10187 
10188 	if (old_mode.clock       == new_mode.clock &&
10189 	    old_mode.hdisplay    == new_mode.hdisplay &&
10190 	    old_mode.vdisplay    == new_mode.vdisplay &&
10191 	    old_mode.htotal      == new_mode.htotal &&
10192 	    old_mode.vtotal      != new_mode.vtotal &&
10193 	    old_mode.hsync_start == new_mode.hsync_start &&
10194 	    old_mode.vsync_start != new_mode.vsync_start &&
10195 	    old_mode.hsync_end   == new_mode.hsync_end &&
10196 	    old_mode.vsync_end   != new_mode.vsync_end &&
10197 	    old_mode.hskew       == new_mode.hskew &&
10198 	    old_mode.vscan       == new_mode.vscan &&
10199 	    (old_mode.vsync_end - old_mode.vsync_start) ==
10200 	    (new_mode.vsync_end - new_mode.vsync_start))
10201 		return true;
10202 
10203 	return false;
10204 }
10205 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10207 	uint64_t num, den, res;
10208 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10209 
10210 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10211 
10212 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10213 	den = (unsigned long long)new_crtc_state->mode.htotal *
10214 	      (unsigned long long)new_crtc_state->mode.vtotal;
10215 
10216 	res = div_u64(num, den);
10217 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10218 }
10219 
10220 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10221 			 struct drm_atomic_state *state,
10222 			 struct drm_crtc *crtc,
10223 			 struct drm_crtc_state *old_crtc_state,
10224 			 struct drm_crtc_state *new_crtc_state,
10225 			 bool enable,
10226 			 bool *lock_and_validation_needed)
10227 {
10228 	struct dm_atomic_state *dm_state = NULL;
10229 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10230 	struct dc_stream_state *new_stream;
10231 	int ret = 0;
10232 
10233 	/*
10234 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10235 	 * update changed items
10236 	 */
10237 	struct amdgpu_crtc *acrtc = NULL;
10238 	struct amdgpu_dm_connector *aconnector = NULL;
10239 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10240 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10241 
10242 	new_stream = NULL;
10243 
10244 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10245 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10246 	acrtc = to_amdgpu_crtc(crtc);
10247 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10248 
10249 	/* TODO This hack should go away */
10250 	if (aconnector && enable) {
10251 		/* Make sure fake sink is created in plug-in scenario */
10252 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10253 							    &aconnector->base);
10254 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10255 							    &aconnector->base);
10256 
10257 		if (IS_ERR(drm_new_conn_state)) {
10258 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10259 			goto fail;
10260 		}
10261 
10262 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10263 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10264 
10265 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10266 			goto skip_modeset;
10267 
10268 		new_stream = create_validate_stream_for_sink(aconnector,
10269 							     &new_crtc_state->mode,
10270 							     dm_new_conn_state,
10271 							     dm_old_crtc_state->stream);
10272 
10273 		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
10278 		 */
10279 
10280 		if (!new_stream) {
10281 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10282 					__func__, acrtc->base.base.id);
10283 			ret = -ENOMEM;
10284 			goto fail;
10285 		}
10286 
10287 		/*
10288 		 * TODO: Check VSDB bits to decide whether this should
10289 		 * be enabled or not.
10290 		 */
10291 		new_stream->triggered_crtc_reset.enabled =
10292 			dm->force_timing_sync;
10293 
10294 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10295 
10296 		ret = fill_hdr_info_packet(drm_new_conn_state,
10297 					   &new_stream->hdr_static_metadata);
10298 		if (ret)
10299 			goto fail;
10300 
10301 		/*
10302 		 * If we already removed the old stream from the context
10303 		 * (and set the new stream to NULL) then we can't reuse
10304 		 * the old stream even if the stream and scaling are unchanged.
10305 		 * We'll hit the BUG_ON and black screen.
10306 		 *
10307 		 * TODO: Refactor this function to allow this check to work
10308 		 * in all conditions.
10309 		 */
10310 		if (dm_new_crtc_state->stream &&
10311 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10312 			goto skip_modeset;
10313 
10314 		if (dm_new_crtc_state->stream &&
10315 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10316 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10317 			new_crtc_state->mode_changed = false;
10318 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10319 					 new_crtc_state->mode_changed);
10320 		}
10321 	}
10322 
10323 	/* mode_changed flag may get updated above, need to check again */
10324 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10325 		goto skip_modeset;
10326 
10327 	DRM_DEBUG_ATOMIC(
10328 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10329 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10330 		"connectors_changed:%d\n",
10331 		acrtc->crtc_id,
10332 		new_crtc_state->enable,
10333 		new_crtc_state->active,
10334 		new_crtc_state->planes_changed,
10335 		new_crtc_state->mode_changed,
10336 		new_crtc_state->active_changed,
10337 		new_crtc_state->connectors_changed);
10338 
10339 	/* Remove stream for any changed/disabled CRTC */
10340 	if (!enable) {
10341 
10342 		if (!dm_old_crtc_state->stream)
10343 			goto skip_modeset;
10344 
10345 		if (dm_new_crtc_state->stream &&
10346 		    is_timing_unchanged_for_freesync(new_crtc_state,
10347 						     old_crtc_state)) {
10348 			new_crtc_state->mode_changed = false;
10349 			DRM_DEBUG_DRIVER(
10350 				"Mode change not required for front porch change, "
10351 				"setting mode_changed to %d",
10352 				new_crtc_state->mode_changed);
10353 
10354 			set_freesync_fixed_config(dm_new_crtc_state);
10355 
10356 			goto skip_modeset;
10357 		} else if (aconnector &&
10358 			   is_freesync_video_mode(&new_crtc_state->mode,
10359 						  aconnector)) {
10360 			struct drm_display_mode *high_mode;
10361 
10362 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10363 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10364 				set_freesync_fixed_config(dm_new_crtc_state);
10365 			}
10366 		}
10367 
10368 		ret = dm_atomic_get_state(state, &dm_state);
10369 		if (ret)
10370 			goto fail;
10371 
10372 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10373 				crtc->base.id);
10374 
10375 		/* i.e. reset mode */
10376 		if (dc_remove_stream_from_ctx(
10377 				dm->dc,
10378 				dm_state->context,
10379 				dm_old_crtc_state->stream) != DC_OK) {
10380 			ret = -EINVAL;
10381 			goto fail;
10382 		}
10383 
10384 		dc_stream_release(dm_old_crtc_state->stream);
10385 		dm_new_crtc_state->stream = NULL;
10386 
10387 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10388 
10389 		*lock_and_validation_needed = true;
10390 
10391 	} else {/* Add stream for any updated/enabled CRTC */
10392 		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream when
		 * MST connectors added in chained (daisy-chain) mode are not found in
		 * the existing crtc_state. TODO: need to dig out the root cause of this.
10396 		 */
10397 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10398 			goto skip_modeset;
10399 
10400 		if (modereset_required(new_crtc_state))
10401 			goto skip_modeset;
10402 
10403 		if (modeset_required(new_crtc_state, new_stream,
10404 				     dm_old_crtc_state->stream)) {
10405 
10406 			WARN_ON(dm_new_crtc_state->stream);
10407 
10408 			ret = dm_atomic_get_state(state, &dm_state);
10409 			if (ret)
10410 				goto fail;
10411 
10412 			dm_new_crtc_state->stream = new_stream;
10413 
10414 			dc_stream_retain(new_stream);
10415 
10416 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10417 					 crtc->base.id);
10418 
10419 			if (dc_add_stream_to_ctx(
10420 					dm->dc,
10421 					dm_state->context,
10422 					dm_new_crtc_state->stream) != DC_OK) {
10423 				ret = -EINVAL;
10424 				goto fail;
10425 			}
10426 
10427 			*lock_and_validation_needed = true;
10428 		}
10429 	}
10430 
10431 skip_modeset:
10432 	/* Release extra reference */
10433 	if (new_stream)
		dc_stream_release(new_stream);
10435 
10436 	/*
10437 	 * We want to do dc stream updates that do not require a
10438 	 * full modeset below.
10439 	 */
10440 	if (!(enable && aconnector && new_crtc_state->active))
10441 		return 0;
10442 	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling the CRTC (its stream has just
	 *    been added to the dc context, or is already on the context),
	 * 2. It has a valid connector attached, and
	 * 3. It is currently active and enabled.
10448 	 * => The dc stream state currently exists.
10449 	 */
10450 	BUG_ON(dm_new_crtc_state->stream == NULL);
10451 
10452 	/* Scaling or underscan settings */
10453 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10454 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10455 		update_stream_scaling_settings(
10456 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10457 
10458 	/* ABM settings */
10459 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10460 
10461 	/*
10462 	 * Color management settings. We also update color properties
10463 	 * when a modeset is needed, to ensure it gets reprogrammed.
10464 	 */
10465 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10466 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10467 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10468 		if (ret)
10469 			goto fail;
10470 	}
10471 
10472 	/* Update Freesync settings. */
10473 	get_freesync_config_for_crtc(dm_new_crtc_state,
10474 				     dm_new_conn_state);
10475 
10476 	return ret;
10477 
10478 fail:
10479 	if (new_stream)
10480 		dc_stream_release(new_stream);
10481 	return ret;
10482 }
10483 
10484 static bool should_reset_plane(struct drm_atomic_state *state,
10485 			       struct drm_plane *plane,
10486 			       struct drm_plane_state *old_plane_state,
10487 			       struct drm_plane_state *new_plane_state)
10488 {
10489 	struct drm_plane *other;
10490 	struct drm_plane_state *old_other_state, *new_other_state;
10491 	struct drm_crtc_state *new_crtc_state;
10492 	int i;
10493 
10494 	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
10497 	 * the stream.
10498 	 */
10499 	if (state->allow_modeset)
10500 		return true;
10501 
10502 	/* Exit early if we know that we're adding or removing the plane. */
10503 	if (old_plane_state->crtc != new_plane_state->crtc)
10504 		return true;
10505 
10506 	/* old crtc == new_crtc == NULL, plane not in context. */
10507 	if (!new_plane_state->crtc)
10508 		return false;
10509 
10510 	new_crtc_state =
10511 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10512 
10513 	if (!new_crtc_state)
10514 		return true;
10515 
10516 	/* CRTC Degamma changes currently require us to recreate planes. */
10517 	if (new_crtc_state->color_mgmt_changed)
10518 		return true;
10519 
10520 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10521 		return true;
10522 
10523 	/*
10524 	 * If there are any new primary or overlay planes being added or
10525 	 * removed then the z-order can potentially change. To ensure
10526 	 * correct z-order and pipe acquisition the current DC architecture
10527 	 * requires us to remove and recreate all existing planes.
10528 	 *
10529 	 * TODO: Come up with a more elegant solution for this.
10530 	 */
10531 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
10534 			continue;
10535 
10536 		if (old_other_state->crtc != new_plane_state->crtc &&
10537 		    new_other_state->crtc != new_plane_state->crtc)
10538 			continue;
10539 
10540 		if (old_other_state->crtc != new_other_state->crtc)
10541 			return true;
10542 
10543 		/* Src/dst size and scaling updates. */
10544 		if (old_other_state->src_w != new_other_state->src_w ||
10545 		    old_other_state->src_h != new_other_state->src_h ||
10546 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10547 		    old_other_state->crtc_h != new_other_state->crtc_h)
10548 			return true;
10549 
10550 		/* Rotation / mirroring updates. */
10551 		if (old_other_state->rotation != new_other_state->rotation)
10552 			return true;
10553 
10554 		/* Blending updates. */
10555 		if (old_other_state->pixel_blend_mode !=
10556 		    new_other_state->pixel_blend_mode)
10557 			return true;
10558 
10559 		/* Alpha updates. */
10560 		if (old_other_state->alpha != new_other_state->alpha)
10561 			return true;
10562 
10563 		/* Colorspace changes. */
10564 		if (old_other_state->color_range != new_other_state->color_range ||
10565 		    old_other_state->color_encoding != new_other_state->color_encoding)
10566 			return true;
10567 
10568 		/* Framebuffer checks fall at the end. */
10569 		if (!old_other_state->fb || !new_other_state->fb)
10570 			continue;
10571 
10572 		/* Pixel format changes can require bandwidth updates. */
10573 		if (old_other_state->fb->format != new_other_state->fb->format)
10574 			return true;
10575 
10576 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10577 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10578 
10579 		/* Tiling and DCC changes also require bandwidth updates. */
10580 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10581 		    old_afb->base.modifier != new_afb->base.modifier)
10582 			return true;
10583 	}
10584 
10585 	return false;
10586 }
10587 
10588 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10589 			      struct drm_plane_state *new_plane_state,
10590 			      struct drm_framebuffer *fb)
10591 {
10592 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10593 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10594 	unsigned int pitch;
10595 	bool linear;
10596 
10597 	if (fb->width > new_acrtc->max_cursor_width ||
10598 	    fb->height > new_acrtc->max_cursor_height) {
10599 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10600 				 new_plane_state->fb->width,
10601 				 new_plane_state->fb->height);
10602 		return -EINVAL;
10603 	}
10604 	if (new_plane_state->src_w != fb->width << 16 ||
10605 	    new_plane_state->src_h != fb->height << 16) {
10606 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10607 		return -EINVAL;
10608 	}
10609 
10610 	/* Pitch in pixels */
10611 	pitch = fb->pitches[0] / fb->format->cpp[0];
10612 
10613 	if (fb->width != pitch) {
10614 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10615 				 fb->width, pitch);
10616 		return -EINVAL;
10617 	}
10618 
10619 	switch (pitch) {
10620 	case 64:
10621 	case 128:
10622 	case 256:
10623 		/* FB pitch is supported by cursor plane */
10624 		break;
10625 	default:
10626 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10627 		return -EINVAL;
10628 	}
10629 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10632 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10633 		if (adev->family < AMDGPU_FAMILY_AI) {
10634 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10635 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10636 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10637 		} else {
10638 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10639 		}
10640 		if (!linear) {
10641 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10642 			return -EINVAL;
10643 		}
10644 	}
10645 
10646 	return 0;
10647 }
10648 
10649 static int dm_update_plane_state(struct dc *dc,
10650 				 struct drm_atomic_state *state,
10651 				 struct drm_plane *plane,
10652 				 struct drm_plane_state *old_plane_state,
10653 				 struct drm_plane_state *new_plane_state,
10654 				 bool enable,
10655 				 bool *lock_and_validation_needed)
10656 {
10657 
10658 	struct dm_atomic_state *dm_state = NULL;
10659 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10660 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10661 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10662 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10663 	struct amdgpu_crtc *new_acrtc;
10664 	bool needs_reset;
10665 	int ret = 0;
10666 
10667 
10668 	new_plane_crtc = new_plane_state->crtc;
10669 	old_plane_crtc = old_plane_state->crtc;
10670 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10671 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10672 
10673 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10674 		if (!enable || !new_plane_crtc ||
10675 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10676 			return 0;
10677 
10678 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10679 
10680 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10681 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10682 			return -EINVAL;
10683 		}
10684 
10685 		if (new_plane_state->fb) {
10686 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10687 						 new_plane_state->fb);
10688 			if (ret)
10689 				return ret;
10690 		}
10691 
10692 		return 0;
10693 	}
10694 
10695 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10696 					 new_plane_state);
10697 
10698 	/* Remove any changed/removed planes */
10699 	if (!enable) {
10700 		if (!needs_reset)
10701 			return 0;
10702 
10703 		if (!old_plane_crtc)
10704 			return 0;
10705 
10706 		old_crtc_state = drm_atomic_get_old_crtc_state(
10707 				state, old_plane_crtc);
10708 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10709 
10710 		if (!dm_old_crtc_state->stream)
10711 			return 0;
10712 
10713 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10714 				plane->base.id, old_plane_crtc->base.id);
10715 
10716 		ret = dm_atomic_get_state(state, &dm_state);
10717 		if (ret)
10718 			return ret;
10719 
10720 		if (!dc_remove_plane_from_context(
10721 				dc,
10722 				dm_old_crtc_state->stream,
10723 				dm_old_plane_state->dc_state,
10724 				dm_state->context)) {
10725 
10726 			return -EINVAL;
10727 		}
10728 
10729 
10730 		dc_plane_state_release(dm_old_plane_state->dc_state);
10731 		dm_new_plane_state->dc_state = NULL;
10732 
10733 		*lock_and_validation_needed = true;
10734 
10735 	} else { /* Add new planes */
10736 		struct dc_plane_state *dc_new_plane_state;
10737 
10738 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10739 			return 0;
10740 
10741 		if (!new_plane_crtc)
10742 			return 0;
10743 
10744 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10745 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10746 
10747 		if (!dm_new_crtc_state->stream)
10748 			return 0;
10749 
10750 		if (!needs_reset)
10751 			return 0;
10752 
10753 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10754 		if (ret)
10755 			return ret;
10756 
10757 		WARN_ON(dm_new_plane_state->dc_state);
10758 
10759 		dc_new_plane_state = dc_create_plane_state(dc);
10760 		if (!dc_new_plane_state)
10761 			return -ENOMEM;
10762 
10763 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10764 				 plane->base.id, new_plane_crtc->base.id);
10765 
10766 		ret = fill_dc_plane_attributes(
10767 			drm_to_adev(new_plane_crtc->dev),
10768 			dc_new_plane_state,
10769 			new_plane_state,
10770 			new_crtc_state);
10771 		if (ret) {
10772 			dc_plane_state_release(dc_new_plane_state);
10773 			return ret;
10774 		}
10775 
10776 		ret = dm_atomic_get_state(state, &dm_state);
10777 		if (ret) {
10778 			dc_plane_state_release(dc_new_plane_state);
10779 			return ret;
10780 		}
10781 
10782 		/*
10783 		 * Any atomic check errors that occur after this will
10784 		 * not need a release. The plane state will be attached
10785 		 * to the stream, and therefore part of the atomic
10786 		 * state. It'll be released when the atomic state is
10787 		 * cleaned.
10788 		 */
10789 		if (!dc_add_plane_to_context(
10790 				dc,
10791 				dm_new_crtc_state->stream,
10792 				dc_new_plane_state,
10793 				dm_state->context)) {
10794 
10795 			dc_plane_state_release(dc_new_plane_state);
10796 			return -EINVAL;
10797 		}
10798 
10799 		dm_new_plane_state->dc_state = dc_new_plane_state;
10800 
10801 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10802 
10803 		/* Tell DC to do a full surface update every time there
10804 		 * is a plane change. Inefficient, but works for now.
10805 		 */
10806 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10807 
10808 		*lock_and_validation_needed = true;
10809 	}
10810 
10811 
10812 	return ret;
10813 }
10814 
10815 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10816 				       int *src_w, int *src_h)
10817 {
10818 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10819 	case DRM_MODE_ROTATE_90:
10820 	case DRM_MODE_ROTATE_270:
10821 		*src_w = plane_state->src_h >> 16;
10822 		*src_h = plane_state->src_w >> 16;
10823 		break;
10824 	case DRM_MODE_ROTATE_0:
10825 	case DRM_MODE_ROTATE_180:
10826 	default:
10827 		*src_w = plane_state->src_w >> 16;
10828 		*src_h = plane_state->src_h >> 16;
10829 		break;
10830 	}
10831 }
10832 
10833 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10834 				struct drm_crtc *crtc,
10835 				struct drm_crtc_state *new_crtc_state)
10836 {
10837 	struct drm_plane *cursor = crtc->cursor, *underlying;
10838 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10839 	int i;
10840 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10841 	int cursor_src_w, cursor_src_h;
10842 	int underlying_src_w, underlying_src_h;
10843 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * blending properties match the underlying planes'.
	 */
10848 
10849 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;
10853 
10854 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10855 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10856 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10857 
10858 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10859 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10860 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10861 			continue;
10862 
10863 		/* Ignore disabled planes */
10864 		if (!new_underlying_state->fb)
10865 			continue;
10866 
10867 		dm_get_oriented_plane_size(new_underlying_state,
10868 					   &underlying_src_w, &underlying_src_h);
10869 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10870 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10871 
10872 		if (cursor_scale_w != underlying_scale_w ||
10873 		    cursor_scale_h != underlying_scale_h) {
10874 			drm_dbg_atomic(crtc->dev,
10875 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10876 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10877 			return -EINVAL;
10878 		}
10879 
10880 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10881 		if (new_underlying_state->crtc_x <= 0 &&
10882 		    new_underlying_state->crtc_y <= 0 &&
10883 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10884 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10885 			break;
10886 	}
10887 
10888 	return 0;
10889 }
10890 
10891 #if defined(CONFIG_DRM_AMD_DC_DCN)
10892 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10893 {
10894 	struct drm_connector *connector;
10895 	struct drm_connector_state *conn_state, *old_conn_state;
10896 	struct amdgpu_dm_connector *aconnector = NULL;
10897 	int i;
10898 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10899 		if (!conn_state->crtc)
10900 			conn_state = old_conn_state;
10901 
10902 		if (conn_state->crtc != crtc)
10903 			continue;
10904 
10905 		aconnector = to_amdgpu_dm_connector(connector);
10906 		if (!aconnector->port || !aconnector->mst_port)
10907 			aconnector = NULL;
10908 		else
10909 			break;
10910 	}
10911 
10912 	if (!aconnector)
10913 		return 0;
10914 
10915 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10916 }
10917 #endif
10918 
10919 /**
10920  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10921  * @dev: The DRM device
10922  * @state: The atomic state to commit
10923  *
10924  * Validate that the given atomic state is programmable by DC into hardware.
10925  * This involves constructing a &struct dc_state reflecting the new hardware
10926  * state we wish to commit, then querying DC to see if it is programmable. It's
10927  * important not to modify the existing DC state. Otherwise, atomic_check
10928  * may unexpectedly commit hardware changes.
10929  *
10930  * When validating the DC state, it's important that the right locks are
10931  * acquired. For full updates case which removes/adds/updates streams on one
10932  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10933  * that any such full update commit will wait for completion of any outstanding
10934  * flip using DRMs synchronization events.
10935  *
10936  * Note that DM adds the affected connectors for all CRTCs in state, when that
10937  * might not seem necessary. This is because DC stream creation requires the
10938  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10939  * be possible but non-trivial - a possible TODO item.
10940  *
 * Return: 0 on success, or a negative error code if validation failed.
10942  */
10943 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10944 				  struct drm_atomic_state *state)
10945 {
10946 	struct amdgpu_device *adev = drm_to_adev(dev);
10947 	struct dm_atomic_state *dm_state = NULL;
10948 	struct dc *dc = adev->dm.dc;
10949 	struct drm_connector *connector;
10950 	struct drm_connector_state *old_con_state, *new_con_state;
10951 	struct drm_crtc *crtc;
10952 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10953 	struct drm_plane *plane;
10954 	struct drm_plane_state *old_plane_state, *new_plane_state;
10955 	enum dc_status status;
10956 	int ret, i;
10957 	bool lock_and_validation_needed = false;
10958 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10959 #if defined(CONFIG_DRM_AMD_DC_DCN)
10960 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10961 	struct drm_dp_mst_topology_state *mst_state;
10962 	struct drm_dp_mst_topology_mgr *mgr;
10963 #endif
10964 
10965 	trace_amdgpu_dm_atomic_check_begin(state);
10966 
10967 	ret = drm_atomic_helper_check_modeset(dev, state);
10968 	if (ret) {
10969 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10970 		goto fail;
10971 	}
10972 
10973 	/* Check connector changes */
10974 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10975 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10976 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10977 
10978 		/* Skip connectors that are disabled or part of modeset already. */
10979 		if (!old_con_state->crtc && !new_con_state->crtc)
10980 			continue;
10981 
10982 		if (!new_con_state->crtc)
10983 			continue;
10984 
10985 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10986 		if (IS_ERR(new_crtc_state)) {
10987 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10988 			ret = PTR_ERR(new_crtc_state);
10989 			goto fail;
10990 		}
10991 
10992 		if (dm_old_con_state->abm_level !=
10993 		    dm_new_con_state->abm_level)
10994 			new_crtc_state->connectors_changed = true;
10995 	}
10996 
10997 #if defined(CONFIG_DRM_AMD_DC_DCN)
10998 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10999 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11000 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11001 				ret = add_affected_mst_dsc_crtcs(state, crtc);
11002 				if (ret) {
11003 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11004 					goto fail;
11005 				}
11006 			}
11007 		}
11008 		pre_validate_dsc(state, &dm_state, vars);
11009 	}
11010 #endif
11011 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11012 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11013 
11014 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11015 		    !new_crtc_state->color_mgmt_changed &&
11016 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
11018 			continue;
11019 
11020 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11021 		if (ret) {
11022 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11023 			goto fail;
11024 		}
11025 
11026 		if (!new_crtc_state->enable)
11027 			continue;
11028 
11029 		ret = drm_atomic_add_affected_connectors(state, crtc);
11030 		if (ret) {
11031 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11032 			goto fail;
11033 		}
11034 
11035 		ret = drm_atomic_add_affected_planes(state, crtc);
11036 		if (ret) {
11037 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11038 			goto fail;
11039 		}
11040 
11041 		if (dm_old_crtc_state->dsc_force_changed)
11042 			new_crtc_state->mode_changed = true;
11043 	}
11044 
11045 	/*
11046 	 * Add all primary and overlay planes on the CRTC to the state
11047 	 * whenever a plane is enabled to maintain correct z-ordering
11048 	 * and to enable fast surface updates.
11049 	 */
11050 	drm_for_each_crtc(crtc, dev) {
11051 		bool modified = false;
11052 
11053 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11054 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11055 				continue;
11056 
11057 			if (new_plane_state->crtc == crtc ||
11058 			    old_plane_state->crtc == crtc) {
11059 				modified = true;
11060 				break;
11061 			}
11062 		}
11063 
11064 		if (!modified)
11065 			continue;
11066 
11067 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11068 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11069 				continue;
11070 
11071 			new_plane_state =
11072 				drm_atomic_get_plane_state(state, plane);
11073 
11074 			if (IS_ERR(new_plane_state)) {
11075 				ret = PTR_ERR(new_plane_state);
11076 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11077 				goto fail;
11078 			}
11079 		}
11080 	}
11081 
	/* Remove existing planes if they are modified */
11083 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11084 		ret = dm_update_plane_state(dc, state, plane,
11085 					    old_plane_state,
11086 					    new_plane_state,
11087 					    false,
11088 					    &lock_and_validation_needed);
11089 		if (ret) {
11090 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11091 			goto fail;
11092 		}
11093 	}
11094 
11095 	/* Disable all crtcs which require disable */
11096 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11097 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11098 					   old_crtc_state,
11099 					   new_crtc_state,
11100 					   false,
11101 					   &lock_and_validation_needed);
11102 		if (ret) {
11103 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11104 			goto fail;
11105 		}
11106 	}
11107 
11108 	/* Enable all crtcs which require enable */
11109 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11110 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11111 					   old_crtc_state,
11112 					   new_crtc_state,
11113 					   true,
11114 					   &lock_and_validation_needed);
11115 		if (ret) {
11116 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11117 			goto fail;
11118 		}
11119 	}
11120 
11121 	/* Add new/modified planes */
11122 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11123 		ret = dm_update_plane_state(dc, state, plane,
11124 					    old_plane_state,
11125 					    new_plane_state,
11126 					    true,
11127 					    &lock_and_validation_needed);
11128 		if (ret) {
11129 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11130 			goto fail;
11131 		}
11132 	}
11133 
11134 	/* Run this here since we want to validate the streams we created */
11135 	ret = drm_atomic_helper_check_planes(dev, state);
11136 	if (ret) {
11137 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11138 		goto fail;
11139 	}
11140 
11141 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11142 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11143 		if (dm_new_crtc_state->mpo_requested)
11144 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11145 	}
11146 
11147 	/* Check cursor planes scaling */
11148 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11149 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11150 		if (ret) {
11151 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11152 			goto fail;
11153 		}
11154 	}
11155 
11156 	if (state->legacy_cursor_update) {
11157 		/*
11158 		 * This is a fast cursor update coming from the plane update
11159 		 * helper, check if it can be done asynchronously for better
11160 		 * performance.
11161 		 */
11162 		state->async_update =
11163 			!drm_atomic_helper_async_check(dev, state);
11164 
11165 		/*
11166 		 * Skip the remaining global validation if this is an async
11167 		 * update. Cursor updates can be done without affecting
11168 		 * state or bandwidth calcs and this avoids the performance
11169 		 * penalty of locking the private state object and
11170 		 * allocating a new dc_state.
11171 		 */
11172 		if (state->async_update)
11173 			return 0;
11174 	}
11175 
	/* Check scaling and underscan changes */
	/*
	 * TODO Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to
	 * decide how to handle.
	 */
11181 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11182 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11183 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11184 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11185 
11186 		/* Skip any modesets/resets */
11187 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11188 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11189 			continue;
11190 
		/* Skip anything that is not a scaling or underscan change */
11192 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11193 			continue;
11194 
11195 		lock_and_validation_needed = true;
11196 	}
11197 
11198 #if defined(CONFIG_DRM_AMD_DC_DCN)
11199 	/* set the slot info for each mst_state based on the link encoding format */
11200 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11201 		struct amdgpu_dm_connector *aconnector;
11202 		struct drm_connector *connector;
11203 		struct drm_connector_list_iter iter;
11204 		u8 link_coding_cap;
11205 
		if (!mgr->mst_state)
11207 			continue;
11208 
11209 		drm_connector_list_iter_begin(dev, &iter);
11210 		drm_for_each_connector_iter(connector, &iter) {
11211 			int id = connector->index;
11212 
11213 			if (id == mst_state->mgr->conn_base_id) {
11214 				aconnector = to_amdgpu_dm_connector(connector);
11215 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11216 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11217 
11218 				break;
11219 			}
11220 		}
11221 		drm_connector_list_iter_end(&iter);
11222 
11223 	}
11224 #endif
11225 	/**
11226 	 * Streams and planes are reset when there are changes that affect
11227 	 * bandwidth. Anything that affects bandwidth needs to go through
11228 	 * DC global validation to ensure that the configuration can be applied
11229 	 * to hardware.
11230 	 *
	 * We currently have to stall out here in atomic_check for outstanding
11232 	 * commits to finish in this case because our IRQ handlers reference
11233 	 * DRM state directly - we can end up disabling interrupts too early
11234 	 * if we don't.
11235 	 *
11236 	 * TODO: Remove this stall and drop DM state private objects.
11237 	 */
11238 	if (lock_and_validation_needed) {
11239 		ret = dm_atomic_get_state(state, &dm_state);
11240 		if (ret) {
11241 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11242 			goto fail;
11243 		}
11244 
11245 		ret = do_aquire_global_lock(dev, state);
11246 		if (ret) {
11247 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11248 			goto fail;
11249 		}
11250 
11251 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
			goto fail;
		}
11256 
11257 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11258 		if (ret) {
11259 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11260 			goto fail;
11261 		}
11262 #endif
11263 
11264 		/*
11265 		 * Perform validation of MST topology in the state:
11266 		 * We need to perform MST atomic check before calling
11267 		 * dc_validate_global_state(), or there is a chance
11268 		 * to get stuck in an infinite loop and hang eventually.
11269 		 */
11270 		ret = drm_dp_mst_atomic_check(state);
11271 		if (ret) {
11272 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11273 			goto fail;
11274 		}
11275 		status = dc_validate_global_state(dc, dm_state->context, true);
11276 		if (status != DC_OK) {
11277 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11278 				       dc_status_to_str(status), status);
11279 			ret = -EINVAL;
11280 			goto fail;
11281 		}
11282 	} else {
11283 		/*
11284 		 * The commit is a fast update. Fast updates shouldn't change
11285 		 * the DC context, affect global validation, and can have their
11286 		 * commit work done in parallel with other commits not touching
11287 		 * the same resource. If we have a new DC context as part of
11288 		 * the DM atomic state from validation we need to free it and
11289 		 * retain the existing one instead.
11290 		 *
11291 		 * Furthermore, since the DM atomic state only contains the DC
11292 		 * context and can safely be annulled, we can free the state
11293 		 * and clear the associated private object now to free
11294 		 * some memory and avoid a possible use-after-free later.
11295 		 */
11296 
11297 		for (i = 0; i < state->num_private_objs; i++) {
11298 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11299 
11300 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11301 				int j = state->num_private_objs-1;
11302 
11303 				dm_atomic_destroy_state(obj,
11304 						state->private_objs[i].state);
11305 
11306 				/* If i is not at the end of the array then the
11307 				 * last element needs to be moved to where i was
11308 				 * before the array can safely be truncated.
11309 				 */
11310 				if (i != j)
11311 					state->private_objs[i] =
11312 						state->private_objs[j];
11313 
11314 				state->private_objs[j].ptr = NULL;
11315 				state->private_objs[j].state = NULL;
11316 				state->private_objs[j].old_state = NULL;
11317 				state->private_objs[j].new_state = NULL;
11318 
11319 				state->num_private_objs = j;
11320 				break;
11321 			}
11322 		}
11323 	}
11324 
11325 	/* Store the overall update type for use later in atomic check. */
11326 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11327 		struct dm_crtc_state *dm_new_crtc_state =
11328 			to_dm_crtc_state(new_crtc_state);
11329 
11330 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11331 							 UPDATE_TYPE_FULL :
11332 							 UPDATE_TYPE_FAST;
11333 	}
11334 
11335 	/* Must be success */
11336 	WARN_ON(ret);
11337 
11338 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11339 
11340 	return ret;
11341 
11342 fail:
11343 	if (ret == -EDEADLK)
11344 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11345 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11346 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11347 	else
11348 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11349 
11350 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11351 
11352 	return ret;
11353 }
11354 
11355 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11356 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11357 {
11358 	uint8_t dpcd_data;
11359 	bool capable = false;
11360 
11361 	if (amdgpu_dm_connector->dc_link &&
11362 		dm_helpers_dp_read_dpcd(
11363 				NULL,
11364 				amdgpu_dm_connector->dc_link,
11365 				DP_DOWN_STREAM_PORT_COUNT,
11366 				&dpcd_data,
11367 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11369 	}
11370 
11371 	return capable;
11372 }
11373 
11374 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11375 		unsigned int offset,
11376 		unsigned int total_length,
11377 		uint8_t *data,
11378 		unsigned int length,
11379 		struct amdgpu_hdmi_vsdb_info *vsdb)
11380 {
11381 	bool res;
11382 	union dmub_rb_cmd cmd;
11383 	struct dmub_cmd_send_edid_cea *input;
11384 	struct dmub_cmd_edid_cea_output *output;
11385 
11386 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11387 		return false;
11388 
11389 	memset(&cmd, 0, sizeof(cmd));
11390 
11391 	input = &cmd.edid_cea.data.input;
11392 
11393 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11394 	cmd.edid_cea.header.sub_type = 0;
11395 	cmd.edid_cea.header.payload_bytes =
11396 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11397 	input->offset = offset;
11398 	input->length = length;
11399 	input->cea_total_length = total_length;
11400 	memcpy(input->payload, data, length);
11401 
11402 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11403 	if (!res) {
11404 		DRM_ERROR("EDID CEA parser failed\n");
11405 		return false;
11406 	}
11407 
11408 	output = &cmd.edid_cea.data.output;
11409 
11410 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11411 		if (!output->ack.success) {
11412 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11413 					output->ack.offset);
11414 		}
11415 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11416 		if (!output->amd_vsdb.vsdb_found)
11417 			return false;
11418 
11419 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11420 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11421 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11422 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11423 	} else {
11424 		DRM_WARN("Unknown EDID CEA parser results\n");
11425 		return false;
11426 	}
11427 
11428 	return true;
11429 }
11430 
11431 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11432 		uint8_t *edid_ext, int len,
11433 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11434 {
11435 	int i;
11436 
11437 	/* send extension block to DMCU for parsing */
11438 	for (i = 0; i < len; i += 8) {
11439 		bool res;
11440 		int offset;
11441 
		/* send 8 bytes at a time */
11443 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11444 			return false;
11445 
11446 		if (i+8 == len) {
			/* EDID block fully sent, expect the parsing result */
11448 			int version, min_rate, max_rate;
11449 
11450 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11451 			if (res) {
11452 				/* amd vsdb found */
11453 				vsdb_info->freesync_supported = 1;
11454 				vsdb_info->amd_vsdb_version = version;
11455 				vsdb_info->min_refresh_rate_hz = min_rate;
11456 				vsdb_info->max_refresh_rate_hz = max_rate;
11457 				return true;
11458 			}
11459 			/* not amd vsdb */
11460 			return false;
11461 		}
11462 
		/* check for ack */
11464 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11465 		if (!res)
11466 			return false;
11467 	}
11468 
11469 	return false;
11470 }
11471 
11472 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11473 		uint8_t *edid_ext, int len,
11474 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11475 {
11476 	int i;
11477 
	/* send extension block to DMUB for parsing */
11479 	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
11481 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11482 			return false;
11483 	}
11484 
11485 	return vsdb_info->freesync_supported;
11486 }
11487 
11488 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11489 		uint8_t *edid_ext, int len,
11490 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11491 {
11492 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11493 
11494 	if (adev->dm.dmub_srv)
11495 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11496 	else
11497 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11498 }
11499 
11500 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11501 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11502 {
11503 	uint8_t *edid_ext = NULL;
11504 	int i;
11505 	bool valid_vsdb_found = false;
11506 
11507 	/*----- drm_find_cea_extension() -----*/
11508 	/* No EDID or EDID extensions */
11509 	if (edid == NULL || edid->extensions == 0)
11510 		return -ENODEV;
11511 
11512 	/* Find CEA extension */
11513 	for (i = 0; i < edid->extensions; i++) {
11514 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11515 		if (edid_ext[0] == CEA_EXT)
11516 			break;
11517 	}
11518 
11519 	if (i == edid->extensions)
11520 		return -ENODEV;
11521 
11522 	/*----- cea_db_offsets() -----*/
11523 	if (edid_ext[0] != CEA_EXT)
11524 		return -ENODEV;
11525 
11526 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11527 
11528 	return valid_vsdb_found ? i : -ENODEV;
11529 }
11530 
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
					struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
		goto update;
	}

	sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

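	/*
	 * DP/eDP sinks report their refresh range through the EDID monitor
	 * range descriptor; HDMI sinks report it through the AMD vendor
	 * specific data block handled further below.
	 */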
	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		if (edid_check_required && (edid->version > 1 ||
		   (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing	= &edid->detailed_timings[i];
				data	= &timing->data.other_data;
				range	= &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information is provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported.
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

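			/*
			 * Only advertise FreeSync when the reported refresh
			 * range spans more than 10 Hz.
			 */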
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
		}
	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing  = &edid->detailed_timings[i];
			data    = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

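/*
 * Propagate adev->dm.force_timing_sync to every stream in the current DC
 * state and retrigger timing synchronization across the CRTCs.
 */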
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

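/* Register write helper for DC; traces the access via amdgpu_dc_wreg. */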
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write; address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

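/* Register read helper for DC; traces the access via amdgpu_dc_rreg. */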
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

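	/*
	 * Register reads are not expected while DMUB is gathering register
	 * writes for offload, unless burst writes are in use.
	 */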
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

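/*
 * Translate the outcome of a DMUB async-to-sync transaction into a return
 * value and an operation result for the caller. For AUX commands the return
 * value is the reply length on success and -1 otherwise; for SET_CONFIG it
 * is 0 on success and -1 otherwise.
 */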
static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
						struct dc_context *ctx,
						uint8_t status_type,
						uint32_t *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int return_status = -1;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;

	if (is_cmd_aux) {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = p_notify->aux_reply.length;
			*operation_result = p_notify->result;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
			*operation_result = AUX_RET_ERROR_TIMEOUT;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		} else {
			*operation_result = AUX_RET_ERROR_UNKNOWN;
		}
	} else {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = 0;
			*operation_result = p_notify->sc_status;
		} else {
			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
		}
	}

	return return_status;
}

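/*
 * Process an AUX or SET_CONFIG request through DMUB and return the result
 * to the caller synchronously.
 */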
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
	unsigned int link_index, void *cmd_payload, void *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	if (is_cmd_aux) {
		dc_process_dmub_aux_transfer_async(ctx->dc,
			link_index, (struct aux_payload *)cmd_payload);
	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
					(struct set_config_cmd_payload *)cmd_payload,
					adev->dm.dmub_notify)) {
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
					(uint32_t *)operation_result);
	}

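	/* wait up to 10 seconds for the DMUB notification to arrive */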
	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timed out!\n");
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
				(uint32_t *)operation_result);
	}

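	/* on success, copy the AUX reply back into the caller's payload */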
	if (is_cmd_aux) {
		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
			struct aux_payload *payload = (struct aux_payload *)cmd_payload;

			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
				       adev->dm.dmub_notify->aux_reply.length);
			}
		}
	}

	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
			(uint32_t *)operation_result);
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on CHIP_VANGOGH.
 * If everything goes well, we may consider expanding
 * seamless boot to other ASICs.
 */
bool check_seamless_boot_capability(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VANGOGH:
		if (!adev->mman.keep_stolen_vga_memory)
			return true;
		break;
	default:
		break;
	}

	return false;
}